author     Dimitry Andric <dim@FreeBSD.org>          2017-01-02 19:17:04 +0000
committer  Dimitry Andric <dim@FreeBSD.org>          2017-01-02 19:17:04 +0000
commit     b915e9e0fc85ba6f398b3fab0db6a81a8913af94 (patch)
tree       98b8f811c7aff2547cab8642daf372d6c59502fb /include
parent     6421cca32f69ac849537a3cff78c352195e99f1b (diff)
Diffstat (limited to 'include')
-rw-r--r--include/llvm-c/Core.h84
-rw-r--r--include/llvm-c/Transforms/Scalar.h6
-rw-r--r--include/llvm-c/lto.h44
-rw-r--r--include/llvm/ADT/APFloat.h767
-rw-r--r--include/llvm/ADT/APInt.h151
-rw-r--r--include/llvm/ADT/APSInt.h28
-rw-r--r--include/llvm/ADT/AllocatorList.h226
-rw-r--r--include/llvm/ADT/ArrayRef.h161
-rw-r--r--include/llvm/ADT/BitVector.h4
-rw-r--r--include/llvm/ADT/CachedHashString.h184
-rw-r--r--include/llvm/ADT/DAGDeltaAlgorithm.h6
-rw-r--r--include/llvm/ADT/DenseMap.h93
-rw-r--r--include/llvm/ADT/DenseMapInfo.h30
-rw-r--r--include/llvm/ADT/DenseSet.h88
-rw-r--r--include/llvm/ADT/DepthFirstIterator.h120
-rw-r--r--include/llvm/ADT/EpochTracker.h33
-rw-r--r--include/llvm/ADT/EquivalenceClasses.h17
-rw-r--r--include/llvm/ADT/FoldingSet.h39
-rw-r--r--include/llvm/ADT/GraphTraits.h32
-rw-r--r--include/llvm/ADT/ImmutableList.h32
-rw-r--r--include/llvm/ADT/ImmutableMap.h20
-rw-r--r--include/llvm/ADT/ImmutableSet.h35
-rw-r--r--include/llvm/ADT/IntervalMap.h51
-rw-r--r--include/llvm/ADT/IntrusiveRefCntPtr.h429
-rw-r--r--include/llvm/ADT/MapVector.h23
-rw-r--r--include/llvm/ADT/Optional.h169
-rw-r--r--include/llvm/ADT/PackedVector.h8
-rw-r--r--include/llvm/ADT/PointerSumType.h2
-rw-r--r--include/llvm/ADT/PointerUnion.h19
-rw-r--r--include/llvm/ADT/PostOrderIterator.h86
-rw-r--r--include/llvm/ADT/PriorityQueue.h3
-rw-r--r--include/llvm/ADT/PriorityWorklist.h20
-rw-r--r--include/llvm/ADT/SCCIterator.h17
-rw-r--r--include/llvm/ADT/STLExtras.h468
-rw-r--r--include/llvm/ADT/ScopeExit.h54
-rw-r--r--include/llvm/ADT/ScopedHashTable.h24
-rw-r--r--include/llvm/ADT/SetVector.h36
-rw-r--r--include/llvm/ADT/SmallPtrSet.h125
-rw-r--r--include/llvm/ADT/SmallSet.h13
-rw-r--r--include/llvm/ADT/SmallString.h8
-rw-r--r--include/llvm/ADT/SmallVector.h33
-rw-r--r--include/llvm/ADT/SparseBitVector.h115
-rw-r--r--include/llvm/ADT/SparseMultiSet.h32
-rw-r--r--include/llvm/ADT/SparseSet.h23
-rw-r--r--include/llvm/ADT/Statistic.h13
-rw-r--r--include/llvm/ADT/StringExtras.h61
-rw-r--r--include/llvm/ADT/StringMap.h33
-rw-r--r--include/llvm/ADT/StringRef.h303
-rw-r--r--include/llvm/ADT/StringSwitch.h179
-rw-r--r--include/llvm/ADT/TinyPtrVector.h20
-rw-r--r--include/llvm/ADT/Triple.h29
-rw-r--r--include/llvm/ADT/Twine.h30
-rw-r--r--include/llvm/ADT/ilist.h784
-rw-r--r--include/llvm/ADT/ilist_base.h95
-rw-r--r--include/llvm/ADT/ilist_iterator.h185
-rw-r--r--include/llvm/ADT/ilist_node.h241
-rw-r--r--include/llvm/ADT/ilist_node_base.h53
-rw-r--r--include/llvm/ADT/ilist_node_options.h133
-rw-r--r--include/llvm/ADT/iterator.h22
-rw-r--r--include/llvm/ADT/simple_ilist.h310
-rw-r--r--include/llvm/Analysis/AliasAnalysis.h90
-rw-r--r--include/llvm/Analysis/AliasAnalysisEvaluator.h2
-rw-r--r--include/llvm/Analysis/AliasSetTracker.h87
-rw-r--r--include/llvm/Analysis/AssumptionCache.h10
-rw-r--r--include/llvm/Analysis/BasicAliasAnalysis.h27
-rw-r--r--include/llvm/Analysis/BlockFrequencyInfo.h11
-rw-r--r--include/llvm/Analysis/BlockFrequencyInfoImpl.h21
-rw-r--r--include/llvm/Analysis/BranchProbabilityInfo.h6
-rw-r--r--include/llvm/Analysis/CFGPrinter.h62
-rw-r--r--include/llvm/Analysis/CFLAndersAliasAnalysis.h9
-rw-r--r--include/llvm/Analysis/CFLSteensAliasAnalysis.h19
-rw-r--r--include/llvm/Analysis/CGSCCPassManager.h675
-rw-r--r--include/llvm/Analysis/CallGraph.h77
-rw-r--r--include/llvm/Analysis/CallGraphSCCPass.h4
-rw-r--r--include/llvm/Analysis/CodeMetrics.h2
-rw-r--r--include/llvm/Analysis/ConstantFolding.h28
-rw-r--r--include/llvm/Analysis/DemandedBits.h6
-rw-r--r--include/llvm/Analysis/DependenceAnalysis.h16
-rw-r--r--include/llvm/Analysis/DominanceFrontier.h6
-rw-r--r--include/llvm/Analysis/EHPersonalities.h5
-rw-r--r--include/llvm/Analysis/GlobalsModRef.h4
-rw-r--r--include/llvm/Analysis/IVUsers.h45
-rw-r--r--include/llvm/Analysis/InlineCost.h98
-rw-r--r--include/llvm/Analysis/InstructionSimplify.h13
-rw-r--r--include/llvm/Analysis/Interval.h24
-rw-r--r--include/llvm/Analysis/IteratedDominanceFrontier.h1
-rw-r--r--include/llvm/Analysis/LazyBlockFrequencyInfo.h13
-rw-r--r--include/llvm/Analysis/LazyBranchProbabilityInfo.h109
-rw-r--r--include/llvm/Analysis/LazyCallGraph.h295
-rw-r--r--include/llvm/Analysis/LazyValueInfo.h2
-rw-r--r--include/llvm/Analysis/Loads.h5
-rw-r--r--include/llvm/Analysis/LoopAccessAnalysis.h62
-rw-r--r--include/llvm/Analysis/LoopInfo.h84
-rw-r--r--include/llvm/Analysis/LoopInfoImpl.h146
-rw-r--r--include/llvm/Analysis/LoopIterator.h66
-rw-r--r--include/llvm/Analysis/LoopPass.h20
-rw-r--r--include/llvm/Analysis/LoopPassManager.h57
-rw-r--r--include/llvm/Analysis/MemoryBuiltins.h10
-rw-r--r--include/llvm/Analysis/MemoryDependenceAnalysis.h29
-rw-r--r--include/llvm/Analysis/ModuleSummaryAnalysis.h62
-rw-r--r--include/llvm/Analysis/ObjCARCAliasAnalysis.h9
-rw-r--r--include/llvm/Analysis/OptimizationDiagnosticInfo.h196
-rw-r--r--include/llvm/Analysis/PostDominators.h14
-rw-r--r--include/llvm/Analysis/ProfileSummaryInfo.h33
-rw-r--r--include/llvm/Analysis/RegionInfo.h36
-rw-r--r--include/llvm/Analysis/RegionInfoImpl.h187
-rw-r--r--include/llvm/Analysis/RegionIterator.h216
-rw-r--r--include/llvm/Analysis/RegionPass.h4
-rw-r--r--include/llvm/Analysis/ScalarEvolution.h3222
-rw-r--r--include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h4
-rw-r--r--include/llvm/Analysis/ScalarEvolutionExpander.h176
-rw-r--r--include/llvm/Analysis/ScalarEvolutionExpressions.h46
-rw-r--r--include/llvm/Analysis/ScopedNoAliasAA.h15
-rw-r--r--include/llvm/Analysis/TargetFolder.h5
-rw-r--r--include/llvm/Analysis/TargetLibraryInfo.def3
-rw-r--r--include/llvm/Analysis/TargetLibraryInfo.h76
-rw-r--r--include/llvm/Analysis/TargetTransformInfo.h163
-rw-r--r--include/llvm/Analysis/TargetTransformInfoImpl.h81
-rw-r--r--include/llvm/Analysis/TypeBasedAliasAnalysis.h12
-rw-r--r--include/llvm/Analysis/ValueTracking.h75
-rw-r--r--include/llvm/Bitcode/BitCodes.h8
-rw-r--r--include/llvm/Bitcode/BitcodeReader.h (renamed from include/llvm/Bitcode/ReaderWriter.h)180
-rw-r--r--include/llvm/Bitcode/BitcodeWriter.h80
-rw-r--r--include/llvm/Bitcode/BitcodeWriterPass.h4
-rw-r--r--include/llvm/Bitcode/BitstreamReader.h241
-rw-r--r--include/llvm/Bitcode/BitstreamWriter.h19
-rw-r--r--include/llvm/Bitcode/LLVMBitCodes.h84
-rw-r--r--include/llvm/CodeGen/Analysis.h14
-rw-r--r--include/llvm/CodeGen/AsmPrinter.h53
-rw-r--r--include/llvm/CodeGen/BasicTTIImpl.h118
-rw-r--r--include/llvm/CodeGen/CallingConvLower.h50
-rw-r--r--include/llvm/CodeGen/CommandFlags.h91
-rw-r--r--include/llvm/CodeGen/DIE.h319
-rw-r--r--include/llvm/CodeGen/DIEValue.def1
-rw-r--r--include/llvm/CodeGen/FastISel.h15
-rw-r--r--include/llvm/CodeGen/FunctionLoweringInfo.h61
-rw-r--r--include/llvm/CodeGen/GlobalISel/CallLowering.h124
-rw-r--r--include/llvm/CodeGen/GlobalISel/GISelAccessor.h6
-rw-r--r--include/llvm/CodeGen/GlobalISel/IRTranslator.h290
-rw-r--r--include/llvm/CodeGen/GlobalISel/InstructionSelect.h53
-rw-r--r--include/llvm/CodeGen/GlobalISel/InstructionSelector.h63
-rw-r--r--include/llvm/CodeGen/GlobalISel/Legalizer.h65
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizerHelper.h104
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizerInfo.h207
-rw-r--r--include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h426
-rw-r--r--include/llvm/CodeGen/GlobalISel/RegBankSelect.h29
-rw-r--r--include/llvm/CodeGen/GlobalISel/RegisterBank.h11
-rw-r--r--include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h255
-rw-r--r--include/llvm/CodeGen/GlobalISel/Utils.h43
-rw-r--r--include/llvm/CodeGen/ISDOpcodes.h7
-rw-r--r--include/llvm/CodeGen/LiveInterval.h52
-rw-r--r--include/llvm/CodeGen/LiveIntervalAnalysis.h34
-rw-r--r--include/llvm/CodeGen/LivePhysRegs.h30
-rw-r--r--include/llvm/CodeGen/LiveVariables.h3
-rw-r--r--include/llvm/CodeGen/LowLevelType.h206
-rw-r--r--include/llvm/CodeGen/MIRYamlMapping.h16
-rw-r--r--include/llvm/CodeGen/MachineBasicBlock.h152
-rw-r--r--include/llvm/CodeGen/MachineBlockFrequencyInfo.h1
-rw-r--r--include/llvm/CodeGen/MachineDominators.h12
-rw-r--r--include/llvm/CodeGen/MachineFrameInfo.h5
-rw-r--r--include/llvm/CodeGen/MachineFunction.h352
-rw-r--r--include/llvm/CodeGen/MachineFunctionAnalysis.h55
-rw-r--r--include/llvm/CodeGen/MachineInstr.h59
-rw-r--r--include/llvm/CodeGen/MachineInstrBuilder.h28
-rw-r--r--include/llvm/CodeGen/MachineInstrBundle.h31
-rw-r--r--include/llvm/CodeGen/MachineInstrBundleIterator.h218
-rw-r--r--include/llvm/CodeGen/MachineLoopInfo.h38
-rw-r--r--include/llvm/CodeGen/MachineMemOperand.h60
-rw-r--r--include/llvm/CodeGen/MachineModuleInfo.h344
-rw-r--r--include/llvm/CodeGen/MachineOperand.h47
-rw-r--r--include/llvm/CodeGen/MachinePassRegistry.h16
-rw-r--r--include/llvm/CodeGen/MachineRegionInfo.h14
-rw-r--r--include/llvm/CodeGen/MachineRegisterInfo.h77
-rw-r--r--include/llvm/CodeGen/MachineScheduler.h124
-rw-r--r--include/llvm/CodeGen/MachineValueType.h4
-rw-r--r--include/llvm/CodeGen/PBQP/Graph.h25
-rw-r--r--include/llvm/CodeGen/PBQP/Math.h235
-rw-r--r--include/llvm/CodeGen/PBQP/Solution.h32
-rw-r--r--include/llvm/CodeGen/Passes.h29
-rw-r--r--include/llvm/CodeGen/PseudoSourceValue.h6
-rw-r--r--include/llvm/CodeGen/RegAllocPBQP.h76
-rw-r--r--include/llvm/CodeGen/RegisterPressure.h12
-rw-r--r--include/llvm/CodeGen/RegisterScavenging.h30
-rw-r--r--include/llvm/CodeGen/RuntimeLibcalls.h11
-rw-r--r--include/llvm/CodeGen/ScheduleDAG.h14
-rw-r--r--include/llvm/CodeGen/ScheduleDAGInstrs.h10
-rw-r--r--include/llvm/CodeGen/SelectionDAG.h169
-rw-r--r--include/llvm/CodeGen/SelectionDAGNodes.h454
-rw-r--r--include/llvm/CodeGen/SlotIndexes.h35
-rw-r--r--include/llvm/CodeGen/StackMaps.h85
-rw-r--r--include/llvm/CodeGen/TailDuplicator.h48
-rw-r--r--include/llvm/CodeGen/TargetLoweringObjectFileImpl.h65
-rw-r--r--include/llvm/CodeGen/TargetPassConfig.h43
-rw-r--r--include/llvm/CodeGen/ValueTypes.h6
-rw-r--r--include/llvm/Config/abi-breaking.h.cmake48
-rw-r--r--include/llvm/Config/config.h.cmake248
-rw-r--r--include/llvm/Config/llvm-config.h.cmake36
-rw-r--r--include/llvm/DebugInfo/CodeView/ByteStream.h58
-rw-r--r--include/llvm/DebugInfo/CodeView/CVDebugRecord.h55
-rw-r--r--include/llvm/DebugInfo/CodeView/CVRecord.h65
-rw-r--r--include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h78
-rw-r--r--include/llvm/DebugInfo/CodeView/CVTypeVisitor.h11
-rw-r--r--include/llvm/DebugInfo/CodeView/CodeView.h7
-rw-r--r--include/llvm/DebugInfo/CodeView/CodeViewError.h1
-rw-r--r--include/llvm/DebugInfo/CodeView/CodeViewOStream.h39
-rw-r--r--include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h170
-rw-r--r--include/llvm/DebugInfo/CodeView/EnumTables.h1
-rw-r--r--include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h65
-rw-r--r--include/llvm/DebugInfo/CodeView/ListRecordBuilder.h65
-rw-r--r--include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h48
-rw-r--r--include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h35
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleSubstream.h30
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h109
-rw-r--r--include/llvm/DebugInfo/CodeView/RecordSerialization.h155
-rw-r--r--include/llvm/DebugInfo/CodeView/StreamRef.h104
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolDeserializer.h74
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h10
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolDumper.h4
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolRecord.h1244
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h44
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolSerializer.h96
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h71
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h48
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h14
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeDeserializer.h136
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeDumper.h15
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeIndex.h3
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeRecord.h726
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h8
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeRecordMapping.h52
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeRecords.def3
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeSerializer.h140
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeTableBuilder.h141
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h114
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h45
-rw-r--r--include/llvm/DebugInfo/DIContext.h39
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h90
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h3
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFContext.h37
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h19
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h2
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h130
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDebugLine.h2
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h9
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h77
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDie.h369
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFFormValue.h105
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h68
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h2
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFUnit.h72
-rw-r--r--include/llvm/DebugInfo/MSF/ByteStream.h169
-rw-r--r--include/llvm/DebugInfo/MSF/IMSFFile.h (renamed from include/llvm/DebugInfo/PDB/Raw/IPDBFile.h)24
-rw-r--r--include/llvm/DebugInfo/MSF/MSFBuilder.h (renamed from include/llvm/DebugInfo/PDB/Raw/MsfBuilder.h)49
-rw-r--r--include/llvm/DebugInfo/MSF/MSFCommon.h (renamed from include/llvm/DebugInfo/PDB/Raw/MsfCommon.h)32
-rw-r--r--include/llvm/DebugInfo/MSF/MSFError.h47
-rw-r--r--include/llvm/DebugInfo/MSF/MSFStreamLayout.h35
-rw-r--r--include/llvm/DebugInfo/MSF/MappedBlockStream.h144
-rw-r--r--include/llvm/DebugInfo/MSF/SequencedItemStream.h93
-rw-r--r--include/llvm/DebugInfo/MSF/StreamArray.h (renamed from include/llvm/DebugInfo/CodeView/StreamArray.h)50
-rw-r--r--include/llvm/DebugInfo/MSF/StreamInterface.h (renamed from include/llvm/DebugInfo/CodeView/StreamInterface.h)32
-rw-r--r--include/llvm/DebugInfo/MSF/StreamReader.h (renamed from include/llvm/DebugInfo/CodeView/StreamReader.h)50
-rw-r--r--include/llvm/DebugInfo/MSF/StreamRef.h135
-rw-r--r--include/llvm/DebugInfo/MSF/StreamWriter.h (renamed from include/llvm/DebugInfo/CodeView/StreamWriter.h)48
-rw-r--r--include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h14
-rw-r--r--include/llvm/DebugInfo/PDB/DIA/DIAError.h9
-rw-r--r--include/llvm/DebugInfo/PDB/GenericError.h7
-rw-r--r--include/llvm/DebugInfo/PDB/IPDBEnumChildren.h11
-rw-r--r--include/llvm/DebugInfo/PDB/IPDBSession.h2
-rw-r--r--include/llvm/DebugInfo/PDB/PDBContext.h24
-rw-r--r--include/llvm/DebugInfo/PDB/PDBTypes.h10
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/DbiStream.h79
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/DbiStreamBuilder.h74
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/DirectoryStreamData.h37
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/GlobalsStream.h45
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/IPDBStreamData.h38
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/ISectionContribVisitor.h8
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/IndexedStreamData.h34
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/InfoStream.h14
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/InfoStreamBuilder.h21
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/MappedBlockStream.h68
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/ModInfo.h25
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/ModStream.h21
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/NameHashTable.h14
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/NameMap.h5
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/NameMapBuilder.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/PDBFile.h64
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/PDBFileBuilder.h33
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/PublicsStream.h28
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/RawConstants.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/RawError.h3
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/RawSession.h5
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/RawTypes.h222
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/SymbolStream.h9
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/TpiHashing.h95
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/TpiStream.h30
-rw-r--r--include/llvm/DebugInfo/PDB/Raw/TpiStreamBuilder.h82
-rw-r--r--include/llvm/Demangle/Demangle.h28
-rw-r--r--include/llvm/ExecutionEngine/ExecutionEngine.h27
-rw-r--r--include/llvm/ExecutionEngine/JITEventListener.h22
-rw-r--r--include/llvm/ExecutionEngine/JITSymbol.h197
-rw-r--r--include/llvm/ExecutionEngine/JITSymbolFlags.h91
-rw-r--r--include/llvm/ExecutionEngine/ObjectCache.h12
-rw-r--r--include/llvm/ExecutionEngine/ObjectMemoryBuffer.h2
-rw-r--r--include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h314
-rw-r--r--include/llvm/ExecutionEngine/Orc/ExecutionUtils.h20
-rw-r--r--include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h6
-rw-r--r--include/llvm/ExecutionEngine/Orc/IRCompileLayer.h3
-rw-r--r--include/llvm/ExecutionEngine/Orc/IRTransformLayer.h2
-rw-r--r--include/llvm/ExecutionEngine/Orc/IndirectionUtils.h91
-rw-r--r--include/llvm/ExecutionEngine/Orc/JITSymbol.h87
-rw-r--r--include/llvm/ExecutionEngine/Orc/LambdaResolver.h7
-rw-r--r--include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h30
-rw-r--r--include/llvm/ExecutionEngine/Orc/LogicalDylib.h135
-rw-r--r--include/llvm/ExecutionEngine/Orc/NullResolver.h7
-rw-r--r--include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h47
-rw-r--r--include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h4
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcABISupport.h15
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcError.h2
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h252
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h299
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h194
-rw-r--r--include/llvm/ExecutionEngine/Orc/RPCChannel.h249
-rw-r--r--include/llvm/ExecutionEngine/Orc/RPCSerialization.h373
-rw-r--r--include/llvm/ExecutionEngine/Orc/RPCUtils.h1685
-rw-r--r--include/llvm/ExecutionEngine/Orc/RawByteChannel.h175
-rw-r--r--include/llvm/ExecutionEngine/RTDyldMemoryManager.h38
-rw-r--r--include/llvm/ExecutionEngine/RuntimeDyld.h84
-rw-r--r--include/llvm/ExecutionEngine/SectionMemoryManager.h13
-rw-r--r--include/llvm/IR/Attributes.h19
-rw-r--r--include/llvm/IR/AutoUpgrade.h7
-rw-r--r--include/llvm/IR/BasicBlock.h36
-rw-r--r--include/llvm/IR/CFG.h123
-rw-r--r--include/llvm/IR/CMakeLists.txt2
-rw-r--r--include/llvm/IR/CallSite.h78
-rw-r--r--include/llvm/IR/CallingConv.h3
-rw-r--r--include/llvm/IR/Comdat.h8
-rw-r--r--include/llvm/IR/Constant.h14
-rw-r--r--include/llvm/IR/ConstantFolder.h38
-rw-r--r--include/llvm/IR/ConstantRange.h34
-rw-r--r--include/llvm/IR/Constants.h164
-rw-r--r--include/llvm/IR/DIBuilder.h205
-rw-r--r--include/llvm/IR/DataLayout.h29
-rw-r--r--include/llvm/IR/DebugInfo.h22
-rw-r--r--include/llvm/IR/DebugInfoFlags.def16
-rw-r--r--include/llvm/IR/DebugInfoMetadata.h846
-rw-r--r--include/llvm/IR/DebugLoc.h12
-rw-r--r--include/llvm/IR/DerivedTypes.h95
-rw-r--r--include/llvm/IR/DiagnosticInfo.h280
-rw-r--r--include/llvm/IR/DiagnosticPrinter.h8
-rw-r--r--include/llvm/IR/Dominators.h41
-rw-r--r--include/llvm/IR/Function.h121
-rw-r--r--include/llvm/IR/GVMaterializer.h17
-rw-r--r--include/llvm/IR/GetElementPtrTypeIterator.h109
-rw-r--r--include/llvm/IR/GlobalAlias.h12
-rw-r--r--include/llvm/IR/GlobalIFunc.h12
-rw-r--r--include/llvm/IR/GlobalIndirectSymbol.h15
-rw-r--r--include/llvm/IR/GlobalObject.h15
-rw-r--r--include/llvm/IR/GlobalValue.h62
-rw-r--r--include/llvm/IR/GlobalVariable.h39
-rw-r--r--include/llvm/IR/IRBuilder.h78
-rw-r--r--include/llvm/IR/IRPrintingPasses.h2
-rw-r--r--include/llvm/IR/InlineAsm.h17
-rw-r--r--include/llvm/IR/InstIterator.h12
-rw-r--r--include/llvm/IR/InstrTypes.h55
-rw-r--r--include/llvm/IR/Instruction.h73
-rw-r--r--include/llvm/IR/Instructions.h1091
-rw-r--r--include/llvm/IR/IntrinsicInst.h40
-rw-r--r--include/llvm/IR/Intrinsics.h19
-rw-r--r--include/llvm/IR/Intrinsics.td89
-rw-r--r--include/llvm/IR/IntrinsicsAMDGPU.td259
-rw-r--r--include/llvm/IR/IntrinsicsNVVM.td33
-rw-r--r--include/llvm/IR/IntrinsicsPowerPC.td152
-rw-r--r--include/llvm/IR/IntrinsicsSystemZ.td5
-rw-r--r--include/llvm/IR/IntrinsicsX86.td1147
-rw-r--r--include/llvm/IR/LLVMContext.h51
-rw-r--r--include/llvm/IR/LegacyPassManagers.h4
-rw-r--r--include/llvm/IR/LegacyPassNameParser.h12
-rw-r--r--include/llvm/IR/MDBuilder.h3
-rw-r--r--include/llvm/IR/Mangler.h5
-rw-r--r--include/llvm/IR/Metadata.def1
-rw-r--r--include/llvm/IR/Metadata.h113
-rw-r--r--include/llvm/IR/Module.h114
-rw-r--r--include/llvm/IR/ModuleSummaryIndex.h192
-rw-r--r--include/llvm/IR/NoFolder.h49
-rw-r--r--include/llvm/IR/Operator.h68
-rw-r--r--include/llvm/IR/PassManager.h1074
-rw-r--r--include/llvm/IR/PassManagerInternal.h154
-rw-r--r--include/llvm/IR/PatternMatch.h35
-rw-r--r--include/llvm/IR/Statepoint.h32
-rw-r--r--include/llvm/IR/SymbolTableListTraits.h26
-rw-r--r--include/llvm/IR/Type.h55
-rw-r--r--include/llvm/IR/Use.h22
-rw-r--r--include/llvm/IR/UseListOrder.h14
-rw-r--r--include/llvm/IR/User.h29
-rw-r--r--include/llvm/IR/Value.h63
-rw-r--r--include/llvm/IR/ValueHandle.h11
-rw-r--r--include/llvm/IR/ValueMap.h37
-rw-r--r--include/llvm/IR/Verifier.h46
-rw-r--r--include/llvm/InitializePasses.h49
-rw-r--r--include/llvm/LTO/Caching.h37
-rw-r--r--include/llvm/LTO/Config.h181
-rw-r--r--include/llvm/LTO/LTO.h435
-rw-r--r--include/llvm/LTO/LTOBackend.h51
-rw-r--r--include/llvm/LTO/legacy/LTOCodeGenerator.h14
-rw-r--r--include/llvm/LTO/legacy/LTOModule.h66
-rw-r--r--include/llvm/LTO/legacy/ThinLTOCodeGenerator.h45
-rw-r--r--include/llvm/LinkAllIR.h1
-rw-r--r--include/llvm/LinkAllPasses.h7
-rw-r--r--include/llvm/Linker/IRMover.h9
-rw-r--r--include/llvm/MC/ConstantPools.h2
-rw-r--r--include/llvm/MC/LaneBitmask.h89
-rw-r--r--include/llvm/MC/MCAsmBackend.h3
-rw-r--r--include/llvm/MC/MCAsmInfo.h51
-rw-r--r--include/llvm/MC/MCAssembler.h4
-rw-r--r--include/llvm/MC/MCCodeView.h113
-rw-r--r--include/llvm/MC/MCContext.h56
-rw-r--r--include/llvm/MC/MCELFStreamer.h2
-rw-r--r--include/llvm/MC/MCExpr.h5
-rw-r--r--include/llvm/MC/MCFixup.h4
-rw-r--r--include/llvm/MC/MCFragment.h21
-rw-r--r--include/llvm/MC/MCInstPrinter.h15
-rw-r--r--include/llvm/MC/MCInstrDesc.h98
-rw-r--r--include/llvm/MC/MCInstrInfo.h4
-rw-r--r--include/llvm/MC/MCInstrItineraries.h2
-rw-r--r--include/llvm/MC/MCObjectFileInfo.h6
-rw-r--r--include/llvm/MC/MCObjectStreamer.h18
-rw-r--r--include/llvm/MC/MCParser/AsmLexer.h4
-rw-r--r--include/llvm/MC/MCParser/MCAsmLexer.h35
-rw-r--r--include/llvm/MC/MCParser/MCAsmParser.h62
-rw-r--r--include/llvm/MC/MCParser/MCAsmParserExtension.h29
-rw-r--r--include/llvm/MC/MCParser/MCTargetAsmParser.h23
-rw-r--r--include/llvm/MC/MCRegisterInfo.h24
-rw-r--r--include/llvm/MC/MCSection.h34
-rw-r--r--include/llvm/MC/MCSectionCOFF.h4
-rw-r--r--include/llvm/MC/MCStreamer.h67
-rw-r--r--include/llvm/MC/MCTargetOptions.h12
-rw-r--r--include/llvm/MC/MCTargetOptionsCommandFlags.h10
-rw-r--r--include/llvm/MC/MCWinCOFFStreamer.h2
-rw-r--r--include/llvm/MC/SectionKind.h9
-rw-r--r--include/llvm/MC/StringTableBuilder.h33
-rw-r--r--include/llvm/Object/Archive.h106
-rw-r--r--include/llvm/Object/ArchiveWriter.h14
-rw-r--r--include/llvm/Object/Binary.h4
-rw-r--r--include/llvm/Object/COFF.h52
-rw-r--r--include/llvm/Object/COFFImportFile.h4
-rw-r--r--include/llvm/Object/ELF.h539
-rw-r--r--include/llvm/Object/ELFObjectFile.h160
-rw-r--r--include/llvm/Object/ELFTypes.h34
-rw-r--r--include/llvm/Object/Error.h1
-rw-r--r--include/llvm/Object/IRObjectFile.h39
-rw-r--r--include/llvm/Object/MachO.h15
-rw-r--r--include/llvm/Object/MachOUniversal.h18
-rw-r--r--include/llvm/Object/ModuleSummaryIndexObjectFile.h19
-rw-r--r--include/llvm/Object/ModuleSymbolTable.h61
-rw-r--r--include/llvm/Object/ObjectFile.h7
-rw-r--r--include/llvm/Object/RelocVisitor.h47
-rw-r--r--include/llvm/Object/StackMapParser.h35
-rw-r--r--include/llvm/Object/SymbolSize.h11
-rw-r--r--include/llvm/Object/SymbolicFile.h13
-rw-r--r--include/llvm/Object/Wasm.h99
-rw-r--r--include/llvm/ObjectYAML/DWARFYAML.h203
-rw-r--r--include/llvm/ObjectYAML/MachOYAML.h9
-rw-r--r--include/llvm/Option/ArgList.h4
-rw-r--r--include/llvm/Pass.h2
-rw-r--r--include/llvm/PassInfo.h16
-rw-r--r--include/llvm/PassSupport.h4
-rw-r--r--include/llvm/Passes/PassBuilder.h95
-rw-r--r--include/llvm/ProfileData/Coverage/CoverageMapping.h58
-rw-r--r--include/llvm/ProfileData/Coverage/CoverageMappingWriter.h4
-rw-r--r--include/llvm/ProfileData/InstrProf.h20
-rw-r--r--include/llvm/ProfileData/InstrProfData.inc7
-rw-r--r--include/llvm/ProfileData/InstrProfWriter.h2
-rw-r--r--include/llvm/ProfileData/ProfileCommon.h19
-rw-r--r--include/llvm/ProfileData/SampleProf.h15
-rw-r--r--include/llvm/Support/AArch64TargetParser.def16
-rw-r--r--include/llvm/Support/ARMBuildAttributes.h1
-rw-r--r--include/llvm/Support/ARMTargetParser.def11
-rw-r--r--include/llvm/Support/AlignOf.h123
-rw-r--r--include/llvm/Support/Allocator.h23
-rw-r--r--include/llvm/Support/ArrayRecycler.h5
-rw-r--r--include/llvm/Support/AtomicOrdering.h48
-rw-r--r--include/llvm/Support/COFF.h7
-rw-r--r--include/llvm/Support/CachePruning.h11
-rw-r--r--include/llvm/Support/Casting.h48
-rw-r--r--include/llvm/Support/Chrono.h55
-rw-r--r--include/llvm/Support/CodeGen.h2
-rw-r--r--include/llvm/Support/CommandLine.h279
-rw-r--r--include/llvm/Support/Compiler.h94
-rw-r--r--include/llvm/Support/Compression.h3
-rw-r--r--include/llvm/Support/ConvertUTF.h24
-rw-r--r--include/llvm/Support/DataExtractor.h2
-rw-r--r--include/llvm/Support/DataStream.h38
-rw-r--r--include/llvm/Support/Debug.h16
-rw-r--r--include/llvm/Support/Dwarf.def444
-rw-r--r--include/llvm/Support/Dwarf.h421
-rw-r--r--include/llvm/Support/ELF.h1402
-rw-r--r--include/llvm/Support/ELFRelocs/AArch64.def338
-rw-r--r--include/llvm/Support/ELFRelocs/AMDGPU.def20
-rw-r--r--include/llvm/Support/ELFRelocs/BPF.def5
-rw-r--r--include/llvm/Support/ELFRelocs/RISCV.def50
-rw-r--r--include/llvm/Support/ELFRelocs/SystemZ.def4
-rw-r--r--include/llvm/Support/Endian.h3
-rw-r--r--include/llvm/Support/Error.h166
-rw-r--r--include/llvm/Support/FileSystem.h38
-rw-r--r--include/llvm/Support/Format.h57
-rw-r--r--include/llvm/Support/FormatAdapters.h93
-rw-r--r--include/llvm/Support/FormatCommon.h69
-rw-r--r--include/llvm/Support/FormatProviders.h413
-rw-r--r--include/llvm/Support/FormatVariadic.h247
-rw-r--r--include/llvm/Support/FormatVariadicDetails.h112
-rw-r--r--include/llvm/Support/GCOV.h34
-rw-r--r--include/llvm/Support/GenericDomTree.h60
-rw-r--r--include/llvm/Support/GenericDomTreeConstruction.h96
-rw-r--r--include/llvm/Support/GlobPattern.h48
-rw-r--r--include/llvm/Support/GraphWriter.h33
-rw-r--r--include/llvm/Support/Host.h7
-rw-r--r--include/llvm/Support/MD5.h4
-rw-r--r--include/llvm/Support/MachO.def10
-rw-r--r--include/llvm/Support/MachO.h215
-rw-r--r--include/llvm/Support/ManagedStatic.h7
-rw-r--r--include/llvm/Support/MathExtras.h144
-rw-r--r--include/llvm/Support/MemoryBuffer.h28
-rw-r--r--include/llvm/Support/MemoryObject.h68
-rw-r--r--include/llvm/Support/NativeFormatting.h49
-rw-r--r--include/llvm/Support/OnDiskHashTable.h3
-rw-r--r--include/llvm/Support/Options.h2
-rw-r--r--include/llvm/Support/Path.h3
-rw-r--r--include/llvm/Support/PointerLikeTypeTraits.h6
-rw-r--r--include/llvm/Support/PrettyStackTrace.h11
-rw-r--r--include/llvm/Support/Printable.h2
-rw-r--r--include/llvm/Support/Process.h11
-rw-r--r--include/llvm/Support/RWMutex.h23
-rw-r--r--include/llvm/Support/RandomNumberGenerator.h24
-rw-r--r--include/llvm/Support/Recycler.h5
-rw-r--r--include/llvm/Support/RecyclingAllocator.h4
-rw-r--r--include/llvm/Support/Regex.h7
-rw-r--r--include/llvm/Support/Registry.h114
-rw-r--r--include/llvm/Support/SHA1.h9
-rw-r--r--include/llvm/Support/SMLoc.h6
-rw-r--r--include/llvm/Support/SourceMgr.h5
-rw-r--r--include/llvm/Support/StreamingMemoryObject.h87
-rw-r--r--include/llvm/Support/StringSaver.h12
-rw-r--r--include/llvm/Support/SwapByteOrder.h3
-rw-r--r--include/llvm/Support/TargetParser.h26
-rw-r--r--include/llvm/Support/TargetRegistry.h24
-rw-r--r--include/llvm/Support/Threading.h7
-rw-r--r--include/llvm/Support/TimeValue.h386
-rw-r--r--include/llvm/Support/Timer.h132
-rw-r--r--include/llvm/Support/TrailingObjects.h62
-rw-r--r--include/llvm/Support/TrigramIndex.h70
-rw-r--r--include/llvm/Support/UnicodeCharRanges.h2
-rw-r--r--include/llvm/Support/Wasm.h87
-rw-r--r--include/llvm/Support/YAMLParser.h13
-rw-r--r--include/llvm/Support/YAMLTraits.h447
-rw-r--r--include/llvm/Support/raw_ostream.h38
-rw-r--r--include/llvm/Support/xxhash.h47
-rw-r--r--include/llvm/TableGen/Record.h287
-rw-r--r--include/llvm/TableGen/SetTheory.h15
-rw-r--r--include/llvm/Target/CostTable.h15
-rw-r--r--include/llvm/Target/GenericOpcodes.td410
-rw-r--r--include/llvm/Target/Target.td54
-rw-r--r--include/llvm/Target/TargetCallingConv.h18
-rw-r--r--include/llvm/Target/TargetFrameLowering.h6
-rw-r--r--include/llvm/Target/TargetGlobalISel.td29
-rw-r--r--include/llvm/Target/TargetInstrInfo.h217
-rw-r--r--include/llvm/Target/TargetIntrinsicInfo.h7
-rw-r--r--include/llvm/Target/TargetItinerary.td2
-rw-r--r--include/llvm/Target/TargetLowering.h247
-rw-r--r--include/llvm/Target/TargetLoweringObjectFile.h74
-rw-r--r--include/llvm/Target/TargetMachine.h21
-rw-r--r--include/llvm/Target/TargetOpcodes.def262
-rw-r--r--include/llvm/Target/TargetOpcodes.h7
-rw-r--r--include/llvm/Target/TargetOptions.h37
-rw-r--r--include/llvm/Target/TargetRecip.h74
-rw-r--r--include/llvm/Target/TargetRegisterInfo.h66
-rw-r--r--include/llvm/Target/TargetSelectionDAG.td10
-rw-r--r--include/llvm/Target/TargetSubtargetInfo.h23
-rw-r--r--include/llvm/Transforms/Coroutines.h38
-rw-r--r--include/llvm/Transforms/GCOVProfiler.h2
-rw-r--r--include/llvm/Transforms/IPO.h22
-rw-r--r--include/llvm/Transforms/IPO/AlwaysInliner.h40
-rw-r--r--include/llvm/Transforms/IPO/CrossDSOCFI.h2
-rw-r--r--include/llvm/Transforms/IPO/FunctionAttrs.h5
-rw-r--r--include/llvm/Transforms/IPO/FunctionImport.h28
-rw-r--r--include/llvm/Transforms/IPO/GlobalOpt.h2
-rw-r--r--include/llvm/Transforms/IPO/GlobalSplit.h30
-rw-r--r--include/llvm/Transforms/IPO/InferFunctionAttrs.h2
-rw-r--r--include/llvm/Transforms/IPO/Inliner.h108
-rw-r--r--include/llvm/Transforms/IPO/InlinerPass.h94
-rw-r--r--include/llvm/Transforms/IPO/Internalize.h2
-rw-r--r--include/llvm/Transforms/IPO/LowerTypeTests.h2
-rw-r--r--include/llvm/Transforms/IPO/PartialInlining.h3
-rw-r--r--include/llvm/Transforms/IPO/PassManagerBuilder.h13
-rw-r--r--include/llvm/Transforms/IPO/SCCP.h2
-rw-r--r--include/llvm/Transforms/IPO/WholeProgramDevirt.h5
-rw-r--r--include/llvm/Transforms/InstCombine/InstCombine.h13
-rw-r--r--include/llvm/Transforms/InstCombine/InstCombineWorklist.h16
-rw-r--r--include/llvm/Transforms/InstrProfiling.h11
-rw-r--r--include/llvm/Transforms/Instrumentation.h9
-rw-r--r--include/llvm/Transforms/PGOInstrumentation.h6
-rw-r--r--include/llvm/Transforms/SampleProfile.h2
-rw-r--r--include/llvm/Transforms/Scalar.h41
-rw-r--r--include/llvm/Transforms/Scalar/DCE.h2
-rw-r--r--include/llvm/Transforms/Scalar/DeadStoreElimination.h2
-rw-r--r--include/llvm/Transforms/Scalar/EarlyCSE.h6
-rw-r--r--include/llvm/Transforms/Scalar/GVN.h10
-rw-r--r--include/llvm/Transforms/Scalar/GVNExpression.h605
-rw-r--r--include/llvm/Transforms/Scalar/GuardWidening.h2
-rw-r--r--include/llvm/Transforms/Scalar/IndVarSimplify.h3
-rw-r--r--include/llvm/Transforms/Scalar/JumpThreading.h11
-rw-r--r--include/llvm/Transforms/Scalar/LICM.h3
-rw-r--r--include/llvm/Transforms/Scalar/LoopDataPrefetch.h31
-rw-r--r--include/llvm/Transforms/Scalar/LoopDeletion.h3
-rw-r--r--include/llvm/Transforms/Scalar/LoopIdiomRecognize.h3
-rw-r--r--include/llvm/Transforms/Scalar/LoopInstSimplify.h3
-rw-r--r--include/llvm/Transforms/Scalar/LoopRotation.h8
-rw-r--r--include/llvm/Transforms/Scalar/LoopSimplifyCFG.h3
-rw-r--r--include/llvm/Transforms/Scalar/LoopStrengthReduce.h38
-rw-r--r--include/llvm/Transforms/Scalar/LoopUnrollPass.h30
-rw-r--r--include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h28
-rw-r--r--include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h2
-rw-r--r--include/llvm/Transforms/Scalar/NaryReassociate.h174
-rw-r--r--include/llvm/Transforms/Scalar/NewGVN.h28
-rw-r--r--include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h2
-rw-r--r--include/llvm/Transforms/Scalar/Reassociate.h2
-rw-r--r--include/llvm/Transforms/Scalar/SCCP.h2
-rw-r--r--include/llvm/Transforms/Scalar/SROA.h2
-rw-r--r--include/llvm/Transforms/Scalar/SimplifyCFG.h2
-rw-r--r--include/llvm/Transforms/Scalar/Sink.h2
-rw-r--r--include/llvm/Transforms/Scalar/SpeculativeExecution.h92
-rw-r--r--include/llvm/Transforms/Utils/ASanStackFrameLayout.h49
-rw-r--r--include/llvm/Transforms/Utils/AddDiscriminators.h2
-rw-r--r--include/llvm/Transforms/Utils/BasicBlockUtils.h15
-rw-r--r--include/llvm/Transforms/Utils/BreakCriticalEdges.h29
-rw-r--r--include/llvm/Transforms/Utils/Cloning.h20
-rw-r--r--include/llvm/Transforms/Utils/CmpInstAnalysis.h27
-rw-r--r--include/llvm/Transforms/Utils/CodeExtractor.h30
-rw-r--r--include/llvm/Transforms/Utils/EscapeEnumerator.h49
-rw-r--r--include/llvm/Transforms/Utils/FunctionComparator.h376
-rw-r--r--include/llvm/Transforms/Utils/FunctionImportUtils.h12
-rw-r--r--include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h107
-rw-r--r--include/llvm/Transforms/Utils/LCSSA.h2
-rw-r--r--include/llvm/Transforms/Utils/LibCallsShrinkWrap.h27
-rw-r--r--include/llvm/Transforms/Utils/Local.h46
-rw-r--r--include/llvm/Transforms/Utils/LoopSimplify.h2
-rw-r--r--include/llvm/Transforms/Utils/LoopUtils.h76
-rw-r--r--include/llvm/Transforms/Utils/LowerInvoke.h30
-rw-r--r--include/llvm/Transforms/Utils/Mem2Reg.h2
-rw-r--r--include/llvm/Transforms/Utils/MemorySSA.h189
-rw-r--r--include/llvm/Transforms/Utils/ModuleUtils.h26
-rw-r--r--include/llvm/Transforms/Utils/NameAnonGlobals.h31
-rw-r--r--include/llvm/Transforms/Utils/SSAUpdaterImpl.h5
-rw-r--r--include/llvm/Transforms/Utils/SimplifyInstructions.h2
-rw-r--r--include/llvm/Transforms/Utils/SimplifyLibCalls.h2
-rw-r--r--include/llvm/Transforms/Utils/SymbolRewriter.h57
-rw-r--r--include/llvm/Transforms/Utils/UnrollLoop.h16
-rw-r--r--include/llvm/Transforms/Vectorize/LoopVectorize.h5
-rw-r--r--include/llvm/Transforms/Vectorize/SLPVectorizer.h15
-rw-r--r--include/llvm/module.modulemap25
-rw-r--r--include/llvm/module.modulemap.build4
660 files changed, 35522 insertions, 19351 deletions
diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h
index 76f8b31580ac..7f5c05d21e65 100644
--- a/include/llvm-c/Core.h
+++ b/include/llvm-c/Core.h
@@ -55,51 +55,6 @@ extern "C" {
*/
typedef enum {
- LLVMZExtAttribute = 1<<0,
- LLVMSExtAttribute = 1<<1,
- LLVMNoReturnAttribute = 1<<2,
- LLVMInRegAttribute = 1<<3,
- LLVMStructRetAttribute = 1<<4,
- LLVMNoUnwindAttribute = 1<<5,
- LLVMNoAliasAttribute = 1<<6,
- LLVMByValAttribute = 1<<7,
- LLVMNestAttribute = 1<<8,
- LLVMReadNoneAttribute = 1<<9,
- LLVMReadOnlyAttribute = 1<<10,
- LLVMNoInlineAttribute = 1<<11,
- LLVMAlwaysInlineAttribute = 1<<12,
- LLVMOptimizeForSizeAttribute = 1<<13,
- LLVMStackProtectAttribute = 1<<14,
- LLVMStackProtectReqAttribute = 1<<15,
- LLVMAlignment = 31<<16,
- LLVMNoCaptureAttribute = 1<<21,
- LLVMNoRedZoneAttribute = 1<<22,
- LLVMNoImplicitFloatAttribute = 1<<23,
- LLVMNakedAttribute = 1<<24,
- LLVMInlineHintAttribute = 1<<25,
- LLVMStackAlignment = 7<<26,
- LLVMReturnsTwice = 1 << 29,
- LLVMUWTable = 1 << 30,
- LLVMNonLazyBind = 1 << 31
-
- /* FIXME: These attributes are currently not included in the C API as
- a temporary measure until the API/ABI impact to the C API is understood
- and the path forward agreed upon.
- LLVMSanitizeAddressAttribute = 1ULL << 32,
- LLVMStackProtectStrongAttribute = 1ULL<<35,
- LLVMColdAttribute = 1ULL << 40,
- LLVMOptimizeNoneAttribute = 1ULL << 42,
- LLVMInAllocaAttribute = 1ULL << 43,
- LLVMNonNullAttribute = 1ULL << 44,
- LLVMJumpTableAttribute = 1ULL << 45,
- LLVMConvergentAttribute = 1ULL << 46,
- LLVMSafeStackAttribute = 1ULL << 47,
- LLVMSwiftSelfAttribute = 1ULL << 48,
- LLVMSwiftErrorAttribute = 1ULL << 49,
- */
-} LLVMAttribute;
-
-typedef enum {
/* Terminator Instructions */
LLVMRet = 1,
LLVMBr = 2,
@@ -1752,6 +1707,7 @@ LLVMValueRef LLVMConstNSWMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant)
LLVMValueRef LLVMConstNUWMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstFMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstUDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstExactUDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstExactSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstFDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
@@ -2010,8 +1966,6 @@ void LLVMSetGC(LLVMValueRef Fn, const char *Name);
*
* @see llvm::Function::addAttribute()
*/
-void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA);
-
void LLVMAddAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
LLVMAttributeRef A);
unsigned LLVMGetAttributeCountAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx);
@@ -2036,18 +1990,6 @@ void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A,
const char *V);
/**
- * Obtain an attribute from a function.
- *
- * @see llvm::Function::getAttributes()
- */
-LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn);
-
-/**
- * Remove an attribute from a function.
- */
-void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA);
-
-/**
* @defgroup LLVMCCoreValueFunctionParameters Function Parameters
*
* Functions in this group relate to arguments/parameters on functions.
@@ -2129,25 +2071,6 @@ LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg);
LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg);
/**
- * Add an attribute to a function argument.
- *
- * @see llvm::Argument::addAttr()
- */
-void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA);
-
-/**
- * Remove an attribute from a function argument.
- *
- * @see llvm::Argument::removeAttr()
- */
-void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA);
-
-/**
- * Get an attribute from a function argument.
- */
-LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg);
-
-/**
* Set the alignment for a function parameter.
*
* @see llvm::Argument::addAttr()
@@ -2595,9 +2518,6 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC);
*/
unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr);
-void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute);
-void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index,
- LLVMAttribute);
void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
unsigned Align);
@@ -2962,6 +2882,8 @@ LLVMValueRef LLVMBuildFMul(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
const char *Name);
LLVMValueRef LLVMBuildUDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
const char *Name);
+LLVMValueRef LLVMBuildExactUDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
LLVMValueRef LLVMBuildSDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
const char *Name);
LLVMValueRef LLVMBuildExactSDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
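
Besides dropping the old LLVMAttribute bitmask API in favour of the LLVMAttributeRef-based functions kept above (LLVMAddAttributeAtIndex and friends), this revision of Core.h adds exact unsigned-division counterparts to the existing constant and IR-builder entry points. A minimal sketch of how a C API client might use them; the module, function, and value names are illustrative, and the surrounding calls (LLVMModuleCreateWithName, LLVMCreateBuilder, and so on) are long-standing Core.h functions rather than part of this patch:

    #include "llvm-c/Core.h"

    /* Emit  udiv exact i32 %a, %b  and fold a constant exact udiv. */
    static LLVMValueRef buildExactUDivDemo(void) {
      LLVMModuleRef M = LLVMModuleCreateWithName("exact_udiv_demo");
      LLVMTypeRef I32 = LLVMInt32Type();
      LLVMTypeRef Params[] = {I32, I32};
      LLVMValueRef Fn =
          LLVMAddFunction(M, "div_exact", LLVMFunctionType(I32, Params, 2, 0));

      LLVMBuilderRef B = LLVMCreateBuilder();
      LLVMPositionBuilderAtEnd(B, LLVMAppendBasicBlock(Fn, "entry"));

      /* New in this revision: the exact form asserts the division has no
         remainder, which allows stronger optimization of the result. */
      LLVMValueRef Quot =
          LLVMBuildExactUDiv(B, LLVMGetParam(Fn, 0), LLVMGetParam(Fn, 1), "quot");
      LLVMBuildRet(B, Quot);

      /* Constant-expression variant, also new in this revision. */
      LLVMValueRef Folded =
          LLVMConstExactUDiv(LLVMConstInt(I32, 42, 0), LLVMConstInt(I32, 7, 0));
      (void)Folded;

      LLVMDisposeBuilder(B);
      return Fn; /* module ownership/disposal omitted in this sketch */
    }
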
diff --git a/include/llvm-c/Transforms/Scalar.h b/include/llvm-c/Transforms/Scalar.h
index b8a09984aa4d..8991e0904849 100644
--- a/include/llvm-c/Transforms/Scalar.h
+++ b/include/llvm-c/Transforms/Scalar.h
@@ -56,6 +56,9 @@ void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM);
/** See llvm::createGVNPass function. */
void LLVMAddGVNPass(LLVMPassManagerRef PM);
+/** See llvm::createNewGVNPass function. */
+void LLVMAddNewGVNPass(LLVMPassManagerRef PM);
+
/** See llvm::createIndVarSimplifyPass function. */
void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM);
@@ -135,6 +138,9 @@ void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM);
/** See llvm::createEarlyCSEPass function */
void LLVMAddEarlyCSEPass(LLVMPassManagerRef PM);
+/** See llvm::createEarlyCSEPass function */
+void LLVMAddEarlyCSEMemSSAPass(LLVMPassManagerRef PM);
+
/** See llvm::createLowerExpectIntrinsicPass function */
void LLVMAddLowerExpectIntrinsicPass(LLVMPassManagerRef PM);
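
The two new entry points expose the MemorySSA-backed form of EarlyCSE and the NewGVN implementation to C API clients. A minimal sketch of wiring them into a module pass manager; LLVMCreatePassManager, LLVMRunPassManager, and LLVMDisposePassManager come from llvm-c/Core.h and are not part of this patch:

    #include "llvm-c/Core.h"
    #include "llvm-c/Transforms/Scalar.h"

    /* Run the newly exposed scalar passes over a module. */
    static void runNewScalarPasses(LLVMModuleRef M) {
      LLVMPassManagerRef PM = LLVMCreatePassManager();
      LLVMAddEarlyCSEMemSSAPass(PM); /* EarlyCSE driven by MemorySSA */
      LLVMAddNewGVNPass(PM);         /* the NewGVN value-numbering pass */
      LLVMRunPassManager(PM, M);
      LLVMDisposePassManager(PM);
    }
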
diff --git a/include/llvm-c/lto.h b/include/llvm-c/lto.h
index b1f5a45d6650..c3af74cdedab 100644
--- a/include/llvm-c/lto.h
+++ b/include/llvm-c/lto.h
@@ -44,7 +44,7 @@ typedef bool lto_bool_t;
* @{
*/
-#define LTO_API_VERSION 20
+#define LTO_API_VERSION 21
/**
* \since prior to LTO_API_VERSION=3
@@ -145,10 +145,10 @@ extern lto_bool_t
lto_module_has_objc_category(const void *mem, size_t length);
/**
-* Checks if a buffer is a loadable object file.
-*
-* \since prior to LTO_API_VERSION=3
-*/
+ * Checks if a buffer is a loadable object file.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
extern lto_bool_t lto_module_is_object_file_in_memory(const void *mem,
size_t length);
@@ -637,6 +637,29 @@ extern LTOObjectBuffer thinlto_module_get_object(thinlto_code_gen_t cg,
unsigned int index);
/**
+ * Returns the number of object files produced by the ThinLTO CodeGenerator.
+ *
+ * It usually matches the number of input files, but this is not a guarantee of
+ * the API and may change in future implementation, so the client should not
+ * assume it.
+ *
+ * \since LTO_API_VERSION=21
+ */
+unsigned int thinlto_module_get_num_object_files(thinlto_code_gen_t cg);
+
+/**
+ * Returns the path to the ith object file produced by the ThinLTO
+ * CodeGenerator.
+ *
+ * Client should use \p thinlto_module_get_num_object_files() to get the number
+ * of available objects.
+ *
+ * \since LTO_API_VERSION=21
+ */
+const char *thinlto_module_get_object_file(thinlto_code_gen_t cg,
+ unsigned int index);
+
+/**
* Sets which PIC code model to generate.
* Returns true on error (check lto_get_error_message() for details).
*
@@ -725,6 +748,17 @@ extern void thinlto_codegen_set_savetemps_dir(thinlto_code_gen_t cg,
const char *save_temps_dir);
/**
+ * Set the path to a directory where to save generated object files. This
+ * path can be used by a linker to request on-disk files instead of in-memory
+ * buffers. When set, results are available through
+ * thinlto_module_get_object_file() instead of thinlto_module_get_object().
+ *
+ * \since LTO_API_VERSION=21
+ */
+void thinlto_set_generated_objects_dir(thinlto_code_gen_t cg,
+ const char *save_temps_dir);
+
+/**
* Sets the cpu to generate code for.
*
* \since LTO_API_VERSION=18
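
Taken together, the additions above let a linker ask the ThinLTO code generator for on-disk object files instead of in-memory buffers. A hypothetical flow; thinlto_codegen_process() comes from the same header but is outside this hunk, and the directory path is purely illustrative:

    #include "llvm-c/lto.h"
    #include <stdio.h>

    static void codegenToDirectory(thinlto_code_gen_t cg) {
      /* Request on-disk results before running codegen. */
      thinlto_set_generated_objects_dir(cg, "/tmp/thinlto-objects");
      thinlto_codegen_process(cg);

      /* Results are now file paths rather than LTOObjectBuffers. */
      unsigned num = thinlto_module_get_num_object_files(cg);
      for (unsigned i = 0; i < num; ++i)
        printf("object %u: %s\n", i, thinlto_module_get_object_file(cg, i));
    }
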
diff --git a/include/llvm/ADT/APFloat.h b/include/llvm/ADT/APFloat.h
index 3f6bd00a779c..00304230a991 100644
--- a/include/llvm/ADT/APFloat.h
+++ b/include/llvm/ADT/APFloat.h
@@ -18,12 +18,16 @@
#define LLVM_ADT_APFLOAT_H
#include "llvm/ADT/APInt.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <memory>
namespace llvm {
struct fltSemantics;
class APSInt;
class StringRef;
+class APFloat;
+class raw_ostream;
template <typename T> class SmallVectorImpl;
@@ -121,33 +125,30 @@ enum lostFraction { // Example of truncated bits:
///
/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
///
-class APFloat {
-public:
+// This is the common type definitions shared by APFloat and its internal
+// implementation classes. This struct should not define any non-static data
+// members.
+struct APFloatBase {
/// A signed type to represent a floating point numbers unbiased exponent.
typedef signed short ExponentType;
/// \name Floating Point Semantics.
/// @{
- static const fltSemantics IEEEhalf;
- static const fltSemantics IEEEsingle;
- static const fltSemantics IEEEdouble;
- static const fltSemantics IEEEquad;
- static const fltSemantics PPCDoubleDouble;
- static const fltSemantics x87DoubleExtended;
+ static const fltSemantics &IEEEhalf();
+ static const fltSemantics &IEEEsingle();
+ static const fltSemantics &IEEEdouble();
+ static const fltSemantics &IEEEquad();
+ static const fltSemantics &PPCDoubleDouble();
+ static const fltSemantics &x87DoubleExtended();
/// A Pseudo fltsemantic used to construct APFloats that cannot conflict with
/// anything real.
- static const fltSemantics Bogus;
+ static const fltSemantics &Bogus();
/// @}
- static unsigned int semanticsPrecision(const fltSemantics &);
- static ExponentType semanticsMinExponent(const fltSemantics &);
- static ExponentType semanticsMaxExponent(const fltSemantics &);
- static unsigned int semanticsSizeInBits(const fltSemantics &);
-
/// IEEE-754R 5.11: Floating Point Comparison Relations.
enum cmpResult {
cmpLessThan,
@@ -190,19 +191,39 @@ public:
uninitialized
};
+ /// \brief Enumeration of \c ilogb error results.
+ enum IlogbErrorKinds {
+ IEK_Zero = INT_MIN + 1,
+ IEK_NaN = INT_MIN,
+ IEK_Inf = INT_MAX
+ };
+
+ static unsigned int semanticsPrecision(const fltSemantics &);
+ static ExponentType semanticsMinExponent(const fltSemantics &);
+ static ExponentType semanticsMaxExponent(const fltSemantics &);
+ static unsigned int semanticsSizeInBits(const fltSemantics &);
+
+ /// Returns the size of the floating point number (in bits) in the given
+ /// semantics.
+ static unsigned getSizeInBits(const fltSemantics &Sem);
+};
+
+namespace detail {
+
+class IEEEFloat final : public APFloatBase {
+public:
/// \name Constructors
/// @{
- APFloat(const fltSemantics &); // Default construct to 0.0
- APFloat(const fltSemantics &, StringRef);
- APFloat(const fltSemantics &, integerPart);
- APFloat(const fltSemantics &, uninitializedTag);
- APFloat(const fltSemantics &, const APInt &);
- explicit APFloat(double d);
- explicit APFloat(float f);
- APFloat(const APFloat &);
- APFloat(APFloat &&);
- ~APFloat();
+ IEEEFloat(const fltSemantics &); // Default construct to 0.0
+ IEEEFloat(const fltSemantics &, integerPart);
+ IEEEFloat(const fltSemantics &, uninitializedTag);
+ IEEEFloat(const fltSemantics &, const APInt &);
+ explicit IEEEFloat(double d);
+ explicit IEEEFloat(float f);
+ IEEEFloat(const IEEEFloat &);
+ IEEEFloat(IEEEFloat &&);
+ ~IEEEFloat();
/// @}
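
The two hunks above fold the per-format semantics objects and the semantics query helpers into the new APFloatBase, with the semantics now exposed through accessor functions instead of static data members. A minimal usage sketch, assuming the forwarding constructors and getSemantics() that the full header keeps on the APFloat facade (they are outside this excerpt):

    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    void semanticsDemo() {
      // Callers now write APFloat::IEEEdouble() (a function call) where they
      // previously named the APFloat::IEEEdouble static member.
      APFloat One(APFloat::IEEEdouble(), 1);                      // from an integerPart
      APFloat Bits(APFloat::IEEEsingle(), APInt(32, 0x3f800000)); // bit pattern of 1.0f

      // The query helpers hoisted onto APFloatBase operate on semantics alone.
      unsigned Precision = APFloat::semanticsPrecision(One.getSemantics()); // 53
      unsigned SizeInBits = APFloat::getSizeInBits(Bits.getSemantics());    // 32
      (void)Precision;
      (void)SizeInBits;
    }
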
@@ -212,79 +233,6 @@ public:
/// \name Convenience "constructors"
/// @{
- /// Factory for Positive and Negative Zero.
- ///
- /// \param Negative True iff the number should be negative.
- static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
- APFloat Val(Sem, uninitialized);
- Val.makeZero(Negative);
- return Val;
- }
-
- /// Factory for Positive and Negative Infinity.
- ///
- /// \param Negative True iff the number should be negative.
- static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
- APFloat Val(Sem, uninitialized);
- Val.makeInf(Negative);
- return Val;
- }
-
- /// Factory for QNaN values.
- ///
- /// \param Negative - True iff the NaN generated should be negative.
- /// \param type - The unspecified fill bits for creating the NaN, 0 by
- /// default. The value is truncated as necessary.
- static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
- unsigned type = 0) {
- if (type) {
- APInt fill(64, type);
- return getQNaN(Sem, Negative, &fill);
- } else {
- return getQNaN(Sem, Negative, nullptr);
- }
- }
-
- /// Factory for QNaN values.
- static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
- const APInt *payload = nullptr) {
- return makeNaN(Sem, false, Negative, payload);
- }
-
- /// Factory for SNaN values.
- static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
- const APInt *payload = nullptr) {
- return makeNaN(Sem, true, Negative, payload);
- }
-
- /// Returns the largest finite number in the given semantics.
- ///
- /// \param Negative - True iff the number should be negative
- static APFloat getLargest(const fltSemantics &Sem, bool Negative = false);
-
- /// Returns the smallest (by magnitude) finite number in the given semantics.
- /// Might be denormalized, which implies a relative loss of precision.
- ///
- /// \param Negative - True iff the number should be negative
- static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false);
-
- /// Returns the smallest (by magnitude) normalized finite number in the given
- /// semantics.
- ///
- /// \param Negative - True iff the number should be negative
- static APFloat getSmallestNormalized(const fltSemantics &Sem,
- bool Negative = false);
-
- /// Returns a float which is bitcasted from an all one value int.
- ///
- /// \param BitWidth - Select float type
- /// \param isIEEE - If 128 bit number, select between PPC and IEEE
- static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
-
- /// Returns the size of the floating point number (in bits) in the given
- /// semantics.
- static unsigned getSizeInBits(const fltSemantics &Sem);
-
/// @}
/// Used to insert APFloat objects, or objects that contain APFloat objects,
@@ -294,47 +242,47 @@ public:
/// \name Arithmetic
/// @{
- opStatus add(const APFloat &, roundingMode);
- opStatus subtract(const APFloat &, roundingMode);
- opStatus multiply(const APFloat &, roundingMode);
- opStatus divide(const APFloat &, roundingMode);
+ opStatus add(const IEEEFloat &, roundingMode);
+ opStatus subtract(const IEEEFloat &, roundingMode);
+ opStatus multiply(const IEEEFloat &, roundingMode);
+ opStatus divide(const IEEEFloat &, roundingMode);
/// IEEE remainder.
- opStatus remainder(const APFloat &);
+ opStatus remainder(const IEEEFloat &);
/// C fmod, or llvm frem.
- opStatus mod(const APFloat &);
- opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode);
+ opStatus mod(const IEEEFloat &);
+ opStatus fusedMultiplyAdd(const IEEEFloat &, const IEEEFloat &, roundingMode);
opStatus roundToIntegral(roundingMode);
/// IEEE-754R 5.3.1: nextUp/nextDown.
opStatus next(bool nextDown);
/// \brief Operator+ overload which provides the default
/// \c nmNearestTiesToEven rounding mode and *no* error checking.
- APFloat operator+(const APFloat &RHS) const {
- APFloat Result = *this;
+ IEEEFloat operator+(const IEEEFloat &RHS) const {
+ IEEEFloat Result = *this;
Result.add(RHS, rmNearestTiesToEven);
return Result;
}
/// \brief Operator- overload which provides the default
/// \c nmNearestTiesToEven rounding mode and *no* error checking.
- APFloat operator-(const APFloat &RHS) const {
- APFloat Result = *this;
+ IEEEFloat operator-(const IEEEFloat &RHS) const {
+ IEEEFloat Result = *this;
Result.subtract(RHS, rmNearestTiesToEven);
return Result;
}
/// \brief Operator* overload which provides the default
/// \c nmNearestTiesToEven rounding mode and *no* error checking.
- APFloat operator*(const APFloat &RHS) const {
- APFloat Result = *this;
+ IEEEFloat operator*(const IEEEFloat &RHS) const {
+ IEEEFloat Result = *this;
Result.multiply(RHS, rmNearestTiesToEven);
return Result;
}
/// \brief Operator/ overload which provides the default
/// \c nmNearestTiesToEven rounding mode and *no* error checking.
- APFloat operator/(const APFloat &RHS) const {
- APFloat Result = *this;
+ IEEEFloat operator/(const IEEEFloat &RHS) const {
+ IEEEFloat Result = *this;
Result.divide(RHS, rmNearestTiesToEven);
return Result;
}
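
The hunk above carries the convenience operator overloads over to the internal IEEEFloat class unchanged: they round with rmNearestTiesToEven and discard the opStatus. Client code that cares about rounding behaviour or exceptional results keeps using the named methods, roughly as in this sketch (variable names are mine; both operands are assumed to share the same semantics):

    #include "llvm/ADT/APFloat.h"
    using namespace llvm;

    APFloat addWithCheck(const APFloat &A, const APFloat &B) {
      APFloat Result = A;
      // Explicit rounding mode; the returned opStatus reports inexact results,
      // overflow, underflow, and invalid operations.
      APFloat::opStatus St = Result.add(B, APFloat::rmTowardZero);
      if (St & APFloat::opInexact) {
        // rounding lost precision; handle as needed
      }
      return Result;
    }
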
@@ -346,11 +294,11 @@ public:
void changeSign();
void clearSign();
- void copySign(const APFloat &);
+ void copySign(const IEEEFloat &);
/// \brief A static helper to produce a copy of an APFloat value with its sign
/// copied from some other APFloat.
- static APFloat copySign(APFloat Value, const APFloat &Sign) {
+ static IEEEFloat copySign(IEEEFloat Value, const IEEEFloat &Sign) {
Value.copySign(Sign);
return Value;
}
@@ -379,14 +327,14 @@ public:
/// The definition of equality is not straightforward for floating point, so
/// we won't use operator==. Use one of the following, or write whatever it
/// is you really mean.
- bool operator==(const APFloat &) const = delete;
+ bool operator==(const IEEEFloat &) const = delete;
/// IEEE comparison with another floating point number (NaNs compare
/// unordered, 0==-0).
- cmpResult compare(const APFloat &) const;
+ cmpResult compare(const IEEEFloat &) const;
/// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
- bool bitwiseIsEqual(const APFloat &) const;
+ bool bitwiseIsEqual(const IEEEFloat &) const;
/// Write out a hexadecimal representation of the floating point value to DST,
/// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d.
@@ -456,8 +404,8 @@ public:
/// @}
- APFloat &operator=(const APFloat &);
- APFloat &operator=(APFloat &&);
+ IEEEFloat &operator=(const IEEEFloat &);
+ IEEEFloat &operator=(IEEEFloat &&);
/// \brief Overload to compute a hash code for an APFloat value.
///
@@ -468,7 +416,7 @@ public:
/// emphasizes producing different codes for different inputs in order to
/// be used in canonicalization and memoization. As such, equality is
/// bitwiseIsEqual, and 0 != -0.
- friend hash_code hash_value(const APFloat &Arg);
+ friend hash_code hash_value(const IEEEFloat &Arg);
/// Converts this value into a decimal string.
///
@@ -495,14 +443,7 @@ public:
/// If this value has an exact multiplicative inverse, store it in inv and
/// return true.
- bool getExactInverse(APFloat *inv) const;
-
- /// \brief Enumeration of \c ilogb error results.
- enum IlogbErrorKinds {
- IEK_Zero = INT_MIN+1,
- IEK_NaN = INT_MIN,
- IEK_Inf = INT_MAX
- };
+ bool getExactInverse(IEEEFloat *inv) const;
/// \brief Returns the exponent of the internal representation of the APFloat.
///
@@ -513,15 +454,35 @@ public:
/// 0 -> \c IEK_Zero
/// Inf -> \c IEK_Inf
///
- friend int ilogb(const APFloat &Arg);
+ friend int ilogb(const IEEEFloat &Arg);
/// \brief Returns: X * 2^Exp for integral exponents.
- friend APFloat scalbn(APFloat X, int Exp, roundingMode);
+ friend IEEEFloat scalbn(IEEEFloat X, int Exp, roundingMode);
- friend APFloat frexp(const APFloat &X, int &Exp, roundingMode);
+ friend IEEEFloat frexp(const IEEEFloat &X, int &Exp, roundingMode);
-private:
+ /// \name Special value setters.
+ /// @{
+
+ void makeLargest(bool Neg = false);
+ void makeSmallest(bool Neg = false);
+ void makeNaN(bool SNaN = false, bool Neg = false,
+ const APInt *fill = nullptr);
+ void makeInf(bool Neg = false);
+ void makeZero(bool Neg = false);
+ void makeQuiet();
+ /// Returns the smallest (by magnitude) normalized finite number in the given
+ /// semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ void makeSmallestNormalized(bool Negative = false);
+
+ /// @}
+
+ cmpResult compareAbsoluteValue(const IEEEFloat &) const;
+
+private:
/// \name Simple Queries
/// @{
@@ -534,11 +495,11 @@ private:
/// \name Significand operations.
/// @{
- integerPart addSignificand(const APFloat &);
- integerPart subtractSignificand(const APFloat &, integerPart);
- lostFraction addOrSubtractSignificand(const APFloat &, bool subtract);
- lostFraction multiplySignificand(const APFloat &, const APFloat *);
- lostFraction divideSignificand(const APFloat &);
+ integerPart addSignificand(const IEEEFloat &);
+ integerPart subtractSignificand(const IEEEFloat &, integerPart);
+ lostFraction addOrSubtractSignificand(const IEEEFloat &, bool subtract);
+ lostFraction multiplySignificand(const IEEEFloat &, const IEEEFloat *);
+ lostFraction divideSignificand(const IEEEFloat &);
void incrementSignificand();
void initialize(const fltSemantics *);
void shiftSignificandLeft(unsigned int);
@@ -556,25 +517,10 @@ private:
/// \name Arithmetic on special values.
/// @{
- opStatus addOrSubtractSpecials(const APFloat &, bool subtract);
- opStatus divideSpecials(const APFloat &);
- opStatus multiplySpecials(const APFloat &);
- opStatus modSpecials(const APFloat &);
-
- /// @}
-
- /// \name Special value setters.
- /// @{
-
- void makeLargest(bool Neg = false);
- void makeSmallest(bool Neg = false);
- void makeNaN(bool SNaN = false, bool Neg = false,
- const APInt *fill = nullptr);
- static APFloat makeNaN(const fltSemantics &Sem, bool SNaN, bool Negative,
- const APInt *fill);
- void makeInf(bool Neg = false);
- void makeZero(bool Neg = false);
- void makeQuiet();
+ opStatus addOrSubtractSpecials(const IEEEFloat &, bool subtract);
+ opStatus divideSpecials(const IEEEFloat &);
+ opStatus multiplySpecials(const IEEEFloat &);
+ opStatus modSpecials(const IEEEFloat &);
/// @}
@@ -583,8 +529,7 @@ private:
bool convertFromStringSpecials(StringRef str);
opStatus normalize(roundingMode, lostFraction);
- opStatus addOrSubtract(const APFloat &, roundingMode, bool subtract);
- cmpResult compareAbsoluteValue(const APFloat &) const;
+ opStatus addOrSubtract(const IEEEFloat &, roundingMode, bool subtract);
opStatus handleOverflow(roundingMode);
bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
opStatus convertToSignExtendedInteger(integerPart *, unsigned int, bool,
@@ -614,10 +559,11 @@ private:
void initFromF80LongDoubleAPInt(const APInt &api);
void initFromPPCDoubleDoubleAPInt(const APInt &api);
- void assign(const APFloat &);
- void copySignificand(const APFloat &);
+ void assign(const IEEEFloat &);
+ void copySignificand(const IEEEFloat &);
void freeSignificand();
+ /// Note: this must be the first data member.
/// The semantics that this value obeys.
const fltSemantics *semantics;
@@ -642,20 +588,513 @@ private:
unsigned int sign : 1;
};
+hash_code hash_value(const IEEEFloat &Arg);
+int ilogb(const IEEEFloat &Arg);
+IEEEFloat scalbn(IEEEFloat X, int Exp, IEEEFloat::roundingMode);
+IEEEFloat frexp(const IEEEFloat &Val, int &Exp, IEEEFloat::roundingMode RM);
+
+// This mode implements a more precise float in terms of two APFloats.
+// The interface and layout are designed for arbitrary underlying semantics,
+// though currently only PPCDoubleDouble semantics are supported, whose
+// corresponding underlying semantics are IEEEdouble. A small numeric sketch
+// of the idea follows the class definition below.
+class DoubleAPFloat final : public APFloatBase {
+ // Note: this must be the first data member.
+ const fltSemantics *Semantics;
+ std::unique_ptr<APFloat[]> Floats;
+
+ opStatus addImpl(const APFloat &a, const APFloat &aa, const APFloat &c,
+ const APFloat &cc, roundingMode RM);
+
+ opStatus addWithSpecial(const DoubleAPFloat &LHS, const DoubleAPFloat &RHS,
+ DoubleAPFloat &Out, roundingMode RM);
+
+public:
+ DoubleAPFloat(const fltSemantics &S);
+ DoubleAPFloat(const fltSemantics &S, uninitializedTag);
+ DoubleAPFloat(const fltSemantics &S, integerPart);
+ DoubleAPFloat(const fltSemantics &S, const APInt &I);
+ DoubleAPFloat(const fltSemantics &S, APFloat &&First, APFloat &&Second);
+ DoubleAPFloat(const DoubleAPFloat &RHS);
+ DoubleAPFloat(DoubleAPFloat &&RHS);
+
+ DoubleAPFloat &operator=(const DoubleAPFloat &RHS);
+
+ DoubleAPFloat &operator=(DoubleAPFloat &&RHS) {
+ if (this != &RHS) {
+ this->~DoubleAPFloat();
+ new (this) DoubleAPFloat(std::move(RHS));
+ }
+ return *this;
+ }
+
+ bool needsCleanup() const { return Floats != nullptr; }
+
+ APFloat &getFirst() { return Floats[0]; }
+ const APFloat &getFirst() const { return Floats[0]; }
+ APFloat &getSecond() { return Floats[1]; }
+ const APFloat &getSecond() const { return Floats[1]; }
+
+ opStatus add(const DoubleAPFloat &RHS, roundingMode RM);
+ opStatus subtract(const DoubleAPFloat &RHS, roundingMode RM);
+ void changeSign();
+ cmpResult compareAbsoluteValue(const DoubleAPFloat &RHS) const;
+
+ fltCategory getCategory() const;
+ bool isNegative() const;
+
+ void makeInf(bool Neg);
+ void makeNaN(bool SNaN, bool Neg, const APInt *fill);
+};
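// Conceptual sketch (illustrative only, plain doubles, not part of the patch):
// DoubleAPFloat generalizes the classic "double-double" trick of keeping a
// value as an unevaluated sum Hi + Lo, where Lo captures the rounding error of
// Hi. Fast2Sum is valid here because |A| >= |B|.
#include <cassert>

void doubleDoubleIdea() {
  double A = 1e16, B = 1.0;
  double Hi = A + B;            // rounded sum: 1e16 (1e16 + 1 is not representable)
  double Lo = B - (Hi - A);     // exact rounding error of Hi
  assert(Hi == 1e16 && Lo == 1.0);
  // Mathematically Hi + Lo == 1e16 + 1 exactly, which one double cannot hold.
}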
+
+} // End detail namespace
+
+// This is an interface class that currently forwards functionality from
+// detail::IEEEFloat; a usage sketch follows the class definition below.
+class APFloat : public APFloatBase {
+ typedef detail::IEEEFloat IEEEFloat;
+ typedef detail::DoubleAPFloat DoubleAPFloat;
+
+ static_assert(std::is_standard_layout<IEEEFloat>::value, "");
+
+ union Storage {
+ const fltSemantics *semantics;
+ IEEEFloat IEEE;
+ DoubleAPFloat Double;
+
+ explicit Storage(IEEEFloat F, const fltSemantics &S);
+ explicit Storage(DoubleAPFloat F, const fltSemantics &S)
+ : Double(std::move(F)) {
+ assert(&S == &PPCDoubleDouble());
+ }
+
+ template <typename... ArgTypes>
+ Storage(const fltSemantics &Semantics, ArgTypes &&... Args) {
+ if (usesLayout<IEEEFloat>(Semantics)) {
+ new (&IEEE) IEEEFloat(Semantics, std::forward<ArgTypes>(Args)...);
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(Semantics)) {
+ new (&Double) DoubleAPFloat(Semantics, std::forward<ArgTypes>(Args)...);
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ ~Storage() {
+ if (usesLayout<IEEEFloat>(*semantics)) {
+ IEEE.~IEEEFloat();
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*semantics)) {
+ Double.~DoubleAPFloat();
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ Storage(const Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(RHS.IEEE);
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(RHS.Double);
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ Storage(Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(std::move(RHS.IEEE));
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(std::move(RHS.Double));
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ Storage &operator=(const Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = RHS.IEEE;
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = RHS.Double;
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(RHS);
+ }
+ return *this;
+ }
+
+ Storage &operator=(Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = std::move(RHS.IEEE);
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = std::move(RHS.Double);
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(std::move(RHS));
+ }
+ return *this;
+ }
+ } U;
+
+ template <typename T> static bool usesLayout(const fltSemantics &Semantics) {
+ static_assert(std::is_same<T, IEEEFloat>::value ||
+ std::is_same<T, DoubleAPFloat>::value, "");
+ if (std::is_same<T, DoubleAPFloat>::value) {
+ return &Semantics == &PPCDoubleDouble();
+ }
+ return &Semantics != &PPCDoubleDouble();
+ }
+
+ IEEEFloat &getIEEE() {
+ if (usesLayout<IEEEFloat>(*U.semantics))
+ return U.IEEE;
+ if (usesLayout<DoubleAPFloat>(*U.semantics))
+ return U.Double.getFirst().U.IEEE;
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ const IEEEFloat &getIEEE() const {
+ if (usesLayout<IEEEFloat>(*U.semantics))
+ return U.IEEE;
+ if (usesLayout<DoubleAPFloat>(*U.semantics))
+ return U.Double.getFirst().U.IEEE;
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ void makeZero(bool Neg) { getIEEE().makeZero(Neg); }
+
+ void makeInf(bool Neg) {
+ if (usesLayout<IEEEFloat>(*U.semantics))
+ return U.IEEE.makeInf(Neg);
+ if (usesLayout<DoubleAPFloat>(*U.semantics))
+ return U.Double.makeInf(Neg);
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ void makeNaN(bool SNaN, bool Neg, const APInt *fill) {
+ getIEEE().makeNaN(SNaN, Neg, fill);
+ }
+
+ void makeLargest(bool Neg) { getIEEE().makeLargest(Neg); }
+
+ void makeSmallest(bool Neg) { getIEEE().makeSmallest(Neg); }
+
+ void makeSmallestNormalized(bool Neg) {
+ getIEEE().makeSmallestNormalized(Neg);
+ }
+
+ // FIXME: This is a workaround because clang 3.3 (and older versions)
+ // always check for a default constructor in an array aggregate
+ // initialization, even if no element in the array is default-initialized.
+ APFloat() : U(IEEEdouble()) {
+ llvm_unreachable("This is a workaround for old clang.");
+ }
+
+ explicit APFloat(IEEEFloat F, const fltSemantics &S) : U(std::move(F), S) {}
+ explicit APFloat(DoubleAPFloat F, const fltSemantics &S)
+ : U(std::move(F), S) {}
+
+ cmpResult compareAbsoluteValue(const APFloat &RHS) const {
+ assert(&getSemantics() == &RHS.getSemantics());
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.compareAbsoluteValue(RHS.U.IEEE);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.compareAbsoluteValue(RHS.U.Double);
+ llvm_unreachable("Unexpected semantics");
+ }
+
+public:
+ APFloat(const fltSemantics &Semantics) : U(Semantics) {}
+ APFloat(const fltSemantics &Semantics, StringRef S);
+ APFloat(const fltSemantics &Semantics, integerPart I) : U(Semantics, I) {}
+ // TODO: Remove this constructor. This isn't faster than the first one.
+ APFloat(const fltSemantics &Semantics, uninitializedTag)
+ : U(Semantics, uninitialized) {}
+ APFloat(const fltSemantics &Semantics, const APInt &I) : U(Semantics, I) {}
+ explicit APFloat(double d) : U(IEEEFloat(d), IEEEdouble()) {}
+ explicit APFloat(float f) : U(IEEEFloat(f), IEEEsingle()) {}
+ APFloat(const APFloat &RHS) = default;
+ APFloat(APFloat &&RHS) = default;
+
+ ~APFloat() = default;
+
+ bool needsCleanup() const {
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.needsCleanup();
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.needsCleanup();
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ /// Factory for Positive and Negative Zero.
+ ///
+ /// \param Negative True iff the number should be negative.
+ static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeZero(Negative);
+ return Val;
+ }
+
+ /// Factory for Positive and Negative Infinity.
+ ///
+ /// \param Negative True iff the number should be negative.
+ static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeInf(Negative);
+ return Val;
+ }
+
+ /// Factory for NaN values.
+ ///
+ /// \param Negative - True iff the NaN generated should be negative.
+ /// \param type - The unspecified fill bits for creating the NaN, 0 by
+ /// default. The value is truncated as necessary.
+ static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
+ unsigned type = 0) {
+ if (type) {
+ APInt fill(64, type);
+ return getQNaN(Sem, Negative, &fill);
+ } else {
+ return getQNaN(Sem, Negative, nullptr);
+ }
+ }
+
+ /// Factory for QNaN values.
+ static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
+ const APInt *payload = nullptr) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeNaN(false, Negative, payload);
+ return Val;
+ }
+
+ /// Factory for SNaN values.
+ static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
+ const APInt *payload = nullptr) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeNaN(true, Negative, payload);
+ return Val;
+ }
+
+ /// Returns the largest finite number in the given semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getLargest(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeLargest(Negative);
+ return Val;
+ }
+
+ /// Returns the smallest (by magnitude) finite number in the given semantics.
+ /// Might be denormalized, which implies a relative loss of precision.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeSmallest(Negative);
+ return Val;
+ }
+
+ /// Returns the smallest (by magnitude) normalized finite number in the given
+ /// semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getSmallestNormalized(const fltSemantics &Sem,
+ bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeSmallestNormalized(Negative);
+ return Val;
+ }
+
+ /// Returns a float which is bitcast from an all-ones integer.
+ ///
+ /// \param BitWidth - Select float type
+ /// \param isIEEE - If 128-bit number, select between PPC and IEEE
+ static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
+
+ void Profile(FoldingSetNodeID &NID) const { getIEEE().Profile(NID); }
+
+ opStatus add(const APFloat &RHS, roundingMode RM) {
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.add(RHS.U.IEEE, RM);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.add(RHS.U.Double, RM);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus subtract(const APFloat &RHS, roundingMode RM) {
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.subtract(RHS.U.IEEE, RM);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.subtract(RHS.U.Double, RM);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus multiply(const APFloat &RHS, roundingMode RM) {
+ return getIEEE().multiply(RHS.getIEEE(), RM);
+ }
+ opStatus divide(const APFloat &RHS, roundingMode RM) {
+ return getIEEE().divide(RHS.getIEEE(), RM);
+ }
+ opStatus remainder(const APFloat &RHS) {
+ return getIEEE().remainder(RHS.getIEEE());
+ }
+ opStatus mod(const APFloat &RHS) { return getIEEE().mod(RHS.getIEEE()); }
+ opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend,
+ roundingMode RM) {
+ return getIEEE().fusedMultiplyAdd(Multiplicand.getIEEE(), Addend.getIEEE(),
+ RM);
+ }
+ opStatus roundToIntegral(roundingMode RM) {
+ return getIEEE().roundToIntegral(RM);
+ }
+ opStatus next(bool nextDown) { return getIEEE().next(nextDown); }
+
+ APFloat operator+(const APFloat &RHS) const {
+ return APFloat(getIEEE() + RHS.getIEEE(), getSemantics());
+ }
+
+ APFloat operator-(const APFloat &RHS) const {
+ return APFloat(getIEEE() - RHS.getIEEE(), getSemantics());
+ }
+
+ APFloat operator*(const APFloat &RHS) const {
+ return APFloat(getIEEE() * RHS.getIEEE(), getSemantics());
+ }
+
+ APFloat operator/(const APFloat &RHS) const {
+ return APFloat(getIEEE() / RHS.getIEEE(), getSemantics());
+ }
+
+ void changeSign() { getIEEE().changeSign(); }
+ void clearSign() { getIEEE().clearSign(); }
+ void copySign(const APFloat &RHS) { getIEEE().copySign(RHS.getIEEE()); }
+
+ static APFloat copySign(APFloat Value, const APFloat &Sign) {
+ return APFloat(IEEEFloat::copySign(Value.getIEEE(), Sign.getIEEE()),
+ Value.getSemantics());
+ }
+
+ opStatus convert(const fltSemantics &ToSemantics, roundingMode RM,
+ bool *losesInfo);
+ opStatus convertToInteger(integerPart *Input, unsigned int Width,
+ bool IsSigned, roundingMode RM,
+ bool *IsExact) const {
+ return getIEEE().convertToInteger(Input, Width, IsSigned, RM, IsExact);
+ }
+ opStatus convertToInteger(APSInt &Result, roundingMode RM,
+ bool *IsExact) const {
+ return getIEEE().convertToInteger(Result, RM, IsExact);
+ }
+ opStatus convertFromAPInt(const APInt &Input, bool IsSigned,
+ roundingMode RM) {
+ return getIEEE().convertFromAPInt(Input, IsSigned, RM);
+ }
+ opStatus convertFromSignExtendedInteger(const integerPart *Input,
+ unsigned int InputSize, bool IsSigned,
+ roundingMode RM) {
+ return getIEEE().convertFromSignExtendedInteger(Input, InputSize, IsSigned,
+ RM);
+ }
+ opStatus convertFromZeroExtendedInteger(const integerPart *Input,
+ unsigned int InputSize, bool IsSigned,
+ roundingMode RM) {
+ return getIEEE().convertFromZeroExtendedInteger(Input, InputSize, IsSigned,
+ RM);
+ }
+ opStatus convertFromString(StringRef, roundingMode);
+ APInt bitcastToAPInt() const { return getIEEE().bitcastToAPInt(); }
+ double convertToDouble() const { return getIEEE().convertToDouble(); }
+ float convertToFloat() const { return getIEEE().convertToFloat(); }
+
+ bool operator==(const APFloat &) const = delete;
+
+ cmpResult compare(const APFloat &RHS) const {
+ return getIEEE().compare(RHS.getIEEE());
+ }
+
+ bool bitwiseIsEqual(const APFloat &RHS) const {
+ return getIEEE().bitwiseIsEqual(RHS.getIEEE());
+ }
+
+ unsigned int convertToHexString(char *DST, unsigned int HexDigits,
+ bool UpperCase, roundingMode RM) const {
+ return getIEEE().convertToHexString(DST, HexDigits, UpperCase, RM);
+ }
+
+ bool isZero() const { return getCategory() == fcZero; }
+ bool isInfinity() const { return getCategory() == fcInfinity; }
+ bool isNaN() const { return getCategory() == fcNaN; }
+
+ bool isNegative() const { return getIEEE().isNegative(); }
+ bool isDenormal() const { return getIEEE().isDenormal(); }
+ bool isSignaling() const { return getIEEE().isSignaling(); }
+
+ bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+ bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+ fltCategory getCategory() const { return getIEEE().getCategory(); }
+ const fltSemantics &getSemantics() const { return *U.semantics; }
+ bool isNonZero() const { return !isZero(); }
+ bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+ bool isPosZero() const { return isZero() && !isNegative(); }
+ bool isNegZero() const { return isZero() && isNegative(); }
+ bool isSmallest() const { return getIEEE().isSmallest(); }
+ bool isLargest() const { return getIEEE().isLargest(); }
+ bool isInteger() const { return getIEEE().isInteger(); }
+
+ APFloat &operator=(const APFloat &RHS) = default;
+ APFloat &operator=(APFloat &&RHS) = default;
+
+ void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+ unsigned FormatMaxPadding = 3) const {
+ return getIEEE().toString(Str, FormatPrecision, FormatMaxPadding);
+ }
+
+ void print(raw_ostream &) const;
+ void dump() const;
+
+ bool getExactInverse(APFloat *inv) const {
+ return getIEEE().getExactInverse(inv ? &inv->getIEEE() : nullptr);
+ }
+
+ // This is for internal tests only.
+ // TODO: Remove it after the PPCDoubleDouble transition.
+ const APFloat &getSecondFloat() const {
+ assert(&getSemantics() == &PPCDoubleDouble());
+ return U.Double.getSecond();
+ }
+
+ friend hash_code hash_value(const APFloat &Arg);
+ friend int ilogb(const APFloat &Arg) { return ilogb(Arg.getIEEE()); }
+ friend APFloat scalbn(APFloat X, int Exp, roundingMode RM);
+ friend APFloat frexp(const APFloat &X, int &Exp, roundingMode RM);
+ friend IEEEFloat;
+ friend DoubleAPFloat;
+};
+
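// Usage sketch (illustrative, not part of the patch): the APFloat wrapper
// dispatches on the semantics object, so client code looks the same whether
// the underlying layout is IEEEFloat or DoubleAPFloat.
#include "llvm/ADT/APFloat.h"
#include <cassert>
using namespace llvm;

void apfloatExample() {
  APFloat X(2.0), Y(0.5);                     // IEEEdouble semantics
  X.add(Y, APFloat::rmNearestTiesToEven);     // X is now 2.5
  APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
  APFloat Big = APFloat::getLargest(APFloat::IEEEsingle());
  assert(X.compare(Y) == APFloat::cmpGreaterThan);
  assert(NaN.isNaN() && Big.isLargest());
  assert(X.convertToDouble() == 2.5);
}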
/// See friend declarations above.
///
/// These additional declarations are required in order to compile LLVM with IBM
/// xlC compiler.
hash_code hash_value(const APFloat &Arg);
-int ilogb(const APFloat &Arg);
-APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode);
+inline APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM) {
+ return APFloat(scalbn(X.getIEEE(), Exp, RM), X.getSemantics());
+}
/// \brief Equivalent of C standard library function.
///
/// While the C standard says Exp is an unspecified value for infinity and nan,
/// this returns INT_MAX for infinities, and INT_MIN for NaNs.
-APFloat frexp(const APFloat &Val, int &Exp, APFloat::roundingMode RM);
-
+inline APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM) {
+ return APFloat(frexp(X.getIEEE(), Exp, RM), X.getSemantics());
+}
/// \brief Returns the absolute value of the argument.
inline APFloat abs(APFloat X) {
X.clearSign();
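// Illustrative sketch of the free functions above; they mirror the C library
// semantics on APFloat values (IEEEdouble inputs assumed).
#include "llvm/ADT/APFloat.h"
#include <cassert>
using namespace llvm;

void frexpExample() {
  APFloat V(24.0);
  int Exp = 0;
  APFloat Frac = frexp(V, Exp, APFloat::rmNearestTiesToEven);
  assert(Frac.convertToDouble() == 0.75 && Exp == 5);        // 24 == 0.75 * 2^5
  APFloat Back = scalbn(Frac, Exp, APFloat::rmNearestTiesToEven);
  assert(Back.convertToDouble() == 24.0);
  assert(abs(APFloat(-3.5)).convertToDouble() == 3.5);
}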
diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h
index d77d1c7ca93f..2c0713da256c 100644
--- a/include/llvm/ADT/APInt.h
+++ b/include/llvm/ADT/APInt.h
@@ -40,6 +40,10 @@ const unsigned int host_char_bit = 8;
const unsigned int integerPartWidth =
host_char_bit * static_cast<unsigned int>(sizeof(integerPart));
+class APInt;
+
+inline APInt operator-(APInt);
+
//===----------------------------------------------------------------------===//
// APInt Class
//===----------------------------------------------------------------------===//
@@ -70,7 +74,7 @@ const unsigned int integerPartWidth =
/// * In general, the class tries to follow the style of computation that LLVM
/// uses in its IR. This simplifies its use for LLVM.
///
-class APInt {
+class LLVM_NODISCARD APInt {
unsigned BitWidth; ///< The number of bits in this APInt.
/// This union is used to store the integer value. When the
@@ -620,18 +624,6 @@ public:
return Result;
}
- /// \brief Unary negation operator
- ///
- /// Negates *this using two's complement logic.
- ///
- /// \returns An APInt value representing the negation of *this.
- APInt operator-() const {
- APInt Result(*this);
- Result.flipAllBits();
- ++Result;
- return Result;
- }
-
/// \brief Logical negation operator.
///
/// Performs logical negation operation on this APInt.
@@ -750,6 +742,7 @@ public:
///
/// \returns *this
APInt &operator+=(const APInt &RHS);
+ APInt &operator+=(uint64_t RHS);
/// \brief Subtraction assignment operator.
///
@@ -757,6 +750,7 @@ public:
///
/// \returns *this
APInt &operator-=(const APInt &RHS);
+ APInt &operator-=(uint64_t RHS);
/// \brief Left-shift assignment function.
///
@@ -783,9 +777,7 @@ public:
return APInt(getBitWidth(), VAL & RHS.VAL);
return AndSlowCase(RHS);
}
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT And(const APInt &RHS) const {
- return this->operator&(RHS);
- }
+ APInt And(const APInt &RHS) const { return this->operator&(RHS); }
/// \brief Bitwise OR operator.
///
@@ -805,9 +797,7 @@ public:
/// calling operator|.
///
/// \returns An APInt value representing the bitwise OR of *this and RHS.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT Or(const APInt &RHS) const {
- return this->operator|(RHS);
- }
+ APInt Or(const APInt &RHS) const { return this->operator|(RHS); }
/// \brief Bitwise XOR operator.
///
@@ -827,27 +817,13 @@ public:
/// through the usage of operator^.
///
/// \returns An APInt value representing the bitwise XOR of *this and RHS.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT Xor(const APInt &RHS) const {
- return this->operator^(RHS);
- }
+ APInt Xor(const APInt &RHS) const { return this->operator^(RHS); }
/// \brief Multiplication operator.
///
/// Multiplies this APInt by RHS and returns the result.
APInt operator*(const APInt &RHS) const;
- /// \brief Addition operator.
- ///
- /// Adds RHS to this APInt and returns the result.
- APInt operator+(const APInt &RHS) const;
- APInt operator+(uint64_t RHS) const;
-
- /// \brief Subtraction operator.
- ///
- /// Subtracts RHS from this APInt and returns the result.
- APInt operator-(const APInt &RHS) const;
- APInt operator-(uint64_t RHS) const;
-
/// \brief Left logical shift operator.
///
/// Shifts this APInt left by \p Bits and returns the result.
@@ -861,17 +837,17 @@ public:
/// \brief Arithmetic right-shift function.
///
/// Arithmetic right-shift this APInt by shiftAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT ashr(unsigned shiftAmt) const;
+ APInt ashr(unsigned shiftAmt) const;
/// \brief Logical right-shift function.
///
/// Logical right-shift this APInt by shiftAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT lshr(unsigned shiftAmt) const;
+ APInt lshr(unsigned shiftAmt) const;
/// \brief Left-shift function.
///
/// Left-shift this APInt by shiftAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT shl(unsigned shiftAmt) const {
+ APInt shl(unsigned shiftAmt) const {
assert(shiftAmt <= BitWidth && "Invalid shift amount");
if (isSingleWord()) {
if (shiftAmt >= BitWidth)
@@ -882,31 +858,31 @@ public:
}
/// \brief Rotate left by rotateAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotl(unsigned rotateAmt) const;
+ APInt rotl(unsigned rotateAmt) const;
/// \brief Rotate right by rotateAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotr(unsigned rotateAmt) const;
+ APInt rotr(unsigned rotateAmt) const;
/// \brief Arithmetic right-shift function.
///
/// Arithmetic right-shift this APInt by shiftAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT ashr(const APInt &shiftAmt) const;
+ APInt ashr(const APInt &shiftAmt) const;
/// \brief Logical right-shift function.
///
/// Logical right-shift this APInt by shiftAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT lshr(const APInt &shiftAmt) const;
+ APInt lshr(const APInt &shiftAmt) const;
/// \brief Left-shift function.
///
/// Left-shift this APInt by shiftAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT shl(const APInt &shiftAmt) const;
+ APInt shl(const APInt &shiftAmt) const;
/// \brief Rotate left by rotateAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotl(const APInt &rotateAmt) const;
+ APInt rotl(const APInt &rotateAmt) const;
/// \brief Rotate right by rotateAmt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotr(const APInt &rotateAmt) const;
+ APInt rotr(const APInt &rotateAmt) const;
/// \brief Unsigned division operation.
///
@@ -914,12 +890,12 @@ public:
/// RHS are treated as unsigned quantities for purposes of this division.
///
/// \returns a new APInt value containing the division result
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT udiv(const APInt &RHS) const;
+ APInt udiv(const APInt &RHS) const;
/// \brief Signed division function for APInt.
///
/// Signed divide this APInt by APInt RHS.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT sdiv(const APInt &RHS) const;
+ APInt sdiv(const APInt &RHS) const;
/// \brief Unsigned remainder operation.
///
@@ -930,12 +906,12 @@ public:
/// is *this.
///
/// \returns a new APInt value containing the remainder result
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT urem(const APInt &RHS) const;
+ APInt urem(const APInt &RHS) const;
/// \brief Function for signed remainder operation.
///
/// Signed remainder operation on APInt.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT srem(const APInt &RHS) const;
+ APInt srem(const APInt &RHS) const;
/// \brief Dual division/remainder interface.
///
@@ -1178,7 +1154,7 @@ public:
///
/// Truncate the APInt to a specified width. It is an error to specify a width
/// that is greater than or equal to the current width.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT trunc(unsigned width) const;
+ APInt trunc(unsigned width) const;
/// \brief Sign extend to a new width.
///
@@ -1186,38 +1162,38 @@ public:
/// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
/// It is an error to specify a width that is less than or equal to the
/// current width.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT sext(unsigned width) const;
+ APInt sext(unsigned width) const;
/// \brief Zero extend to a new width.
///
/// This operation zero extends the APInt to a new width. The high order bits
/// are filled with 0 bits. It is an error to specify a width that is less
/// than or equal to the current width.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT zext(unsigned width) const;
+ APInt zext(unsigned width) const;
/// \brief Sign extend or truncate to width
///
/// Make this APInt have the bit width given by \p width. The value is sign
/// extended, truncated, or left alone to make it that width.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT sextOrTrunc(unsigned width) const;
+ APInt sextOrTrunc(unsigned width) const;
/// \brief Zero extend or truncate to width
///
/// Make this APInt have the bit width given by \p width. The value is zero
/// extended, truncated, or left alone to make it that width.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrTrunc(unsigned width) const;
+ APInt zextOrTrunc(unsigned width) const;
/// \brief Sign extend or truncate to width
///
/// Make this APInt have the bit width given by \p width. The value is sign
/// extended, or left alone to make it that width.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT sextOrSelf(unsigned width) const;
+ APInt sextOrSelf(unsigned width) const;
/// \brief Zero extend or truncate to width
///
/// Make this APInt have the bit width given by \p width. The value is zero
/// extended, or left alone to make it that width.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrSelf(unsigned width) const;
+ APInt zextOrSelf(unsigned width) const;
/// @}
/// \name Bit Manipulation Operators
@@ -1454,11 +1430,11 @@ public:
std::string toString(unsigned Radix, bool Signed) const;
/// \returns a byte-swapped representation of this APInt Value.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT byteSwap() const;
+ APInt byteSwap() const;
/// \returns the value with the bit representation reversed of this APInt
/// Value.
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT reverseBits() const;
+ APInt reverseBits() const;
/// \brief Converts this APInt to a double value.
double roundToDouble(bool isSigned) const;
@@ -1501,7 +1477,7 @@ public:
///
/// The conversion does not do a translation from double to integer, it just
/// re-interprets the bits of the double.
- static APInt LLVM_ATTRIBUTE_UNUSED_RESULT doubleToBits(double V) {
+ static APInt doubleToBits(double V) {
union {
uint64_t I;
double D;
@@ -1514,7 +1490,7 @@ public:
///
/// The conversion does not do a translation from float to integer, it just
/// re-interprets the bits of the float.
- static APInt LLVM_ATTRIBUTE_UNUSED_RESULT floatToBits(float V) {
+ static APInt floatToBits(float V) {
union {
unsigned I;
float F;
@@ -1532,7 +1508,9 @@ public:
/// \returns the ceil log base 2 of this APInt.
unsigned ceilLogBase2() const {
- return BitWidth - (*this - 1).countLeadingZeros();
+ APInt temp(*this);
+ --temp;
+ return BitWidth - temp.countLeadingZeros();
}
/// \returns the nearest log base 2 of this APInt. Ties round up.
@@ -1573,12 +1551,12 @@ public:
}
/// \brief Compute the square root
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT sqrt() const;
+ APInt sqrt() const;
/// \brief Get the absolute value;
///
/// If *this is < 0 then return -(*this), otherwise *this;
- APInt LLVM_ATTRIBUTE_UNUSED_RESULT abs() const {
+ APInt abs() const {
if (isNegative())
return -(*this);
return *this;
@@ -1750,6 +1728,55 @@ inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
return OS;
}
+inline APInt operator-(APInt v) {
+ v.flipAllBits();
+ ++v;
+ return v;
+}
+
+inline APInt operator+(APInt a, const APInt &b) {
+ a += b;
+ return a;
+}
+
+inline APInt operator+(const APInt &a, APInt &&b) {
+ b += a;
+ return std::move(b);
+}
+
+inline APInt operator+(APInt a, uint64_t RHS) {
+ a += RHS;
+ return a;
+}
+
+inline APInt operator+(uint64_t LHS, APInt b) {
+ b += LHS;
+ return b;
+}
+
+inline APInt operator-(APInt a, const APInt &b) {
+ a -= b;
+ return a;
+}
+
+inline APInt operator-(const APInt &a, APInt &&b) {
+ b = -std::move(b);
+ b += a;
+ return std::move(b);
+}
+
+inline APInt operator-(APInt a, uint64_t RHS) {
+ a -= RHS;
+ return a;
+}
+
+inline APInt operator-(uint64_t LHS, APInt b) {
+ b = -std::move(b);
+ b += LHS;
+ return b;
+}
+
+
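// Usage sketch (illustrative): binary + and - are now the free functions
// defined above, built on the compound assignments; unary minus is also free.
#include "llvm/ADT/APInt.h"
#include <cassert>
using namespace llvm;

void apintExample() {
  APInt A(32, 10), B(32, 3);
  APInt Sum = A + B;          // 13
  APInt Diff = A - 3;         // 7, uses operator-(APInt, uint64_t)
  APInt Neg = -B;             // two's complement negation: 0xFFFFFFFD
  APInt FromLHS = 100 - A;    // 90, uses operator-(uint64_t, APInt)
  assert(Sum == 13 && Diff == 7 && FromLHS == 90);
  (void)Neg;
}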
namespace APIntOps {
/// \brief Determine the smaller of two APInts considered to be signed.
diff --git a/include/llvm/ADT/APSInt.h b/include/llvm/ADT/APSInt.h
index a6552d0a2f36..813b3686d6b1 100644
--- a/include/llvm/ADT/APSInt.h
+++ b/include/llvm/ADT/APSInt.h
@@ -19,7 +19,7 @@
namespace llvm {
-class APSInt : public APInt {
+class LLVM_NODISCARD APSInt : public APInt {
bool IsUnsigned;
public:
@@ -78,22 +78,22 @@ public:
return isSigned() ? getSExtValue() : getZExtValue();
}
- APSInt LLVM_ATTRIBUTE_UNUSED_RESULT trunc(uint32_t width) const {
+ APSInt trunc(uint32_t width) const {
return APSInt(APInt::trunc(width), IsUnsigned);
}
- APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extend(uint32_t width) const {
+ APSInt extend(uint32_t width) const {
if (IsUnsigned)
return APSInt(zext(width), IsUnsigned);
else
return APSInt(sext(width), IsUnsigned);
}
- APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extOrTrunc(uint32_t width) const {
- if (IsUnsigned)
- return APSInt(zextOrTrunc(width), IsUnsigned);
- else
- return APSInt(sextOrTrunc(width), IsUnsigned);
+ APSInt extOrTrunc(uint32_t width) const {
+ if (IsUnsigned)
+ return APSInt(zextOrTrunc(width), IsUnsigned);
+ else
+ return APSInt(sextOrTrunc(width), IsUnsigned);
}
const APSInt &operator%=(const APSInt &RHS) {
@@ -235,25 +235,19 @@ public:
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
}
- APSInt LLVM_ATTRIBUTE_UNUSED_RESULT And(const APSInt& RHS) const {
- return this->operator&(RHS);
- }
+ APSInt And(const APSInt &RHS) const { return this->operator&(RHS); }
APSInt operator|(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
}
- APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Or(const APSInt& RHS) const {
- return this->operator|(RHS);
- }
+ APSInt Or(const APSInt &RHS) const { return this->operator|(RHS); }
APSInt operator^(const APSInt &RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
}
- APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Xor(const APSInt& RHS) const {
- return this->operator^(RHS);
- }
+ APSInt Xor(const APSInt &RHS) const { return this->operator^(RHS); }
APSInt operator*(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
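// Illustrative sketch: extOrTrunc() extends according to the stored
// signedness, and the LLVM_NODISCARD class attribute now flags discarded
// results of these value-returning helpers.
#include "llvm/ADT/APSInt.h"
#include <cassert>
using namespace llvm;

void apsintExample() {
  APSInt S(APInt(8, 0xF0), /*isUnsigned=*/false);   // signed: -16
  APSInt U(APInt(8, 0xF0), /*isUnsigned=*/true);    // unsigned: 240
  assert(S.extOrTrunc(16).getExtValue() == -16);    // sign-extended
  assert(U.extOrTrunc(16).getExtValue() == 240);    // zero-extended
}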
diff --git a/include/llvm/ADT/AllocatorList.h b/include/llvm/ADT/AllocatorList.h
new file mode 100644
index 000000000000..05a549f96ec7
--- /dev/null
+++ b/include/llvm/ADT/AllocatorList.h
@@ -0,0 +1,226 @@
+//===- llvm/ADT/AllocatorList.h - Custom allocator list ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ALLOCATORLIST_H
+#define LLVM_ADT_ALLOCATORLIST_H
+
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/simple_ilist.h"
+#include "llvm/Support/Allocator.h"
+#include <type_traits>
+
+namespace llvm {
+
+/// A linked-list with a custom, local allocator.
+///
+/// Expose a std::list-like interface that owns and uses a custom LLVM-style
+/// allocator (e.g., BumpPtrAllocator), leveraging \a simple_ilist for the
+/// implementation details.
+///
+/// Because this list owns the allocator, calling \a splice() with a different
+/// list isn't generally safe. As such, \a splice has been left out of the
+/// interface entirely.
+template <class T, class AllocatorT> class AllocatorList : AllocatorT {
+ struct Node : ilist_node<Node> {
+ Node(Node &&) = delete;
+ Node(const Node &) = delete;
+ Node &operator=(Node &&) = delete;
+ Node &operator=(const Node &) = delete;
+
+ Node(T &&V) : V(std::move(V)) {}
+ Node(const T &V) : V(V) {}
+ template <class... Ts> Node(Ts &&... Vs) : V(std::forward<Ts>(Vs)...) {}
+ T V;
+ };
+
+ typedef simple_ilist<Node> list_type;
+ list_type List;
+
+ AllocatorT &getAlloc() { return *this; }
+ const AllocatorT &getAlloc() const { return *this; }
+
+ template <class... ArgTs> Node *create(ArgTs &&... Args) {
+ return new (getAlloc()) Node(std::forward<ArgTs>(Args)...);
+ }
+
+ struct Cloner {
+ AllocatorList &AL;
+ Cloner(AllocatorList &AL) : AL(AL) {}
+ Node *operator()(const Node &N) const { return AL.create(N.V); }
+ };
+
+ struct Disposer {
+ AllocatorList &AL;
+ Disposer(AllocatorList &AL) : AL(AL) {}
+ void operator()(Node *N) const {
+ N->~Node();
+ AL.getAlloc().Deallocate(N);
+ }
+ };
+
+public:
+ typedef T value_type;
+ typedef T *pointer;
+ typedef T &reference;
+ typedef const T *const_pointer;
+ typedef const T &const_reference;
+ typedef typename list_type::size_type size_type;
+ typedef typename list_type::difference_type difference_type;
+
+private:
+ template <class ValueT, class IteratorBase>
+ class IteratorImpl
+ : public iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
+ IteratorBase,
+ std::bidirectional_iterator_tag, ValueT> {
+ template <class OtherValueT, class OtherIteratorBase>
+ friend class IteratorImpl;
+ friend AllocatorList;
+
+ typedef iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
+ IteratorBase, std::bidirectional_iterator_tag,
+ ValueT>
+ base_type;
+
+ public:
+ typedef ValueT value_type;
+ typedef ValueT *pointer;
+ typedef ValueT &reference;
+
+ IteratorImpl() = default;
+ IteratorImpl(const IteratorImpl &) = default;
+ IteratorImpl &operator=(const IteratorImpl &) = default;
+ ~IteratorImpl() = default;
+
+ explicit IteratorImpl(const IteratorBase &I) : base_type(I) {}
+
+ template <class OtherValueT, class OtherIteratorBase>
+ IteratorImpl(const IteratorImpl<OtherValueT, OtherIteratorBase> &X,
+ typename std::enable_if<std::is_convertible<
+ OtherIteratorBase, IteratorBase>::value>::type * = nullptr)
+ : base_type(X.wrapped()) {}
+
+ reference operator*() const { return base_type::wrapped()->V; }
+ pointer operator->() const { return &operator*(); }
+
+ friend bool operator==(const IteratorImpl &L, const IteratorImpl &R) {
+ return L.wrapped() == R.wrapped();
+ }
+ friend bool operator!=(const IteratorImpl &L, const IteratorImpl &R) {
+ return !(L == R);
+ }
+ };
+
+public:
+ typedef IteratorImpl<T, typename list_type::iterator> iterator;
+ typedef IteratorImpl<T, typename list_type::reverse_iterator>
+ reverse_iterator;
+ typedef IteratorImpl<const T, typename list_type::const_iterator>
+ const_iterator;
+ typedef IteratorImpl<const T, typename list_type::const_reverse_iterator>
+ const_reverse_iterator;
+
+ AllocatorList() = default;
+ AllocatorList(AllocatorList &&X)
+ : AllocatorT(std::move(X.getAlloc())), List(std::move(X.List)) {}
+ AllocatorList(const AllocatorList &X) {
+ List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+ }
+ AllocatorList &operator=(AllocatorList &&X) {
+ clear(); // Dispose of current nodes explicitly.
+ List = std::move(X.List);
+ getAlloc() = std::move(X.getAlloc());
+ return *this;
+ }
+ AllocatorList &operator=(const AllocatorList &X) {
+ List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+ return *this;
+ }
+ ~AllocatorList() { clear(); }
+
+ void swap(AllocatorList &RHS) {
+ List.swap(RHS.List);
+ std::swap(getAlloc(), RHS.getAlloc());
+ }
+
+ bool empty() { return List.empty(); }
+ size_t size() { return List.size(); }
+
+ iterator begin() { return iterator(List.begin()); }
+ iterator end() { return iterator(List.end()); }
+ const_iterator begin() const { return const_iterator(List.begin()); }
+ const_iterator end() const { return const_iterator(List.end()); }
+ reverse_iterator rbegin() { return reverse_iterator(List.rbegin()); }
+ reverse_iterator rend() { return reverse_iterator(List.rend()); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(List.rbegin());
+ }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(List.rend());
+ }
+
+ T &back() { return List.back().V; }
+ T &front() { return List.front().V; }
+ const T &back() const { return List.back().V; }
+ const T &front() const { return List.front().V; }
+
+ template <class... Ts> iterator emplace(iterator I, Ts &&... Vs) {
+ return iterator(List.insert(I.wrapped(), *create(std::forward<Ts>(Vs)...)));
+ }
+
+ iterator insert(iterator I, T &&V) {
+ return iterator(List.insert(I.wrapped(), *create(std::move(V))));
+ }
+ iterator insert(iterator I, const T &V) {
+ return iterator(List.insert(I.wrapped(), *create(V)));
+ }
+
+ template <class Iterator>
+ void insert(iterator I, Iterator First, Iterator Last) {
+ for (; First != Last; ++First)
+ List.insert(I.wrapped(), *create(*First));
+ }
+
+ iterator erase(iterator I) {
+ return iterator(List.eraseAndDispose(I.wrapped(), Disposer(*this)));
+ }
+
+ iterator erase(iterator First, iterator Last) {
+ return iterator(
+ List.eraseAndDispose(First.wrapped(), Last.wrapped(), Disposer(*this)));
+ }
+
+ void clear() { List.clearAndDispose(Disposer(*this)); }
+ void pop_back() { List.eraseAndDispose(--List.end(), Disposer(*this)); }
+ void pop_front() { List.eraseAndDispose(List.begin(), Disposer(*this)); }
+ void push_back(T &&V) { insert(end(), std::move(V)); }
+ void push_front(T &&V) { insert(begin(), std::move(V)); }
+ void push_back(const T &V) { insert(end(), V); }
+ void push_front(const T &V) { insert(begin(), V); }
+ template <class... Ts> void emplace_back(Ts &&... Vs) {
+ emplace(end(), std::forward<Ts>(Vs)...);
+ }
+ template <class... Ts> void emplace_front(Ts &&... Vs) {
+ emplace(begin(), std::forward<Ts>(Vs)...);
+ }
+
+ /// Reset the underlying allocator.
+ ///
+ /// \pre \c empty()
+ void resetAlloc() {
+ assert(empty() && "Cannot reset allocator if not empty");
+ getAlloc().Reset();
+ }
+};
+
+template <class T> using BumpPtrList = AllocatorList<T, BumpPtrAllocator>;
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ALLOCATORLIST_H
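// Usage sketch (illustrative): BumpPtrList owns a BumpPtrAllocator, so nodes
// are bump-allocated and reclaimed wholesale; there is deliberately no splice().
#include "llvm/ADT/AllocatorList.h"
#include <cassert>
using namespace llvm;

void allocatorListExample() {
  BumpPtrList<int> L;
  L.push_back(1);
  L.push_back(2);
  L.emplace_front(0);
  assert(L.front() == 0 && L.back() == 2 && L.size() == 3);
  L.clear();        // disposes all nodes...
  L.resetAlloc();   // ...then the allocator memory can be reused
}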
diff --git a/include/llvm/ADT/ArrayRef.h b/include/llvm/ADT/ArrayRef.h
index 95a1e62ef005..b3fe31f4a806 100644
--- a/include/llvm/ADT/ArrayRef.h
+++ b/include/llvm/ADT/ArrayRef.h
@@ -12,7 +12,9 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include <array>
#include <vector>
namespace llvm {
@@ -28,7 +30,7 @@ namespace llvm {
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template<typename T>
- class ArrayRef {
+ class LLVM_NODISCARD ArrayRef {
public:
typedef const T *iterator;
typedef const T *const_iterator;
@@ -78,10 +80,14 @@ namespace llvm {
/*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
+ /// Construct an ArrayRef from a std::array
+ template <size_t N>
+ /*implicit*/ constexpr ArrayRef(const std::array<T, N> &Arr)
+ : Data(Arr.data()), Length(N) {}
+
/// Construct an ArrayRef from a C array.
template <size_t N>
- /*implicit*/ LLVM_CONSTEXPR ArrayRef(const T (&Arr)[N])
- : Data(Arr), Length(N) {}
+ /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
/// Construct an ArrayRef from a std::initializer_list.
/*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
@@ -160,12 +166,6 @@ namespace llvm {
return std::equal(begin(), end(), RHS.begin());
}
- /// slice(n) - Chop off the first N elements of the array.
- ArrayRef<T> slice(size_t N) const {
- assert(N <= size() && "Invalid specifier");
- return ArrayRef<T>(data()+N, size()-N);
- }
-
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
ArrayRef<T> slice(size_t N, size_t M) const {
@@ -173,6 +173,9 @@ namespace llvm {
return ArrayRef<T>(data()+N, M);
}
+ /// slice(n) - Chop off the first N elements of the array.
+ ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }
+
/// \brief Drop the first \p N elements of the array.
ArrayRef<T> drop_front(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
@@ -185,6 +188,44 @@ namespace llvm {
return slice(0, size() - N);
}
+ /// \brief Return a copy of *this with the first N elements satisfying the
+ /// given predicate removed.
+ template <class PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
+ return ArrayRef<T>(find_if_not(*this, Pred), end());
+ }
+
+ /// \brief Return a copy of *this with the first N elements not satisfying
+ /// the given predicate removed.
+ template <class PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
+ return ArrayRef<T>(find_if(*this, Pred), end());
+ }
+
+ /// \brief Return a copy of *this with only the first \p N elements.
+ ArrayRef<T> take_front(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_back(size() - N);
+ }
+
+ /// \brief Return a copy of *this with only the last \p N elements.
+ ArrayRef<T> take_back(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_front(size() - N);
+ }
+
+ /// \brief Return the first N elements of this Array that satisfy the given
+ /// predicate.
+ template <class PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
+ return ArrayRef<T>(begin(), find_if_not(*this, Pred));
+ }
+
+ /// \brief Return the first N elements of this Array that don't satisfy the
+ /// given predicate.
+ template <class PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
+ return ArrayRef<T>(begin(), find_if(*this, Pred));
+ }
+
/// @}
/// @name Operator Overloads
/// @{
@@ -193,6 +234,22 @@ namespace llvm {
return Data[Index];
}
+ /// Disallow accidental assignment from a temporary.
+ ///
+ /// The declaration here is extra complicated so that "arrayRef = {}"
+ /// continues to select the move assignment operator.
+ template <typename U>
+ typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+ operator=(U &&Temporary) = delete;
+
+ /// Disallow accidental assignment from a temporary.
+ ///
+ /// The declaration here is extra complicated so that "arrayRef = {}"
+ /// continues to select the move assignment operator.
+ template <typename U>
+ typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type &
+ operator=(std::initializer_list<U>) = delete;
+
/// @}
/// @name Expensive Operations
/// @{
@@ -223,7 +280,7 @@ namespace llvm {
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template<typename T>
- class MutableArrayRef : public ArrayRef<T> {
+ class LLVM_NODISCARD MutableArrayRef : public ArrayRef<T> {
public:
typedef T *iterator;
@@ -253,10 +310,14 @@ namespace llvm {
/*implicit*/ MutableArrayRef(std::vector<T> &Vec)
: ArrayRef<T>(Vec) {}
+ /// Construct a MutableArrayRef from a std::array.
+ template <size_t N>
+ /*implicit*/ constexpr MutableArrayRef(std::array<T, N> &Arr)
+ : ArrayRef<T>(Arr) {}
+
/// Construct a MutableArrayRef from a C array.
template <size_t N>
- /*implicit*/ LLVM_CONSTEXPR MutableArrayRef(T (&Arr)[N])
- : ArrayRef<T>(Arr) {}
+ /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef<T>(Arr) {}
T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
@@ -278,17 +339,16 @@ namespace llvm {
return data()[this->size()-1];
}
- /// slice(n) - Chop off the first N elements of the array.
- MutableArrayRef<T> slice(size_t N) const {
- assert(N <= this->size() && "Invalid specifier");
- return MutableArrayRef<T>(data()+N, this->size()-N);
- }
-
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
MutableArrayRef<T> slice(size_t N, size_t M) const {
- assert(N+M <= this->size() && "Invalid specifier");
- return MutableArrayRef<T>(data()+N, M);
+ assert(N + M <= this->size() && "Invalid specifier");
+ return MutableArrayRef<T>(this->data() + N, M);
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ MutableArrayRef<T> slice(size_t N) const {
+ return slice(N, this->size() - N);
}
/// \brief Drop the first \p N elements of the array.
@@ -302,6 +362,48 @@ namespace llvm {
return slice(0, this->size() - N);
}
+ /// \brief Return a copy of *this with the first N elements satisfying the
+ /// given predicate removed.
+ template <class PredicateT>
+ MutableArrayRef<T> drop_while(PredicateT Pred) const {
+ return MutableArrayRef<T>(find_if_not(*this, Pred), end());
+ }
+
+ /// \brief Return a copy of *this with the first N elements not satisfying
+ /// the given predicate removed.
+ template <class PredicateT>
+ MutableArrayRef<T> drop_until(PredicateT Pred) const {
+ return MutableArrayRef<T>(find_if(*this, Pred), end());
+ }
+
+ /// \brief Return a copy of *this with only the first \p N elements.
+ MutableArrayRef<T> take_front(size_t N = 1) const {
+ if (N >= this->size())
+ return *this;
+ return drop_back(this->size() - N);
+ }
+
+ /// \brief Return a copy of *this with only the last \p N elements.
+ MutableArrayRef<T> take_back(size_t N = 1) const {
+ if (N >= this->size())
+ return *this;
+ return drop_front(this->size() - N);
+ }
+
+ /// \brief Return the first N elements of this Array that satisfy the given
+ /// predicate.
+ template <class PredicateT>
+ MutableArrayRef<T> take_while(PredicateT Pred) const {
+ return MutableArrayRef<T>(begin(), find_if_not(*this, Pred));
+ }
+
+ /// \brief Return the first N elements of this Array that don't satisfy the
+ /// given predicate.
+ template <class PredicateT>
+ MutableArrayRef<T> take_until(PredicateT Pred) const {
+ return MutableArrayRef<T>(begin(), find_if(*this, Pred));
+ }
+
/// @}
/// @name Operator Overloads
/// @{
@@ -311,6 +413,25 @@ namespace llvm {
}
};
+ /// This is a MutableArrayRef that owns its array.
+ template <typename T> class OwningArrayRef : public MutableArrayRef<T> {
+ public:
+ OwningArrayRef() {}
+ OwningArrayRef(size_t Size) : MutableArrayRef<T>(new T[Size], Size) {}
+ OwningArrayRef(ArrayRef<T> Data)
+ : MutableArrayRef<T>(new T[Data.size()], Data.size()) {
+ std::copy(Data.begin(), Data.end(), this->begin());
+ }
+ OwningArrayRef(OwningArrayRef &&Other) { *this = Other; }
+ OwningArrayRef &operator=(OwningArrayRef &&Other) {
+ delete[] this->data();
+ this->MutableArrayRef<T>::operator=(Other);
+ Other.MutableArrayRef<T>::operator=(MutableArrayRef<T>());
+ return *this;
+ }
+ ~OwningArrayRef() { delete[] this->data(); }
+ };
+
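// Usage sketch (illustrative) for the new ArrayRef helpers added above.
#include "llvm/ADT/ArrayRef.h"
#include <array>
#include <cassert>
using namespace llvm;

void arrayRefExample() {
  std::array<int, 5> Storage = {{1, 2, 3, 4, 5}};
  ArrayRef<int> A(Storage);                       // new std::array constructor
  assert(A.take_front(2).equals({1, 2}));
  assert(A.drop_while([](int X) { return X < 3; }).equals({3, 4, 5}));
  assert(A.take_until([](int X) { return X == 4; }).equals({1, 2, 3}));
  assert(A.slice(1).equals({2, 3, 4, 5}));
}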
/// @name ArrayRef Convenience constructors
/// @{
diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h
index 661437126d48..cf3756d0d9c1 100644
--- a/include/llvm/ADT/BitVector.h
+++ b/include/llvm/ADT/BitVector.h
@@ -21,6 +21,7 @@
#include <cstdint>
#include <cstdlib>
#include <cstring>
+#include <utility>
namespace llvm {
@@ -45,14 +46,13 @@ public:
BitWord *WordRef;
unsigned BitPos;
- reference(); // Undefined
-
public:
reference(BitVector &b, unsigned Idx) {
WordRef = &b.Bits[Idx / BITWORD_SIZE];
BitPos = Idx % BITWORD_SIZE;
}
+ reference() = delete;
reference(const reference&) = default;
reference &operator=(reference t) {
diff --git a/include/llvm/ADT/CachedHashString.h b/include/llvm/ADT/CachedHashString.h
new file mode 100644
index 000000000000..a56a6213a073
--- /dev/null
+++ b/include/llvm/ADT/CachedHashString.h
@@ -0,0 +1,184 @@
+//===- llvm/ADT/CachedHashString.h - Prehashed string/StringRef -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CachedHashString and CachedHashStringRef. These are owning
+// and not-owning string types that store their hash in addition to their string
+// data.
+//
+// Unlike std::string, CachedHashString can be used in DenseSet/DenseMap
+// (because, unlike std::string, CachedHashString lets us have empty and
+// tombstone values).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_CACHED_HASH_STRING_H
+#define LLVM_ADT_CACHED_HASH_STRING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// A container which contains a StringRef plus a precomputed hash.
+class CachedHashStringRef {
+ const char *P;
+ uint32_t Size;
+ uint32_t Hash;
+
+public:
+ // Explicit because hashing a string isn't free.
+ explicit CachedHashStringRef(StringRef S)
+ : CachedHashStringRef(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+ CachedHashStringRef(StringRef S, uint32_t Hash)
+ : P(S.data()), Size(S.size()), Hash(Hash) {
+ assert(S.size() <= std::numeric_limits<uint32_t>::max());
+ }
+
+ StringRef val() const { return StringRef(P, Size); }
+ uint32_t size() const { return Size; }
+ uint32_t hash() const { return Hash; }
+};
+
+template <> struct DenseMapInfo<CachedHashStringRef> {
+ static CachedHashStringRef getEmptyKey() {
+ return CachedHashStringRef(DenseMapInfo<StringRef>::getEmptyKey(), 0);
+ }
+ static CachedHashStringRef getTombstoneKey() {
+ return CachedHashStringRef(DenseMapInfo<StringRef>::getTombstoneKey(), 1);
+ }
+ static unsigned getHashValue(const CachedHashStringRef &S) {
+ assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+ assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+ return S.hash();
+ }
+ static bool isEqual(const CachedHashStringRef &LHS,
+ const CachedHashStringRef &RHS) {
+ return LHS.hash() == RHS.hash() &&
+ DenseMapInfo<StringRef>::isEqual(LHS.val(), RHS.val());
+ }
+};
+
+/// A container which contains a string, which it owns, plus a precomputed hash.
+///
+/// We do not null-terminate the string.
+class CachedHashString {
+ friend struct DenseMapInfo<CachedHashString>;
+
+ char *P;
+ uint32_t Size;
+ uint32_t Hash;
+
+ static char *getEmptyKeyPtr() { return DenseMapInfo<char *>::getEmptyKey(); }
+ static char *getTombstoneKeyPtr() {
+ return DenseMapInfo<char *>::getTombstoneKey();
+ }
+
+ bool isEmptyOrTombstone() const {
+ return P == getEmptyKeyPtr() || P == getTombstoneKeyPtr();
+ }
+
+ struct ConstructEmptyOrTombstoneTy {};
+
+ CachedHashString(ConstructEmptyOrTombstoneTy, char *EmptyOrTombstonePtr)
+ : P(EmptyOrTombstonePtr), Size(0), Hash(0) {
+ assert(isEmptyOrTombstone());
+ }
+
+ // TODO: Use small-string optimization to avoid allocating.
+
+public:
+ explicit CachedHashString(const char *S) : CachedHashString(StringRef(S)) {}
+
+ // Explicit because copying and hashing a string isn't free.
+ explicit CachedHashString(StringRef S)
+ : CachedHashString(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+ CachedHashString(StringRef S, uint32_t Hash)
+ : P(new char[S.size()]), Size(S.size()), Hash(Hash) {
+ memcpy(P, S.data(), S.size());
+ }
+
+ // Ideally this class would not be copyable. But SetVector requires copyable
+ // keys, and we want this to be usable there.
+ CachedHashString(const CachedHashString &Other)
+ : Size(Other.Size), Hash(Other.Hash) {
+ if (Other.isEmptyOrTombstone()) {
+ P = Other.P;
+ } else {
+ P = new char[Size];
+ memcpy(P, Other.P, Size);
+ }
+ }
+
+ CachedHashString &operator=(CachedHashString Other) {
+ swap(*this, Other);
+ return *this;
+ }
+
+ CachedHashString(CachedHashString &&Other) noexcept
+ : P(Other.P), Size(Other.Size), Hash(Other.Hash) {
+ Other.P = getEmptyKeyPtr();
+ }
+
+ ~CachedHashString() {
+ if (!isEmptyOrTombstone())
+ delete[] P;
+ }
+
+ StringRef val() const { return StringRef(P, Size); }
+ uint32_t size() const { return Size; }
+ uint32_t hash() const { return Hash; }
+
+ operator StringRef() const { return val(); }
+ operator CachedHashStringRef() const {
+ return CachedHashStringRef(val(), Hash);
+ }
+
+ friend void swap(CachedHashString &LHS, CachedHashString &RHS) {
+ using std::swap;
+ swap(LHS.P, RHS.P);
+ swap(LHS.Size, RHS.Size);
+ swap(LHS.Hash, RHS.Hash);
+ }
+};
+
+template <> struct DenseMapInfo<CachedHashString> {
+ static CachedHashString getEmptyKey() {
+ return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+ CachedHashString::getEmptyKeyPtr());
+ }
+ static CachedHashString getTombstoneKey() {
+ return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+ CachedHashString::getTombstoneKeyPtr());
+ }
+ static unsigned getHashValue(const CachedHashString &S) {
+ assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+ assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+ return S.hash();
+ }
+ static bool isEqual(const CachedHashString &LHS,
+ const CachedHashString &RHS) {
+ if (LHS.hash() != RHS.hash())
+ return false;
+ if (LHS.P == CachedHashString::getEmptyKeyPtr())
+ return RHS.P == CachedHashString::getEmptyKeyPtr();
+ if (LHS.P == CachedHashString::getTombstoneKeyPtr())
+ return RHS.P == CachedHashString::getTombstoneKeyPtr();
+
+ // This is safe because if RHS.P is the empty or tombstone key, it will have
+ // length 0, so we'll never dereference its pointer.
+ return LHS.val() == RHS.val();
+ }
+};
+
+} // namespace llvm
+
+#endif
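// Usage sketch (illustrative): the hash is computed once at construction and
// then reused by DenseSet/DenseMap through the DenseMapInfo specializations
// above.
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseSet.h"
#include <cassert>
using namespace llvm;

void cachedHashExample() {
  DenseSet<CachedHashStringRef> Seen;
  StringRef S = "hello";
  Seen.insert(CachedHashStringRef(S));          // hashes "hello" once
  assert(Seen.count(CachedHashStringRef(S)));   // reuses the cached hash
  CachedHashString Owned(S);                    // owning variant, copies data
  assert(Owned.val() == "hello" && Owned.size() == 5);
}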
diff --git a/include/llvm/ADT/DAGDeltaAlgorithm.h b/include/llvm/ADT/DAGDeltaAlgorithm.h
index 3dd862c8b220..5ea0fe872868 100644
--- a/include/llvm/ADT/DAGDeltaAlgorithm.h
+++ b/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -10,6 +10,7 @@
#define LLVM_ADT_DAGDELTAALGORITHM_H
#include <set>
+#include <utility>
#include <vector>
namespace llvm {
@@ -37,6 +38,7 @@ namespace llvm {
/// should satisfy.
class DAGDeltaAlgorithm {
virtual void anchor();
+
public:
typedef unsigned change_ty;
typedef std::pair<change_ty, change_ty> edge_ty;
@@ -46,7 +48,7 @@ public:
typedef std::vector<changeset_ty> changesetlist_ty;
public:
- virtual ~DAGDeltaAlgorithm() {}
+ virtual ~DAGDeltaAlgorithm() = default;
/// Run - Minimize the DAG formed by the \p Changes vertices and the
/// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
@@ -74,4 +76,4 @@ public:
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_DAGDELTAALGORITHM_H
diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h
index 917c086beba3..0b4b09d4b733 100644
--- a/include/llvm/ADT/DenseMap.h
+++ b/include/llvm/ADT/DenseMap.h
@@ -19,20 +19,20 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
-#include <climits>
#include <cstddef>
#include <cstring>
#include <iterator>
+#include <limits>
#include <new>
#include <utility>
namespace llvm {
namespace detail {
+
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
@@ -42,7 +42,8 @@ struct DenseMapPair : public std::pair<KeyT, ValueT> {
ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
-}
+
+} // end namespace detail
template <
typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
@@ -76,7 +77,7 @@ public:
return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
}
- bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
+ LLVM_NODISCARD bool empty() const {
return getNumEntries() == 0;
}
unsigned size() const { return getNumEntries(); }
@@ -169,30 +170,45 @@ public:
// If the key is already in the map, it returns false and doesn't update the
// value.
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+ return try_emplace(KV.first, KV.second);
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ return try_emplace(std::move(KV.first), std::move(KV.second));
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
BucketT *TheBucket;
- if (LookupBucketFor(KV.first, TheBucket))
+ if (LookupBucketFor(Key, TheBucket))
return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
false); // Already in map.
// Otherwise, insert the new element.
- TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
+ TheBucket =
+ InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
true);
}
// Inserts key,value pair into the map if the key isn't already in the map.
- // If the key is already in the map, it returns false and doesn't update the
- // value.
- std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
BucketT *TheBucket;
- if (LookupBucketFor(KV.first, TheBucket))
+ if (LookupBucketFor(Key, TheBucket))
return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
false); // Already in map.
// Otherwise, insert the new element.
- TheBucket = InsertIntoBucket(std::move(KV.first),
- std::move(KV.second),
- TheBucket);
+ TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
true);
}
@@ -211,8 +227,8 @@ public:
false); // Already in map.
// Otherwise, insert the new element.
- TheBucket = InsertIntoBucket(std::move(KV.first), std::move(KV.second), Val,
- TheBucket);
+ TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
+ std::move(KV.second), Val);
return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
true);
}
@@ -224,7 +240,6 @@ public:
insert(*I);
}
-
bool erase(const KeyT &Val) {
BucketT *TheBucket;
if (!LookupBucketFor(Val, TheBucket))
@@ -249,7 +264,7 @@ public:
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;
- return *InsertIntoBucket(Key, ValueT(), TheBucket);
+ return *InsertIntoBucket(TheBucket, Key);
}
ValueT &operator[](const KeyT &Key) {
@@ -261,7 +276,7 @@ public:
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;
- return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
+ return *InsertIntoBucket(TheBucket, std::move(Key));
}
ValueT &operator[](KeyT &&Key) {
@@ -429,36 +444,19 @@ private:
static_cast<DerivedT *>(this)->shrink_and_clear();
}
-
- BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
- BucketT *TheBucket) {
+ template <typename KeyArg, typename... ValueArgs>
+ BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+ ValueArgs &&... Values) {
TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
- TheBucket->getFirst() = Key;
- ::new (&TheBucket->getSecond()) ValueT(Value);
- return TheBucket;
- }
-
- BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
- BucketT *TheBucket) {
- TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
-
- TheBucket->getFirst() = Key;
- ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
- return TheBucket;
- }
-
- BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
- TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
-
- TheBucket->getFirst() = std::move(Key);
- ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
+ TheBucket->getFirst() = std::forward<KeyArg>(Key);
+ ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
return TheBucket;
}
template <typename LookupKeyT>
- BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, LookupKeyT &Lookup,
- BucketT *TheBucket) {
+ BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+ ValueT &&Value, LookupKeyT &Lookup) {
TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
TheBucket->getFirst() = std::move(Key);
@@ -530,7 +528,7 @@ private:
unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
unsigned ProbeAmt = 1;
- while (1) {
+ while (true) {
const BucketT *ThisBucket = BucketsPtr + BucketNo;
// Found Val's bucket? If so, return it.
if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
@@ -744,6 +742,8 @@ class SmallDenseMap
// simplicity of referring to them.
typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+ static_assert(isPowerOf2_64(InlineBuckets),
+ "InlineBuckets must be a power of 2.");
unsigned Small : 1;
unsigned NumEntries : 31;
@@ -968,7 +968,8 @@ private:
return NumEntries;
}
void setNumEntries(unsigned Num) {
- assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
+ // NumEntries is hardcoded to be 31 bits wide.
+ assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
NumEntries = Num;
}
@@ -1042,8 +1043,10 @@ public:
typedef value_type *pointer;
typedef value_type &reference;
typedef std::forward_iterator_tag iterator_category;
+
private:
pointer Ptr, End;
+
public:
DenseMapIterator() : Ptr(nullptr), End(nullptr) {}
@@ -1117,4 +1120,4 @@ capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_DENSEMAP_H
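
A short sketch of the try_emplace interface added above (the map and helper below are illustrative, not part of LLVM). Unlike insert(std::make_pair(...)), the value is constructed in place and only when the key is actually missing, so nothing is moved from on a failed insertion:

#include "llvm/ADT/DenseMap.h"
#include <string>

// Returns the cached name for Id, creating a placeholder entry on first use.
// try_emplace forwards its trailing arguments to the value constructor only
// when Id is not yet present; otherwise the arguments are left untouched.
std::string &getOrCreateName(llvm::DenseMap<unsigned, std::string> &Names,
                             unsigned Id) {
  auto Result = Names.try_emplace(Id, "<anonymous>");
  return Result.first->second;
}
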
diff --git a/include/llvm/ADT/DenseMapInfo.h b/include/llvm/ADT/DenseMapInfo.h
index 18c692e0cbcc..a844ebcccf5b 100644
--- a/include/llvm/ADT/DenseMapInfo.h
+++ b/include/llvm/ADT/DenseMapInfo.h
@@ -30,36 +30,6 @@ struct DenseMapInfo {
//static bool isEqual(const T &LHS, const T &RHS);
};
-template <typename T> struct CachedHash {
- CachedHash(T Val) : Val(std::move(Val)) {
- Hash = DenseMapInfo<T>::getHashValue(Val);
- }
- CachedHash(T Val, unsigned Hash) : Val(std::move(Val)), Hash(Hash) {}
- T Val;
- unsigned Hash;
-};
-
-// Provide DenseMapInfo for all CachedHash<T>.
-template <typename T> struct DenseMapInfo<CachedHash<T>> {
- static CachedHash<T> getEmptyKey() {
- T N = DenseMapInfo<T>::getEmptyKey();
- return {N, 0};
- }
- static CachedHash<T> getTombstoneKey() {
- T N = DenseMapInfo<T>::getTombstoneKey();
- return {N, 0};
- }
- static unsigned getHashValue(CachedHash<T> Val) {
- assert(!isEqual(Val, getEmptyKey()) && "Cannot hash the empty key!");
- assert(!isEqual(Val, getTombstoneKey()) &&
- "Cannot hash the tombstone key!");
- return Val.Hash;
- }
- static bool isEqual(CachedHash<T> A, CachedHash<T> B) {
- return DenseMapInfo<T>::isEqual(A.Val, B.Val);
- }
-};
-
// Provide DenseMapInfo for all pointers.
template<typename T>
struct DenseMapInfo<T*> {
diff --git a/include/llvm/ADT/DenseSet.h b/include/llvm/ADT/DenseSet.h
index 3724a09623f3..b25d3b7cba6f 100644
--- a/include/llvm/ADT/DenseSet.h
+++ b/include/llvm/ADT/DenseSet.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the DenseSet class.
+// This file defines the DenseSet and SmallDenseSet classes.
//
//===----------------------------------------------------------------------===//
@@ -15,6 +15,7 @@
#define LLVM_ADT_DENSESET_H
#include "llvm/ADT/DenseMap.h"
+#include <initializer_list>
namespace llvm {
@@ -32,13 +33,18 @@ public:
DenseSetEmpty &getSecond() { return *this; }
const DenseSetEmpty &getSecond() const { return *this; }
};
-}
-/// DenseSet - This implements a dense probed hash-table based set.
-template<typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT> >
-class DenseSet {
- typedef DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
- detail::DenseSetPair<ValueT>> MapTy;
+/// Base class for DenseSet and SmallDenseSet.
+///
+/// MapTy should be either
+///
+/// DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+/// detail::DenseSetPair<ValueT>>
+///
+/// or the equivalent SmallDenseMap type. ValueInfoT must implement the
+/// DenseMapInfo "concept".
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+class DenseSetImpl {
static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
"DenseMap buckets unexpectedly large!");
MapTy TheMap;
@@ -48,7 +54,12 @@ public:
typedef ValueT value_type;
typedef unsigned size_type;
- explicit DenseSet(unsigned NumInitBuckets = 0) : TheMap(NumInitBuckets) {}
+ explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
+
+ DenseSetImpl(std::initializer_list<ValueT> Elems)
+ : DenseSetImpl(Elems.size()) {
+ insert(Elems.begin(), Elems.end());
+ }
bool empty() const { return TheMap.empty(); }
size_type size() const { return TheMap.size(); }
@@ -58,6 +69,10 @@ public:
/// the Size of the set.
void resize(size_t Size) { TheMap.resize(Size); }
+ /// Grow the DenseSet so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_t Size) { TheMap.reserve(Size); }
+
void clear() {
TheMap.clear();
}
@@ -71,15 +86,13 @@ public:
return TheMap.erase(V);
}
- void swap(DenseSet& RHS) {
- TheMap.swap(RHS.TheMap);
- }
+ void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); }
// Iterators.
class Iterator {
typename MapTy::iterator I;
- friend class DenseSet;
+ friend class DenseSetImpl;
public:
typedef typename MapTy::iterator::difference_type difference_type;
@@ -131,6 +144,9 @@ public:
const_iterator end() const { return ConstIterator(TheMap.end()); }
iterator find(const ValueT &V) { return Iterator(TheMap.find(V)); }
+ const_iterator find(const ValueT &V) const {
+ return ConstIterator(TheMap.find(V));
+ }
/// Alternative version of find() which allows a different, and possibly less
/// expensive, key type.
@@ -151,7 +167,12 @@ public:
std::pair<iterator, bool> insert(const ValueT &V) {
detail::DenseSetEmpty Empty;
- return TheMap.insert(std::make_pair(V, Empty));
+ return TheMap.try_emplace(V, Empty);
+ }
+
+ std::pair<iterator, bool> insert(ValueT &&V) {
+ detail::DenseSetEmpty Empty;
+ return TheMap.try_emplace(std::move(V), Empty);
}
/// Alternative version of insert that uses a different (and possibly less
@@ -159,12 +180,11 @@ public:
template <typename LookupKeyT>
std::pair<iterator, bool> insert_as(const ValueT &V,
const LookupKeyT &LookupKey) {
- return insert_as(ValueT(V), LookupKey);
+ return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey);
}
template <typename LookupKeyT>
std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
- detail::DenseSetEmpty Empty;
- return TheMap.insert_as(std::make_pair(std::move(V), Empty), LookupKey);
+ return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey);
}
// Range insertion of values.
@@ -175,6 +195,42 @@ public:
}
};
+} // namespace detail
+
+/// Implements a dense probed hash-table based set.
+template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
+class DenseSet : public detail::DenseSetImpl<
+ ValueT, DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+ detail::DenseSetPair<ValueT>>,
+ ValueInfoT> {
+ using BaseT =
+ detail::DenseSetImpl<ValueT,
+ DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+ detail::DenseSetPair<ValueT>>,
+ ValueInfoT>;
+
+public:
+ using BaseT::BaseT;
+};
+
+/// Implements a dense probed hash-table based set with some number of buckets
+/// stored inline.
+template <typename ValueT, unsigned InlineBuckets = 4,
+ typename ValueInfoT = DenseMapInfo<ValueT>>
+class SmallDenseSet
+ : public detail::DenseSetImpl<
+ ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT> {
+ using BaseT = detail::DenseSetImpl<
+ ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT>;
+
+public:
+ using BaseT::BaseT;
+};
+
} // end namespace llvm
#endif
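
Because DenseSet and the new SmallDenseSet now share DenseSetImpl, both pick up the initializer-list constructor, reserve(), and const find() added above. A small illustrative sketch (both functions are hypothetical):

#include "llvm/ADT/DenseSet.h"

// DenseSet now supports brace initialization directly.
static bool isReservedOpcode(unsigned Opc) {
  static const llvm::DenseSet<unsigned> Reserved = {0, 1, 2, 3};
  return Reserved.count(Opc) != 0;
}

// SmallDenseSet keeps its first InlineBuckets buckets inline, so small sets
// avoid heap allocation; reserve() pre-sizes the underlying map.
static void collectIds(llvm::SmallDenseSet<unsigned, 8> &Out,
                       unsigned Begin, unsigned End) {
  Out.reserve(End - Begin);
  for (unsigned I = Begin; I != End; ++I)
    Out.insert(I);
}
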
diff --git a/include/llvm/ADT/DepthFirstIterator.h b/include/llvm/ADT/DepthFirstIterator.h
index c9317b8539b3..c54573204588 100644
--- a/include/llvm/ADT/DepthFirstIterator.h
+++ b/include/llvm/ADT/DepthFirstIterator.h
@@ -34,10 +34,13 @@
#define LLVM_ADT_DEPTHFIRSTITERATOR_H
#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
+#include <iterator>
#include <set>
+#include <utility>
#include <vector>
namespace llvm {
@@ -55,44 +58,56 @@ class df_iterator_storage<SetType, true> {
public:
df_iterator_storage(SetType &VSet) : Visited(VSet) {}
df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {}
+
SetType &Visited;
};
+// The visited state for the iteration is a simple set augmented with
+// one more method, completed, which is invoked when all children of a
+// node have been processed. It is intended to distinguish back and
+// cross edges in the spanning tree but is not used in the common case.
+template <typename NodeRef, unsigned SmallSize=8>
+struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
+ typedef SmallPtrSet<NodeRef, SmallSize> BaseSet;
+ typedef typename BaseSet::iterator iterator;
+  std::pair<iterator, bool> insert(NodeRef N) { return BaseSet::insert(N); }
+  template <typename IterT>
+  void insert(IterT Begin, IterT End) { BaseSet::insert(Begin, End); }
+
+ void completed(NodeRef) { }
+};
+
// Generic Depth First Iterator
-template<class GraphT,
-class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
- bool ExtStorage = false, class GT = GraphTraits<GraphT> >
-class df_iterator : public std::iterator<std::forward_iterator_tag,
- typename GT::NodeType, ptrdiff_t>,
- public df_iterator_storage<SetType, ExtStorage> {
- typedef std::iterator<std::forward_iterator_tag,
- typename GT::NodeType, ptrdiff_t> super;
-
- typedef typename GT::NodeType NodeType;
+template <class GraphT,
+ class SetType =
+ df_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
+ bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class df_iterator
+ : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+ public df_iterator_storage<SetType, ExtStorage> {
+ typedef std::iterator<std::forward_iterator_tag, typename GT::NodeRef> super;
+
+ typedef typename GT::NodeRef NodeRef;
typedef typename GT::ChildIteratorType ChildItTy;
- typedef PointerIntPair<NodeType*, 1> PointerIntTy;
+
+ // First element is node reference, second is the 'next child' to visit.
+  // The second element is initialized lazily to pick up graph changes during the
+ // DFS.
+ typedef std::pair<NodeRef, Optional<ChildItTy>> StackElement;
// VisitStack - Used to maintain the ordering. Top = current block
- // First element is node pointer, second is the 'next child' to visit
- // if the int in PointerIntTy is 0, the 'next child' to visit is invalid
- std::vector<std::pair<PointerIntTy, ChildItTy>> VisitStack;
+ std::vector<StackElement> VisitStack;
private:
- inline df_iterator(NodeType *Node) {
+ inline df_iterator(NodeRef Node) {
this->Visited.insert(Node);
- VisitStack.push_back(
- std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node)));
+ VisitStack.push_back(StackElement(Node, None));
}
- inline df_iterator() {
- // End is when stack is empty
- }
- inline df_iterator(NodeType *Node, SetType &S)
- : df_iterator_storage<SetType, ExtStorage>(S) {
- if (!S.count(Node)) {
- VisitStack.push_back(
- std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node)));
- this->Visited.insert(Node);
- }
+ inline df_iterator() = default; // End is when stack is empty
+ inline df_iterator(NodeRef Node, SetType &S)
+ : df_iterator_storage<SetType, ExtStorage>(S) {
+ if (this->Visited.insert(Node).second)
+ VisitStack.push_back(StackElement(Node, None));
}
inline df_iterator(SetType &S)
: df_iterator_storage<SetType, ExtStorage>(S) {
@@ -101,26 +116,26 @@ private:
inline void toNext() {
do {
- std::pair<PointerIntTy, ChildItTy> &Top = VisitStack.back();
- NodeType *Node = Top.first.getPointer();
- ChildItTy &It = Top.second;
- if (!Top.first.getInt()) {
- // now retrieve the real begin of the children before we dive in
- It = GT::child_begin(Node);
- Top.first.setInt(1);
- }
+ NodeRef Node = VisitStack.back().first;
+ Optional<ChildItTy> &Opt = VisitStack.back().second;
+
+ if (!Opt)
+ Opt.emplace(GT::child_begin(Node));
- while (It != GT::child_end(Node)) {
- NodeType *Next = *It++;
+ // Notice that we directly mutate *Opt here, so that
+ // VisitStack.back().second actually gets updated as the iterator
+ // increases.
+ while (*Opt != GT::child_end(Node)) {
+ NodeRef Next = *(*Opt)++;
// Has our next sibling been visited?
- if (Next && this->Visited.insert(Next).second) {
+ if (this->Visited.insert(Next).second) {
// No, do it now.
- VisitStack.push_back(
- std::make_pair(PointerIntTy(Next, 0), GT::child_begin(Next)));
+ VisitStack.push_back(StackElement(Next, None));
return;
}
}
-
+ this->Visited.completed(Node);
+
// Oops, ran out of successors... go up a level on the stack.
VisitStack.pop_back();
} while (!VisitStack.empty());
@@ -146,13 +161,13 @@ public:
}
bool operator!=(const df_iterator &x) const { return !(*this == x); }
- pointer operator*() const { return VisitStack.back().first.getPointer(); }
+ const NodeRef &operator*() const { return VisitStack.back().first; }
// This is a nonstandard operator-> that dereferences the pointer an extra
// time... so that you can actually call methods ON the Node, because
// the contained type is a pointer. This allows BBIt->getTerminator() f.e.
//
- NodeType *operator->() const { return **this; }
+ NodeRef operator->() const { return **this; }
df_iterator &operator++() { // Preincrement
toNext();
@@ -180,7 +195,7 @@ public:
// specified node. This is public, and will probably be used to iterate over
// nodes that a depth first iteration did not find: ie unreachable nodes.
//
- bool nodeVisited(NodeType *Node) const {
+ bool nodeVisited(NodeRef Node) const {
return this->Visited.count(Node) != 0;
}
@@ -190,9 +205,7 @@ public:
/// getPath - Return the n'th node in the path from the entry node to the
/// current node.
- NodeType *getPath(unsigned n) const {
- return VisitStack[n].first.getPointer();
- }
+ NodeRef getPath(unsigned n) const { return VisitStack[n].first; }
};
// Provide global constructors that automatically figure out correct types...
@@ -214,7 +227,7 @@ iterator_range<df_iterator<T>> depth_first(const T& G) {
}
// Provide global definitions of external depth first iterators...
-template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
struct df_ext_iterator : public df_iterator<T, SetTy, true> {
df_ext_iterator(const df_iterator<T, SetTy, true> &V)
: df_iterator<T, SetTy, true>(V) {}
@@ -238,7 +251,8 @@ iterator_range<df_ext_iterator<T, SetTy>> depth_first_ext(const T& G,
// Provide global definitions of inverse depth first iterators...
template <class T,
- class SetTy = llvm::SmallPtrSet<typename GraphTraits<T>::NodeType*, 8>,
+ class SetTy =
+ df_iterator_default_set<typename GraphTraits<T>::NodeRef>,
bool External = false>
struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
@@ -262,7 +276,7 @@ iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
}
// Provide global definitions of external inverse depth first iterators...
-template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
: idf_iterator<T, SetTy, true>(V) {}
@@ -286,6 +300,6 @@ iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_DEPTHFIRSTITERATOR_H
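
The completed() hook introduced above gives external-storage users a cheap way to observe when a node finishes during the walk. A sketch of a custom visited set, assuming NodeRef is a pointer-like type with a DenseMapInfo specialization (PostOrderNumberingSet is illustrative, not an LLVM class):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"

// Records the order in which nodes finish (all children visited) while the
// depth-first walk runs. Pass an instance as the external storage argument,
// e.g. for (auto N : depth_first_ext(G, Visited)) { ... }
template <typename NodeRef>
struct PostOrderNumberingSet : llvm::df_iterator_default_set<NodeRef> {
  llvm::DenseMap<NodeRef, unsigned> FinishOrder;
  unsigned NextNumber = 0;

  // Called by df_iterator once every child of N has been processed.
  void completed(NodeRef N) { FinishOrder[N] = NextNumber++; }
};
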
diff --git a/include/llvm/ADT/EpochTracker.h b/include/llvm/ADT/EpochTracker.h
index 582d58179d13..db39ba4e0c50 100644
--- a/include/llvm/ADT/EpochTracker.h
+++ b/include/llvm/ADT/EpochTracker.h
@@ -16,28 +16,14 @@
#ifndef LLVM_ADT_EPOCH_TRACKER_H
#define LLVM_ADT_EPOCH_TRACKER_H
+#include "llvm/Config/abi-breaking.h"
#include "llvm/Config/llvm-config.h"
#include <cstdint>
namespace llvm {
-#ifndef LLVM_ENABLE_ABI_BREAKING_CHECKS
-
-class DebugEpochBase {
-public:
- void incrementEpoch() {}
-
- class HandleBase {
- public:
- HandleBase() = default;
- explicit HandleBase(const DebugEpochBase *) {}
- bool isHandleInSync() const { return true; }
- const void *getEpochAddress() const { return nullptr; }
- };
-};
-
-#else
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
/// \brief A base class for data structure classes wishing to make iterators
/// ("handles") pointing into themselves fail-fast. When building without
@@ -92,6 +78,21 @@ public:
};
};
+#else
+
+class DebugEpochBase {
+public:
+ void incrementEpoch() {}
+
+ class HandleBase {
+ public:
+ HandleBase() = default;
+ explicit HandleBase(const DebugEpochBase *) {}
+ bool isHandleInSync() const { return true; }
+ const void *getEpochAddress() const { return nullptr; }
+ };
+};
+
#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS
} // namespace llvm
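
For reference, a sketch of how a container is expected to use DebugEpochBase: mutations bump the epoch, and handles constructed from the container can assert that they are still in sync. With LLVM_ENABLE_ABI_BREAKING_CHECKS off, this compiles down to the no-op variant above. TrackedVector is a hypothetical example, not an LLVM class:

#include "llvm/ADT/EpochTracker.h"
#include <cassert>
#include <vector>

// Hypothetical container demonstrating the fail-fast handle pattern used by
// DenseMap and friends.
class TrackedVector : public llvm::DebugEpochBase {
  std::vector<int> Data;

public:
  void push_back(int V) {
    incrementEpoch();   // any outstanding Handle is now out of sync
    Data.push_back(V);
  }

  class Handle : public llvm::DebugEpochBase::HandleBase {
    const TrackedVector *Parent;
    size_t Index;

  public:
    Handle(const TrackedVector &V, size_t I)
        : HandleBase(&V), Parent(&V), Index(I) {}

    int get() const {
      assert(isHandleInSync() && "handle used after container mutation");
      return Parent->Data[Index];
    }
  };
};
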
diff --git a/include/llvm/ADT/EquivalenceClasses.h b/include/llvm/ADT/EquivalenceClasses.h
index d6a26f88e67d..8fcac178ffc9 100644
--- a/include/llvm/ADT/EquivalenceClasses.h
+++ b/include/llvm/ADT/EquivalenceClasses.h
@@ -15,9 +15,10 @@
#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
#define LLVM_ADT_EQUIVALENCECLASSES_H
-#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstddef>
+#include <cstdint>
+#include <iterator>
#include <set>
namespace llvm {
@@ -70,6 +71,7 @@ class EquivalenceClasses {
friend class EquivalenceClasses;
mutable const ECValue *Leader, *Next;
ElemTy Data;
+
// ECValue ctor - Start out with EndOfList pointing to this node, Next is
// Null, isLeader = true.
ECValue(const ElemTy &Elt)
@@ -81,6 +83,7 @@ class EquivalenceClasses {
// Path compression.
return Leader = Leader->getLeader();
}
+
const ECValue *getEndOfList() const {
assert(isLeader() && "Cannot get the end of a list for a non-leader!");
return Leader;
@@ -90,6 +93,7 @@ class EquivalenceClasses {
assert(getNext() == nullptr && "Already has a next pointer!");
Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
}
+
public:
ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
Data(RHS.Data) {
@@ -115,7 +119,7 @@ class EquivalenceClasses {
std::set<ECValue> TheMapping;
public:
- EquivalenceClasses() {}
+ EquivalenceClasses() = default;
EquivalenceClasses(const EquivalenceClasses &RHS) {
operator=(RHS);
}
@@ -187,7 +191,6 @@ public:
return NC;
}
-
//===--------------------------------------------------------------------===//
// Mutation methods
@@ -210,7 +213,6 @@ public:
return findLeader(TheMapping.find(V));
}
-
/// union - Merge the two equivalence sets for the specified values, inserting
/// them if they do not already exist in the equivalence set.
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) {
@@ -243,12 +245,13 @@ public:
const ElemTy, ptrdiff_t> super;
const ECValue *Node;
friend class EquivalenceClasses;
+
public:
typedef size_t size_type;
typedef typename super::pointer pointer;
typedef typename super::reference reference;
- explicit member_iterator() {}
+ explicit member_iterator() = default;
explicit member_iterator(const ECValue *N) : Node(N) {}
reference operator*() const {
@@ -278,6 +281,6 @@ public:
};
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_EQUIVALENCECLASSES_H
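
A brief usage sketch of the union-find interface (the two helpers are illustrative). Note that getLeaderValue asserts if a value was never inserted, so the query below assumes both arguments are already members:

#include "llvm/ADT/EquivalenceClasses.h"

// Merge classes; unionSets inserts missing values before joining them.
static void buildClasses(llvm::EquivalenceClasses<unsigned> &EC) {
  EC.unionSets(1, 2);
  EC.unionSets(2, 3); // 1, 2, and 3 now share a single leader
  EC.insert(7);       // 7 stays in its own singleton class
}

// Two values are equivalent iff they report the same leader.
static bool equivalent(const llvm::EquivalenceClasses<unsigned> &EC,
                       unsigned A, unsigned B) {
  return EC.getLeaderValue(A) == EC.getLeaderValue(B);
}
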
diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h
index f16258af4ae2..dab18297dd3b 100644
--- a/include/llvm/ADT/FoldingSet.h
+++ b/include/llvm/ADT/FoldingSet.h
@@ -19,8 +19,13 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
namespace llvm {
+
/// This folding set is used for two purposes:
/// 1. Given information about a node we want to create, look up the unique
/// instance of the node in the set. If the node already exists, return
@@ -184,6 +189,7 @@ public:
/// EltCount-th node won't cause a rebucket operation. reserve is permitted
/// to allocate more space than requested by EltCount.
void reserve(unsigned EltCount);
+
/// capacity - Returns the number of nodes permitted in the folding set
/// before a rebucket operation is performed.
unsigned capacity() {
@@ -200,14 +206,17 @@ private:
/// NewBucketCount must be a power of two, and must be greater than the old
/// bucket count.
void GrowBucketCount(unsigned NewBucketCount);
+
protected:
/// GetNodeProfile - Instantiations of the FoldingSet template implement
/// this function to gather data bits for the given node.
virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
+
/// NodeEquals - Instantiations of the FoldingSet template implement
/// this function to compare the given node with the given ID.
virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) const=0;
+
/// ComputeNodeHash - Instantiations of the FoldingSet template implement
/// this function to compute a hash value for the given node.
virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
@@ -215,8 +224,6 @@ protected:
//===----------------------------------------------------------------------===//
-template<typename T> struct FoldingSetTrait;
-
/// DefaultFoldingSetTrait - This class provides default implementations
/// for FoldingSetTrait implementations.
///
@@ -252,8 +259,6 @@ template<typename T> struct DefaultFoldingSetTrait {
template<typename T> struct FoldingSetTrait
: public DefaultFoldingSetTrait<T> {};
-template<typename T, typename Ctx> struct ContextualFoldingSetTrait;
-
/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
/// for ContextualFoldingSets.
template<typename T, typename Ctx>
@@ -261,6 +266,7 @@ struct DefaultContextualFoldingSetTrait {
static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
X.Profile(ID, Context);
}
+
static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID, Ctx Context);
static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
@@ -279,11 +285,11 @@ template<typename T, typename Ctx> struct ContextualFoldingSetTrait
/// is often much larger than necessary, and the possibility of heap
/// allocation means it requires a non-trivial destructor call.
class FoldingSetNodeIDRef {
- const unsigned *Data;
- size_t Size;
+ const unsigned *Data = nullptr;
+ size_t Size = 0;
public:
- FoldingSetNodeIDRef() : Data(nullptr), Size(0) {}
+ FoldingSetNodeIDRef() = default;
FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
@@ -313,7 +319,7 @@ class FoldingSetNodeID {
SmallVector<unsigned, 32> Bits;
public:
- FoldingSetNodeID() {}
+ FoldingSetNodeID() = default;
FoldingSetNodeID(FoldingSetNodeIDRef Ref)
: Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}
@@ -418,6 +424,7 @@ private:
T *TN = static_cast<T *>(N);
FoldingSetTrait<T>::Profile(*TN, ID);
}
+
/// NodeEquals - Instantiations may optionally provide a way to compare a
/// node with a specified ID.
bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
@@ -425,6 +432,7 @@ private:
T *TN = static_cast<T *>(N);
return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
}
+
/// ComputeNodeHash - Instantiations may optionally provide a way to compute a
/// hash value directly from a node.
unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
@@ -483,7 +491,7 @@ public:
///
/// T must be a subclass of FoldingSetNode and implement a Profile
/// function with signature
-/// void Profile(llvm::FoldingSetNodeID &, Ctx);
+/// void Profile(FoldingSetNodeID &, Ctx);
template <class T, class Ctx>
class ContextualFoldingSet final : public FoldingSetImpl {
// Unfortunately, this can't derive from FoldingSet<T> because the
@@ -501,12 +509,14 @@ private:
T *TN = static_cast<T *>(N);
ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
}
+
bool NodeEquals(FoldingSetImpl::Node *N, const FoldingSetNodeID &ID,
unsigned IDHash, FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
Context);
}
+
unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
@@ -558,7 +568,7 @@ public:
/// to provide the interface of FoldingSet but with deterministic iteration
/// order based on the insertion order. T must be a subclass of FoldingSetNode
/// and implement a Profile function.
-template <class T, class VectorT = SmallVector<T*, 8> >
+template <class T, class VectorT = SmallVector<T*, 8>>
class FoldingSetVector {
FoldingSet<T> Set;
VectorT Vector;
@@ -623,7 +633,9 @@ public:
class FoldingSetIteratorImpl {
protected:
FoldingSetNode *NodePtr;
+
FoldingSetIteratorImpl(void **Bucket);
+
void advance();
public:
@@ -754,11 +766,12 @@ template<typename T> struct FoldingSetTrait<T*> {
template <typename T1, typename T2>
struct FoldingSetTrait<std::pair<T1, T2>> {
static inline void Profile(const std::pair<T1, T2> &P,
- llvm::FoldingSetNodeID &ID) {
+ FoldingSetNodeID &ID) {
ID.Add(P.first);
ID.Add(P.second);
}
};
-} // End of namespace llvm.
-#endif
+} // end namespace llvm
+
+#endif // LLVM_ADT_FOLDINGSET_H
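
A compact sketch of the Profile-based uniquing contract described above (InternedPair and getOrCreate are illustrative names; real code would normally allocate nodes from a BumpPtrAllocator rather than plain new):

#include "llvm/ADT/FoldingSet.h"

// A node is uniqued on (First, Second); Profile() must feed exactly the bits
// that define identity into the FoldingSetNodeID.
struct InternedPair : llvm::FoldingSetNode {
  unsigned First, Second;
  InternedPair(unsigned F, unsigned S) : First(F), Second(S) {}

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(First);
    ID.AddInteger(Second);
  }
};

static InternedPair *getOrCreate(llvm::FoldingSet<InternedPair> &Set,
                                 unsigned F, unsigned S) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(F);
  ID.AddInteger(S);
  void *InsertPos = nullptr;
  if (InternedPair *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;
  auto *New = new InternedPair(F, S); // lifetime management elided
  Set.InsertNode(New, InsertPos);
  return New;
}
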
diff --git a/include/llvm/ADT/GraphTraits.h b/include/llvm/ADT/GraphTraits.h
index eb67b7c83659..29bbcb010eee 100644
--- a/include/llvm/ADT/GraphTraits.h
+++ b/include/llvm/ADT/GraphTraits.h
@@ -27,14 +27,10 @@ template<class GraphType>
struct GraphTraits {
// Elements to provide:
- // NOTICE: We are in a transition from migration interfaces that require
- // NodeType *, to NodeRef. NodeRef is required to be cheap to copy, but does
- // not have to be a raw pointer. In the transition, user should define
- // NodeType, and NodeRef = NodeType *.
- //
- // typedef NodeType - Type of Node in the graph
- // typedef NodeRef - NodeType *
- // typedef ChildIteratorType - Type used to iterate over children in graph
+ // typedef NodeRef - Type of Node token in the graph, which should
+ // be cheap to copy.
+ // typedef ChildIteratorType - Type used to iterate over children in graph,
+ // dereference to a NodeRef.
// static NodeRef getEntryNode(const GraphType &)
// Return the entry node of the graph
@@ -45,7 +41,7 @@ struct GraphTraits {
// node list for the specified node.
//
- // typedef ...iterator nodes_iterator;
+ // typedef ...iterator nodes_iterator; - dereference to a NodeRef
// static nodes_iterator nodes_begin(GraphType *G)
// static nodes_iterator nodes_end (GraphType *G)
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
@@ -88,23 +84,7 @@ struct Inverse {
// Provide a partial specialization of GraphTraits so that the inverse of an
// inverse falls back to the original graph.
-template<class T>
-struct GraphTraits<Inverse<Inverse<T> > > {
- typedef typename GraphTraits<T>::NodeType NodeType;
- typedef typename GraphTraits<T>::ChildIteratorType ChildIteratorType;
-
- static NodeType *getEntryNode(Inverse<Inverse<T> > *G) {
- return GraphTraits<T>::getEntryNode(G->Graph.Graph);
- }
-
- static ChildIteratorType child_begin(NodeType* N) {
- return GraphTraits<T>::child_begin(N);
- }
-
- static ChildIteratorType child_end(NodeType* N) {
- return GraphTraits<T>::child_end(N);
- }
-};
+template <class T> struct GraphTraits<Inverse<Inverse<T>>> : GraphTraits<T> {};
} // End llvm namespace
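
A minimal specialization following the NodeRef-only contract documented above, for a hypothetical adjacency-list graph (ToyGraph is not an LLVM type). Once this exists, the graph can be walked with the generic iterators, e.g. llvm::depth_first(&G):

#include "llvm/ADT/GraphTraits.h"
#include <vector>

struct ToyGraph {
  struct Node {
    std::vector<Node *> Succs;
  };
  Node *Entry;
};

namespace llvm {
template <> struct GraphTraits<ToyGraph *> {
  typedef ToyGraph::Node *NodeRef;  // cheap to copy, as required
  typedef std::vector<ToyGraph::Node *>::iterator ChildIteratorType;

  static NodeRef getEntryNode(ToyGraph *G) { return G->Entry; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Succs.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
};
} // end namespace llvm
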
diff --git a/include/llvm/ADT/ImmutableList.h b/include/llvm/ADT/ImmutableList.h
index a1d26bd97045..e5f51bafe995 100644
--- a/include/llvm/ADT/ImmutableList.h
+++ b/include/llvm/ADT/ImmutableList.h
@@ -16,8 +16,9 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/DataTypes.h"
#include <cassert>
+#include <cstdint>
+#include <new>
namespace llvm {
@@ -25,18 +26,18 @@ template <typename T> class ImmutableListFactory;
template <typename T>
class ImmutableListImpl : public FoldingSetNode {
+ friend class ImmutableListFactory<T>;
+
T Head;
const ImmutableListImpl* Tail;
ImmutableListImpl(const T& head, const ImmutableListImpl* tail = nullptr)
: Head(head), Tail(tail) {}
- friend class ImmutableListFactory<T>;
-
- void operator=(const ImmutableListImpl&) = delete;
- ImmutableListImpl(const ImmutableListImpl&) = delete;
-
public:
+ ImmutableListImpl(const ImmutableListImpl &) = delete;
+ ImmutableListImpl &operator=(const ImmutableListImpl &) = delete;
+
const T& getHead() const { return Head; }
const ImmutableListImpl* getTail() const { return Tail; }
@@ -79,15 +80,17 @@ public:
}
class iterator {
- const ImmutableListImpl<T>* L;
+ const ImmutableListImpl<T>* L = nullptr;
+
public:
- iterator() : L(nullptr) {}
+ iterator() = default;
iterator(ImmutableList l) : L(l.getInternalPointer()) {}
iterator& operator++() { L = L->getTail(); return *this; }
bool operator==(const iterator& I) const { return L == I.L; }
bool operator!=(const iterator& I) const { return L != I.L; }
const value_type& operator*() const { return L->getHead(); }
+
ImmutableList getList() const { return L; }
};
@@ -121,7 +124,7 @@ public:
/// getHead - Returns the head of the list.
const T& getHead() {
- assert (!isEmpty() && "Cannot get the head of an empty list.");
+ assert(!isEmpty() && "Cannot get the head of an empty list.");
return X->getHead();
}
@@ -145,7 +148,7 @@ class ImmutableListFactory {
uintptr_t Allocator;
bool ownsAllocator() const {
- return Allocator & 0x1 ? false : true;
+ return (Allocator & 0x1) == 0;
}
BumpPtrAllocator& getAllocator() const {
@@ -203,18 +206,21 @@ public:
//===----------------------------------------------------------------------===//
template<typename T> struct DenseMapInfo;
-template<typename T> struct DenseMapInfo<ImmutableList<T> > {
+template<typename T> struct DenseMapInfo<ImmutableList<T>> {
static inline ImmutableList<T> getEmptyKey() {
return reinterpret_cast<ImmutableListImpl<T>*>(-1);
}
+
static inline ImmutableList<T> getTombstoneKey() {
return reinterpret_cast<ImmutableListImpl<T>*>(-2);
}
+
static unsigned getHashValue(ImmutableList<T> X) {
uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
return (unsigned((uintptr_t)PtrVal) >> 4) ^
(unsigned((uintptr_t)PtrVal) >> 9);
}
+
static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
return X1 == X2;
}
@@ -222,8 +228,8 @@ template<typename T> struct DenseMapInfo<ImmutableList<T> > {
template <typename T> struct isPodLike;
template <typename T>
-struct isPodLike<ImmutableList<T> > { static const bool value = true; };
+struct isPodLike<ImmutableList<T>> { static const bool value = true; };
-} // end llvm namespace
+} // end namespace llvm
#endif // LLVM_ADT_IMMUTABLELIST_H
diff --git a/include/llvm/ADT/ImmutableMap.h b/include/llvm/ADT/ImmutableMap.h
index 7480cd73da61..f197d407ba3b 100644
--- a/include/llvm/ADT/ImmutableMap.h
+++ b/include/llvm/ADT/ImmutableMap.h
@@ -14,7 +14,10 @@
#ifndef LLVM_ADT_IMMUTABLEMAP_H
#define LLVM_ADT_IMMUTABLEMAP_H
+#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/Allocator.h"
+#include <utility>
namespace llvm {
@@ -56,7 +59,7 @@ struct ImutKeyValueInfo {
};
template <typename KeyT, typename ValT,
- typename ValInfo = ImutKeyValueInfo<KeyT,ValT> >
+ typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
class ImmutableMap {
public:
typedef typename ValInfo::value_type value_type;
@@ -106,6 +109,9 @@ public:
Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
: F(Alloc), Canonicalize(canonicalize) {}
+ Factory(const Factory &) = delete;
+ Factory &operator=(const Factory &) = delete;
+
ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
@@ -121,10 +127,6 @@ public:
typename TreeTy::Factory *getTreeFactory() const {
return const_cast<typename TreeTy::Factory *>(&F);
}
-
- private:
- Factory(const Factory& RHS) = delete;
- void operator=(const Factory& RHS) = delete;
};
bool contains(key_type_ref K) const {
@@ -203,9 +205,10 @@ public:
//===--------------------------------------------------===//
class iterator : public ImutAVLValueIterator<ImmutableMap> {
+ friend class ImmutableMap;
+
iterator() = default;
explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
- friend class ImmutableMap;
public:
key_type_ref getKey() const { return (*this)->first; }
@@ -248,7 +251,7 @@ public:
// NOTE: This will possibly become the new implementation of ImmutableMap some day.
template <typename KeyT, typename ValT,
-typename ValInfo = ImutKeyValueInfo<KeyT,ValT> >
+typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
class ImmutableMapRef {
public:
typedef typename ValInfo::value_type value_type;
@@ -362,9 +365,10 @@ public:
//===--------------------------------------------------===//
class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
+ friend class ImmutableMapRef;
+
iterator() = default;
explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
- friend class ImmutableMapRef;
public:
key_type_ref getKey() const { return (*this)->first; }
diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h
index 87026f019fec..0724a28306a0 100644
--- a/include/llvm/ADT/ImmutableSet.h
+++ b/include/llvm/ADT/ImmutableSet.h
@@ -16,12 +16,16 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <functional>
#include <vector>
+#include <cstdint>
+#include <iterator>
+#include <new>
namespace llvm {
@@ -329,11 +333,13 @@ private:
public:
void retain() { ++refCount; }
+
void release() {
assert(refCount > 0);
if (--refCount == 0)
destroy();
}
+
void destroy() {
if (left)
left->release();
@@ -375,7 +381,7 @@ class ImutAVLFactory {
std::vector<TreeTy*> freeNodes;
bool ownsAllocator() const {
- return Allocator & 0x1 ? false : true;
+ return (Allocator & 0x1) == 0;
}
BumpPtrAllocator& getAllocator() const {
@@ -414,7 +420,6 @@ public:
TreeTy* getEmptyTree() const { return nullptr; }
protected:
-
//===--------------------------------------------------===//
// A bunch of quick helper functions used for reasoning
// about the properties of trees and their children.
@@ -649,13 +654,14 @@ class ImutAVLTreeGenericIterator
: public std::iterator<std::bidirectional_iterator_tag,
ImutAVLTree<ImutInfo>> {
SmallVector<uintptr_t,20> stack;
+
public:
enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
Flags=0x3 };
typedef ImutAVLTree<ImutInfo> TreeTy;
- ImutAVLTreeGenericIterator() {}
+ ImutAVLTreeGenericIterator() = default;
ImutAVLTreeGenericIterator(const TreeTy *Root) {
if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root));
}
@@ -671,7 +677,6 @@ public:
return stack.back() & Flags;
}
-
bool atEnd() const { return stack.empty(); }
bool atBeginning() const {
@@ -881,7 +886,6 @@ struct ImutProfileInfo<bool> {
}
};
-
/// Generic profile trait for pointer types. We treat pointers as
/// references to unique objects.
template <typename T>
@@ -901,7 +905,6 @@ struct ImutProfileInfo<T*> {
// for element profiling.
//===----------------------------------------------------------------------===//
-
/// ImutContainerInfo - Generic definition of comparison operations for
/// elements of immutable containers that defaults to using
/// std::equal_to<> and std::less<> to perform comparison of elements.
@@ -954,7 +957,7 @@ struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
// Immutable Set
//===----------------------------------------------------------------------===//
-template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> >
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSet {
public:
typedef typename ValInfo::value_type value_type;
@@ -972,9 +975,11 @@ public:
explicit ImmutableSet(TreeTy* R) : Root(R) {
if (Root) { Root->retain(); }
}
+
ImmutableSet(const ImmutableSet &X) : Root(X.Root) {
if (Root) { Root->retain(); }
}
+
ImmutableSet &operator=(const ImmutableSet &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@@ -983,6 +988,7 @@ public:
}
return *this;
}
+
~ImmutableSet() {
if (Root) { Root->release(); }
}
@@ -998,6 +1004,9 @@ public:
Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
: F(Alloc), Canonicalize(canonicalize) {}
+ Factory(const Factory& RHS) = delete;
+ void operator=(const Factory& RHS) = delete;
+
/// getEmptySet - Returns an immutable set that contains no elements.
ImmutableSet getEmptySet() {
return ImmutableSet(F.getEmptyTree());
@@ -1032,10 +1041,6 @@ public:
typename TreeTy::Factory *getTreeFactory() const {
return const_cast<typename TreeTy::Factory *>(&F);
}
-
- private:
- Factory(const Factory& RHS) = delete;
- void operator=(const Factory& RHS) = delete;
};
friend class Factory;
@@ -1104,7 +1109,7 @@ public:
};
// NOTE: This may some day replace the current ImmutableSet.
-template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> >
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSetRef {
public:
typedef typename ValInfo::value_type value_type;
@@ -1126,11 +1131,13 @@ public:
Factory(F) {
if (Root) { Root->retain(); }
}
+
ImmutableSetRef(const ImmutableSetRef &X)
: Root(X.Root),
Factory(X.Factory) {
if (Root) { Root->retain(); }
}
+
ImmutableSetRef &operator=(const ImmutableSetRef &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@@ -1215,4 +1222,4 @@ public:
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_IMMUTABLESET_H
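
A short usage sketch of the factory-based persistent-set interface, assuming the default-constructed Factory that owns its own allocator (demo is illustrative). Every add() returns a new set value that shares structure with its input; neither input is modified:

#include "llvm/ADT/ImmutableSet.h"
#include <cassert>

static void demo() {
  llvm::ImmutableSet<int>::Factory F;  // owns all tree nodes
  llvm::ImmutableSet<int> Empty = F.getEmptySet();
  llvm::ImmutableSet<int> S1 = F.add(Empty, 1);
  llvm::ImmutableSet<int> S2 = F.add(S1, 2);
  assert(S2.contains(1) && S2.contains(2));
  assert(!S1.contains(2) && "adding never mutates an existing set");
}
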
diff --git a/include/llvm/ADT/IntervalMap.h b/include/llvm/ADT/IntervalMap.h
index f8843b2a4e50..430b9671bd1d 100644
--- a/include/llvm/ADT/IntervalMap.h
+++ b/include/llvm/ADT/IntervalMap.h
@@ -104,11 +104,14 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/RecyclingAllocator.h"
+#include <algorithm>
+#include <cassert>
#include <iterator>
+#include <new>
+#include <utility>
namespace llvm {
-
//===----------------------------------------------------------------------===//
//--- Key traits ---//
//===----------------------------------------------------------------------===//
@@ -131,7 +134,6 @@ namespace llvm {
template <typename T>
struct IntervalMapInfo {
-
/// startLess - Return true if x is not in [a;b].
/// This is x < a both for closed intervals and for [a;b) half-open intervals.
static inline bool startLess(const T &x, const T &a) {
@@ -150,11 +152,15 @@ struct IntervalMapInfo {
return a+1 == b;
}
+ /// nonEmpty - Return true if [a;b] is non-empty.
+ /// This is a <= b for a closed interval, a < b for [a;b) half-open intervals.
+ static inline bool nonEmpty(const T &a, const T &b) {
+ return a <= b;
+ }
};
template <typename T>
struct IntervalMapHalfOpenInfo {
-
/// startLess - Return true if x is not in [a;b).
static inline bool startLess(const T &x, const T &a) {
return x < a;
@@ -170,19 +176,18 @@ struct IntervalMapHalfOpenInfo {
return a == b;
}
+ /// nonEmpty - Return true if [a;b) is non-empty.
+ static inline bool nonEmpty(const T &a, const T &b) {
+ return a < b;
+ }
};
/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
/// It should be considered private to the implementation.
namespace IntervalMapImpl {
-// Forward declarations.
-template <typename, typename, unsigned, typename> class LeafNode;
-template <typename, typename, unsigned, typename> class BranchNode;
-
typedef std::pair<unsigned,unsigned> IdxPair;
-
//===----------------------------------------------------------------------===//
//--- IntervalMapImpl::NodeBase ---//
//===----------------------------------------------------------------------===//
@@ -406,7 +411,6 @@ IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
const unsigned *CurSize, unsigned NewSize[],
unsigned Position, bool Grow);
-
//===----------------------------------------------------------------------===//
//--- IntervalMapImpl::NodeSizer ---//
//===----------------------------------------------------------------------===//
@@ -459,10 +463,8 @@ struct NodeSizer {
/// different kinds of maps.
typedef RecyclingAllocator<BumpPtrAllocator, char,
AllocBytes, CacheLineBytes> Allocator;
-
};
-
//===----------------------------------------------------------------------===//
//--- IntervalMapImpl::NodeRef ---//
//===----------------------------------------------------------------------===//
@@ -494,7 +496,7 @@ class NodeRef {
public:
/// NodeRef - Create a null ref.
- NodeRef() {}
+ NodeRef() = default;
/// operator bool - Detect a null ref.
explicit operator bool() const { return pip.getOpaqueValue(); }
@@ -674,7 +676,6 @@ insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
return Size + 1;
}
-
//===----------------------------------------------------------------------===//
//--- IntervalMapImpl::BranchNode ---//
//===----------------------------------------------------------------------===//
@@ -919,8 +920,7 @@ public:
}
};
-} // namespace IntervalMapImpl
-
+} // end namespace IntervalMapImpl
//===----------------------------------------------------------------------===//
//--- IntervalMap ----//
@@ -928,7 +928,7 @@ public:
template <typename KeyT, typename ValT,
unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
- typename Traits = IntervalMapInfo<KeyT> >
+ typename Traits = IntervalMapInfo<KeyT>>
class IntervalMap {
typedef IntervalMapImpl::NodeSizer<KeyT, ValT> Sizer;
typedef IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits> Leaf;
@@ -995,6 +995,7 @@ private:
assert(!branched() && "Cannot acces leaf data in branched root");
return dataAs<RootLeaf>();
}
+
RootBranchData &rootBranchData() const {
assert(branched() && "Cannot access branch data in non-branched root");
return dataAs<RootBranchData>();
@@ -1003,6 +1004,7 @@ private:
assert(branched() && "Cannot access branch data in non-branched root");
return dataAs<RootBranchData>();
}
+
const RootBranch &rootBranch() const { return rootBranchData().node; }
RootBranch &rootBranch() { return rootBranchData().node; }
KeyT rootBranchStart() const { return rootBranchData().start; }
@@ -1041,7 +1043,7 @@ private:
public:
explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
- assert((uintptr_t(data.buffer) & (alignOf<RootLeaf>() - 1)) == 0 &&
+ assert((uintptr_t(data.buffer) & (alignof(RootLeaf) - 1)) == 0 &&
"Insufficient alignment");
new(&rootLeaf()) RootLeaf();
}
@@ -1149,7 +1151,6 @@ treeSafeLookup(KeyT x, ValT NotFound) const {
return NR.get<Leaf>().safeLookup(x, NotFound);
}
-
// branchRoot - Switch from a leaf root to a branched root.
// Return the new (root offset, node offset) corresponding to Position.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
@@ -1284,6 +1285,7 @@ clear() {
template <typename KeyT, typename ValT, unsigned N, typename Traits>
class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
public std::iterator<std::bidirectional_iterator_tag, ValT> {
+
protected:
friend class IntervalMap;
@@ -1436,7 +1438,6 @@ public:
path.leafOffset() =
map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
}
-
};
/// pathFillFind - Complete path by searching for x.
@@ -1523,7 +1524,7 @@ class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
public:
/// iterator - Create null iterator.
- iterator() {}
+ iterator() = default;
/// setStart - Move the start of the current interval.
/// This may cause coalescing with the previous interval.
@@ -1589,7 +1590,6 @@ public:
operator--();
return tmp;
}
-
};
/// canCoalesceLeft - Can the current interval coalesce to the left after
@@ -1669,7 +1669,7 @@ iterator::setNodeStop(unsigned Level, KeyT Stop) {
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setStart(KeyT a) {
- assert(Traits::stopLess(a, this->stop()) && "Cannot move start beyond stop");
+ assert(Traits::nonEmpty(a, this->stop()) && "Cannot move start beyond stop");
KeyT &CurStart = this->unsafeStart();
if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
CurStart = a;
@@ -1685,7 +1685,7 @@ iterator::setStart(KeyT a) {
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setStop(KeyT b) {
- assert(Traits::stopLess(this->start(), b) && "Cannot move stop beyond start");
+ assert(Traits::nonEmpty(this->start(), b) && "Cannot move stop beyond start");
if (Traits::startLess(b, this->stop()) ||
!canCoalesceRight(b, this->value())) {
setStopUnchecked(b);
@@ -1790,7 +1790,6 @@ iterator::insert(KeyT a, KeyT b, ValT y) {
treeInsert(a, b, y);
}
-
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::treeInsert(KeyT a, KeyT b, ValT y) {
@@ -2151,6 +2150,6 @@ public:
}
};
-} // namespace llvm
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_INTERVALMAP_H
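
The new nonEmpty() trait is what the iterator's setStart/setStop assertions rely on: the closed-interval traits treat [a;a] as a one-element interval, while the half-open traits require a < b. A small sketch using half-open intervals (the typedef and function are illustrative):

#include "llvm/ADT/IntervalMap.h"

typedef llvm::IntervalMap<unsigned, char, 16,
                          llvm::IntervalMapHalfOpenInfo<unsigned>>
    ByteRangeMap;

static char classify(unsigned Addr) {
  ByteRangeMap::Allocator Alloc;
  ByteRangeMap Map(Alloc);
  Map.insert(0, 4, 'a');
  Map.insert(4, 8, 'a');          // adjacent and equal-valued: coalesces to [0;8)
  return Map.lookup(Addr, '\0');  // '\0' for addresses outside any interval
}
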
diff --git a/include/llvm/ADT/IntrusiveRefCntPtr.h b/include/llvm/ADT/IntrusiveRefCntPtr.h
index 8057ec10be00..559fb40773aa 100644
--- a/include/llvm/ADT/IntrusiveRefCntPtr.h
+++ b/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -7,14 +7,49 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines IntrusiveRefCntPtr, a template class that
-// implements a "smart" pointer for objects that maintain their own
-// internal reference count, and RefCountedBase/RefCountedBaseVPTR, two
-// generic base classes for objects that wish to have their lifetimes
-// managed using reference counting.
+// This file defines the RefCountedBase, ThreadSafeRefCountedBase, and
+// IntrusiveRefCntPtr classes.
//
-// IntrusiveRefCntPtr is similar to Boost's intrusive_ptr with added
-// LLVM-style casting.
+// IntrusiveRefCntPtr is a smart pointer to an object which maintains a
+// reference count. (ThreadSafe)RefCountedBase is a mixin class that adds a
+// refcount member variable and methods for updating the refcount. An object
+// that inherits from (ThreadSafe)RefCountedBase deletes itself when its
+// refcount hits zero.
+//
+// For example:
+//
+// class MyClass : public RefCountedBase<MyClass> {};
+//
+// void foo() {
+// // Objects that inherit from RefCountedBase should always be instantiated
+// // on the heap, never on the stack.
+// IntrusiveRefCntPtr<MyClass> Ptr1(new MyClass());
+//
+// // Copying an IntrusiveRefCntPtr increases the pointee's refcount by 1.
+// IntrusiveRefCntPtr<MyClass> Ptr2(Ptr1);
+//
+//    // Move-constructing an IntrusiveRefCntPtr has no effect on the object's
+// // refcount. After a move, the moved-from pointer is null.
+// IntrusiveRefCntPtr<MyClass> Ptr3(std::move(Ptr1));
+// assert(Ptr1 == nullptr);
+//
+// // Clearing an IntrusiveRefCntPtr decreases the pointee's refcount by 1.
+// Ptr2.reset();
+//
+// // The object deletes itself when we return from the function, because
+// // Ptr3's destructor decrements its refcount to 0.
+// }
+//
+// You can use IntrusiveRefCntPtr with isa<T>(), dyn_cast<T>(), etc.:
+//
+// IntrusiveRefCntPtr<MyClass> Ptr(new MyClass());
+// OtherClass *Other = dyn_cast<OtherClass>(Ptr); // Ptr.get() not required
+//
+// IntrusiveRefCntPtr works with any class that
+//
+// - inherits from (ThreadSafe)RefCountedBase,
+// - has Retain() and Release() methods, or
+// - specializes IntrusiveRefCntPtrInfo.
//
//===----------------------------------------------------------------------===//
@@ -27,261 +62,207 @@
namespace llvm {
- template <class T>
- class IntrusiveRefCntPtr;
-
-//===----------------------------------------------------------------------===//
-/// RefCountedBase - A generic base class for objects that wish to
-/// have their lifetimes managed using reference counts. Classes
-/// subclass RefCountedBase to obtain such functionality, and are
-/// typically handled with IntrusiveRefCntPtr "smart pointers" (see below)
-/// which automatically handle the management of reference counts.
-/// Objects that subclass RefCountedBase should not be allocated on
-/// the stack, as invoking "delete" (which is called when the
-/// reference count hits 0) on such objects is an error.
-//===----------------------------------------------------------------------===//
- template <class Derived>
- class RefCountedBase {
- mutable unsigned ref_cnt;
-
- public:
- RefCountedBase() : ref_cnt(0) {}
- RefCountedBase(const RefCountedBase &) : ref_cnt(0) {}
-
- void Retain() const { ++ref_cnt; }
- void Release() const {
- assert (ref_cnt > 0 && "Reference count is already zero.");
- if (--ref_cnt == 0) delete static_cast<const Derived*>(this);
- }
- };
-
-//===----------------------------------------------------------------------===//
-/// RefCountedBaseVPTR - A class that has the same function as
-/// RefCountedBase, but with a virtual destructor. Should be used
-/// instead of RefCountedBase for classes that already have virtual
-/// methods to enforce dynamic allocation via 'new'. Classes that
-/// inherit from RefCountedBaseVPTR can't be allocated on stack -
-/// attempting to do this will produce a compile error.
-//===----------------------------------------------------------------------===//
- class RefCountedBaseVPTR {
- mutable unsigned ref_cnt;
- virtual void anchor();
-
- protected:
- RefCountedBaseVPTR() : ref_cnt(0) {}
- RefCountedBaseVPTR(const RefCountedBaseVPTR &) : ref_cnt(0) {}
-
- virtual ~RefCountedBaseVPTR() {}
-
- void Retain() const { ++ref_cnt; }
- void Release() const {
- assert (ref_cnt > 0 && "Reference count is already zero.");
- if (--ref_cnt == 0) delete this;
- }
-
- template <typename T>
- friend struct IntrusiveRefCntPtrInfo;
- };
+/// A CRTP mixin class that adds reference counting to a type.
+///
+/// The lifetime of an object which inherits from RefCountedBase is managed by
+/// calls to Release() and Retain(), which increment and decrement the object's
+/// refcount, respectively. When a Release() call decrements the refcount to 0,
+/// the object deletes itself.
+///
+/// Objects that inherit from RefCountedBase should always be allocated with
+/// operator new.
+template <class Derived> class RefCountedBase {
+ mutable unsigned RefCount = 0;
+public:
+ RefCountedBase() = default;
+ RefCountedBase(const RefCountedBase &) : RefCount(0) {}
- template <typename T> struct IntrusiveRefCntPtrInfo {
- static void retain(T *obj) { obj->Retain(); }
- static void release(T *obj) { obj->Release(); }
- };
+ void Retain() const { ++RefCount; }
+ void Release() const {
+ assert(RefCount > 0 && "Reference count is already zero.");
+ if (--RefCount == 0)
+ delete static_cast<const Derived *>(this);
+ }
+};
-/// \brief A thread-safe version of \c llvm::RefCountedBase.
-///
-/// A generic base class for objects that wish to have their lifetimes managed
-/// using reference counts. Classes subclass \c ThreadSafeRefCountedBase to
-/// obtain such functionality, and are typically handled with
-/// \c IntrusiveRefCntPtr "smart pointers" which automatically handle the
-/// management of reference counts.
-template <class Derived>
-class ThreadSafeRefCountedBase {
+/// A thread-safe version of \c RefCountedBase.
+template <class Derived> class ThreadSafeRefCountedBase {
mutable std::atomic<int> RefCount;
protected:
ThreadSafeRefCountedBase() : RefCount(0) {}
public:
- void Retain() const { ++RefCount; }
+ void Retain() const { RefCount.fetch_add(1, std::memory_order_relaxed); }
void Release() const {
- int NewRefCount = --RefCount;
+ int NewRefCount = RefCount.fetch_sub(1, std::memory_order_acq_rel) - 1;
assert(NewRefCount >= 0 && "Reference count was already zero.");
if (NewRefCount == 0)
- delete static_cast<const Derived*>(this);
+ delete static_cast<const Derived *>(this);
}
};
-//===----------------------------------------------------------------------===//
-/// IntrusiveRefCntPtr - A template class that implements a "smart pointer"
-/// that assumes the wrapped object has a reference count associated
-/// with it that can be managed via calls to
-/// IntrusivePtrAddRef/IntrusivePtrRelease. The smart pointers
-/// manage reference counts via the RAII idiom: upon creation of
-/// smart pointer the reference count of the wrapped object is
-/// incremented and upon destruction of the smart pointer the
-/// reference count is decremented. This class also safely handles
-/// wrapping NULL pointers.
+/// Class you can specialize to provide custom retain/release functionality for
+/// a type.
///
-/// Reference counting is implemented via calls to
-/// Obj->Retain()/Obj->Release(). Release() is required to destroy
-/// the object when the reference count reaches zero. Inheriting from
-/// RefCountedBase/RefCountedBaseVPTR takes care of this
-/// automatically.
-//===----------------------------------------------------------------------===//
- template <typename T>
- class IntrusiveRefCntPtr {
- T* Obj;
-
- public:
- typedef T element_type;
-
- explicit IntrusiveRefCntPtr() : Obj(nullptr) {}
-
- IntrusiveRefCntPtr(T* obj) : Obj(obj) {
- retain();
- }
-
- IntrusiveRefCntPtr(const IntrusiveRefCntPtr& S) : Obj(S.Obj) {
- retain();
- }
-
- IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
- S.Obj = nullptr;
- }
-
- template <class X>
- IntrusiveRefCntPtr(IntrusiveRefCntPtr<X>&& S) : Obj(S.get()) {
- S.Obj = nullptr;
- }
-
- template <class X>
- IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
- : Obj(S.get()) {
- retain();
- }
-
- IntrusiveRefCntPtr& operator=(IntrusiveRefCntPtr S) {
- swap(S);
- return *this;
- }
-
- ~IntrusiveRefCntPtr() { release(); }
-
- T& operator*() const { return *Obj; }
-
- T* operator->() const { return Obj; }
-
- T* get() const { return Obj; }
-
- explicit operator bool() const { return Obj; }
-
- void swap(IntrusiveRefCntPtr& other) {
- T* tmp = other.Obj;
- other.Obj = Obj;
- Obj = tmp;
- }
-
- void reset() {
- release();
- Obj = nullptr;
- }
+/// Usually specializing this class is not necessary, as IntrusiveRefCntPtr
+/// works with any type which defines Retain() and Release() functions -- you
+/// can define those functions yourself if RefCountedBase doesn't work for you.
+///
+/// One case when you might want to specialize this type is if you have
+/// - Foo.h defines type Foo and includes Bar.h, and
+/// - Bar.h uses IntrusiveRefCntPtr<Foo> in inline functions.
+///
+/// Because Foo.h includes Bar.h, Bar.h can't include Foo.h in order to pull in
+/// the declaration of Foo. Without the declaration of Foo, normally Bar.h
+/// wouldn't be able to use IntrusiveRefCntPtr<Foo>, which wants to call
+/// T::Retain and T::Release.
+///
+/// To resolve this, Bar.h could include a third header, FooFwd.h, which
+/// forward-declares Foo and specializes IntrusiveRefCntPtrInfo<Foo>. Then
+/// Bar.h could use IntrusiveRefCntPtr<Foo>, although it still couldn't call any
+/// functions on Foo itself, because Foo would be an incomplete type.
+template <typename T> struct IntrusiveRefCntPtrInfo {
+ static void retain(T *obj) { obj->Retain(); }
+ static void release(T *obj) { obj->Release(); }
+};
- void resetWithoutRelease() {
- Obj = nullptr;
- }
+/// A smart pointer to a reference-counted object that inherits from
+/// RefCountedBase or ThreadSafeRefCountedBase.
+///
+/// This class increments its pointee's reference count when it is created, and
+/// decrements its refcount when it's destroyed (or is changed to point to a
+/// different object).
+template <typename T> class IntrusiveRefCntPtr {
+ T *Obj = nullptr;
- private:
- void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
- void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }
+public:
+ typedef T element_type;
- template <typename X>
- friend class IntrusiveRefCntPtr;
- };
+ explicit IntrusiveRefCntPtr() = default;
+ IntrusiveRefCntPtr(T *obj) : Obj(obj) { retain(); }
+ IntrusiveRefCntPtr(const IntrusiveRefCntPtr &S) : Obj(S.Obj) { retain(); }
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr &&S) : Obj(S.Obj) { S.Obj = nullptr; }
- template<class T, class U>
- inline bool operator==(const IntrusiveRefCntPtr<T>& A,
- const IntrusiveRefCntPtr<U>& B)
- {
- return A.get() == B.get();
+ template <class X>
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr<X> &&S) : Obj(S.get()) {
+ S.Obj = nullptr;
}
- template<class T, class U>
- inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
- const IntrusiveRefCntPtr<U>& B)
- {
- return A.get() != B.get();
+ template <class X>
+ IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X> &S) : Obj(S.get()) {
+ retain();
}
- template<class T, class U>
- inline bool operator==(const IntrusiveRefCntPtr<T>& A,
- U* B)
- {
- return A.get() == B;
+ IntrusiveRefCntPtr &operator=(IntrusiveRefCntPtr S) {
+ swap(S);
+ return *this;
}
- template<class T, class U>
- inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
- U* B)
- {
- return A.get() != B;
- }
+ ~IntrusiveRefCntPtr() { release(); }
- template<class T, class U>
- inline bool operator==(T* A,
- const IntrusiveRefCntPtr<U>& B)
- {
- return A == B.get();
- }
+ T &operator*() const { return *Obj; }
+ T *operator->() const { return Obj; }
+ T *get() const { return Obj; }
+ explicit operator bool() const { return Obj; }
- template<class T, class U>
- inline bool operator!=(T* A,
- const IntrusiveRefCntPtr<U>& B)
- {
- return A != B.get();
+ void swap(IntrusiveRefCntPtr &other) {
+ T *tmp = other.Obj;
+ other.Obj = Obj;
+ Obj = tmp;
}
- template <class T>
- bool operator==(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
- return !B;
+ void reset() {
+ release();
+ Obj = nullptr;
}
- template <class T>
- bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
- return B == A;
- }
+ void resetWithoutRelease() { Obj = nullptr; }
- template <class T>
- bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
- return !(A == B);
+private:
+ void retain() {
+ if (Obj)
+ IntrusiveRefCntPtrInfo<T>::retain(Obj);
}
-
- template <class T>
- bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
- return !(A == B);
+ void release() {
+ if (Obj)
+ IntrusiveRefCntPtrInfo<T>::release(Obj);
}
-//===----------------------------------------------------------------------===//
-// LLVM-style downcasting support for IntrusiveRefCntPtr objects
-//===----------------------------------------------------------------------===//
+ template <typename X> friend class IntrusiveRefCntPtr;
+};
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A,
+ const IntrusiveRefCntPtr<U> &B) {
+ return A.get() == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A,
+ const IntrusiveRefCntPtr<U> &B) {
+ return A.get() != B.get();
+}
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A, U *B) {
+ return A.get() == B;
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A, U *B) {
+ return A.get() != B;
+}
+
+template <class T, class U>
+inline bool operator==(T *A, const IntrusiveRefCntPtr<U> &B) {
+ return A == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(T *A, const IntrusiveRefCntPtr<U> &B) {
+ return A != B.get();
+}
+
+template <class T>
+bool operator==(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+ return !B;
+}
+
+template <class T>
+bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+ return B == A;
+}
+
+template <class T>
+bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+ return !(A == B);
+}
+
+template <class T>
+bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+ return !(A == B);
+}
+
+// Make IntrusiveRefCntPtr work with dyn_cast, isa, and the other idioms from
+// Casting.h.
+template <typename From> struct simplify_type;
+
+template <class T> struct simplify_type<IntrusiveRefCntPtr<T>> {
+ typedef T *SimpleType;
+ static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T> &Val) {
+ return Val.get();
+ }
+};
- template <typename From> struct simplify_type;
-
- template<class T> struct simplify_type<IntrusiveRefCntPtr<T> > {
- typedef T* SimpleType;
- static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T>& Val) {
- return Val.get();
- }
- };
-
- template<class T> struct simplify_type<const IntrusiveRefCntPtr<T> > {
- typedef /*const*/ T* SimpleType;
- static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T>& Val) {
- return Val.get();
- }
- };
+template <class T> struct simplify_type<const IntrusiveRefCntPtr<T>> {
+ typedef /*const*/ T *SimpleType;
+ static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T> &Val) {
+ return Val.get();
+ }
+};
} // end namespace llvm
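
A minimal usage sketch (not part of the patch) of the reworked CRTP RefCountedBase together with IntrusiveRefCntPtr; the Config type, its member, and useConfig are hypothetical names chosen only to illustrate the ownership pattern:

#include "llvm/ADT/IntrusiveRefCntPtr.h"

// Hypothetical refcounted type: inheriting from RefCountedBase<Config> gives it
// Retain()/Release() and self-deletion when the count reaches zero.
class Config : public llvm::RefCountedBase<Config> {
public:
  int Verbosity = 0;
};

void useConfig() {
  llvm::IntrusiveRefCntPtr<Config> A(new Config()); // refcount becomes 1
  llvm::IntrusiveRefCntPtr<Config> B = A;           // refcount becomes 2
  A.reset();                                        // refcount back to 1
} // B is destroyed here; the refcount hits 0 and the Config deletes itself
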
diff --git a/include/llvm/ADT/MapVector.h b/include/llvm/ADT/MapVector.h
index f19a50b7ba84..ac1885758cb9 100644
--- a/include/llvm/ADT/MapVector.h
+++ b/include/llvm/ADT/MapVector.h
@@ -30,6 +30,7 @@ template<typename KeyT, typename ValueT,
typename MapType = llvm::DenseMap<KeyT, unsigned>,
typename VectorType = std::vector<std::pair<KeyT, ValueT> > >
class MapVector {
+ typedef typename VectorType::value_type value_type;
typedef typename VectorType::size_type size_type;
MapType Map;
@@ -41,6 +42,12 @@ public:
typedef typename VectorType::reverse_iterator reverse_iterator;
typedef typename VectorType::const_reverse_iterator const_reverse_iterator;
+ /// Clear the MapVector and return the underlying vector.
+ VectorType takeVector() {
+ Map.clear();
+ return std::move(Vector);
+ }
+
size_type size() const { return Vector.size(); }
iterator begin() { return Vector.begin(); }
@@ -83,7 +90,10 @@ public:
return Vector[I].second;
}
+ // Returns a copy of the value. Only allowed if ValueT is copyable.
ValueT lookup(const KeyT &Key) const {
+ static_assert(std::is_copy_constructible<ValueT>::value,
+ "Cannot call lookup() if ValueT is not copyable.");
typename MapType::const_iterator Pos = Map.find(Key);
return Pos == Map.end()? ValueT() : Vector[Pos->second].second;
}
@@ -100,6 +110,19 @@ public:
return std::make_pair(begin() + I, false);
}
+ std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ // Copy KV.first into the map, then move it into the vector.
+ std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
+ std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+ unsigned &I = Result.first->second;
+ if (Result.second) {
+ Vector.push_back(std::move(KV));
+ I = Vector.size() - 1;
+ return std::make_pair(std::prev(end()), true);
+ }
+ return std::make_pair(begin() + I, false);
+ }
+
size_type count(const KeyT &Key) const {
typename MapType::const_iterator Pos = Map.find(Key);
return Pos == Map.end()? 0 : 1;
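
A short sketch (not part of the patch) of the two MapVector additions above, the rvalue insert() overload and takeVector(); the key/value types and useMapVector are arbitrary:

#include "llvm/ADT/MapVector.h"
#include <string>
#include <utility>
#include <vector>

void useMapVector() {
  llvm::MapVector<int, std::string> MV;
  // New rvalue overload: the key is copied into the map index, the pair is
  // moved into the vector.
  MV.insert(std::make_pair(1, std::string("one")));
  MV.insert(std::make_pair(2, std::string("two")));
  // takeVector() clears the map index and hands back the vector, preserving
  // insertion order.
  std::vector<std::pair<int, std::string>> Entries = MV.takeVector();
  (void)Entries;
}
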
diff --git a/include/llvm/ADT/Optional.h b/include/llvm/ADT/Optional.h
index d9acaf6d23b0..701872c9f63f 100644
--- a/include/llvm/ADT/Optional.h
+++ b/include/llvm/ADT/Optional.h
@@ -129,7 +129,7 @@ public:
T& operator*() LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
template <typename U>
- LLVM_CONSTEXPR T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
+ constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
return hasValue() ? getValue() : std::forward<U>(value);
}
@@ -150,18 +150,43 @@ template <typename T> struct isPodLike<Optional<T> > {
static const bool value = isPodLike<T>::value;
};
-/// \brief Poison comparison between two \c Optional objects. Clients needs to
-/// explicitly compare the underlying values and account for empty \c Optional
-/// objects.
-///
-/// This routine will never be defined. It returns \c void to help diagnose
-/// errors at compile time.
-template<typename T, typename U>
-void operator==(const Optional<T> &X, const Optional<U> &Y);
+template <typename T, typename U>
+bool operator==(const Optional<T> &X, const Optional<U> &Y) {
+ if (X && Y)
+ return *X == *Y;
+ return X.hasValue() == Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
+ return !(X == Y);
+}
+
+template <typename T, typename U>
+bool operator<(const Optional<T> &X, const Optional<U> &Y) {
+ if (X && Y)
+ return *X < *Y;
+ return X.hasValue() < Y.hasValue();
+}
+
+template <typename T, typename U>
+bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
+ return !(Y < X);
+}
+
+template <typename T, typename U>
+bool operator>(const Optional<T> &X, const Optional<U> &Y) {
+ return Y < X;
+}
+
+template <typename T, typename U>
+bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
+ return !(X < Y);
+}
template<typename T>
bool operator==(const Optional<T> &X, NoneType) {
- return !X.hasValue();
+ return !X;
}
template<typename T>
@@ -178,50 +203,86 @@ template<typename T>
bool operator!=(NoneType, const Optional<T> &X) {
return X != None;
}
-/// \brief Poison comparison between two \c Optional objects. Clients needs to
-/// explicitly compare the underlying values and account for empty \c Optional
-/// objects.
-///
-/// This routine will never be defined. It returns \c void to help diagnose
-/// errors at compile time.
-template<typename T, typename U>
-void operator!=(const Optional<T> &X, const Optional<U> &Y);
-
-/// \brief Poison comparison between two \c Optional objects. Clients needs to
-/// explicitly compare the underlying values and account for empty \c Optional
-/// objects.
-///
-/// This routine will never be defined. It returns \c void to help diagnose
-/// errors at compile time.
-template<typename T, typename U>
-void operator<(const Optional<T> &X, const Optional<U> &Y);
-
-/// \brief Poison comparison between two \c Optional objects. Clients needs to
-/// explicitly compare the underlying values and account for empty \c Optional
-/// objects.
-///
-/// This routine will never be defined. It returns \c void to help diagnose
-/// errors at compile time.
-template<typename T, typename U>
-void operator<=(const Optional<T> &X, const Optional<U> &Y);
-
-/// \brief Poison comparison between two \c Optional objects. Clients needs to
-/// explicitly compare the underlying values and account for empty \c Optional
-/// objects.
-///
-/// This routine will never be defined. It returns \c void to help diagnose
-/// errors at compile time.
-template<typename T, typename U>
-void operator>=(const Optional<T> &X, const Optional<U> &Y);
-
-/// \brief Poison comparison between two \c Optional objects. Clients needs to
-/// explicitly compare the underlying values and account for empty \c Optional
-/// objects.
-///
-/// This routine will never be defined. It returns \c void to help diagnose
-/// errors at compile time.
-template<typename T, typename U>
-void operator>(const Optional<T> &X, const Optional<U> &Y);
+
+template <typename T> bool operator<(const Optional<T> &X, NoneType) {
+ return false;
+}
+
+template <typename T> bool operator<(NoneType, const Optional<T> &X) {
+ return X.hasValue();
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, NoneType) {
+ return !(None < X);
+}
+
+template <typename T> bool operator<=(NoneType, const Optional<T> &X) {
+ return !(X < None);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, NoneType) {
+ return None < X;
+}
+
+template <typename T> bool operator>(NoneType, const Optional<T> &X) {
+ return X < None;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, NoneType) {
+ return None <= X;
+}
+
+template <typename T> bool operator>=(NoneType, const Optional<T> &X) {
+ return X <= None;
+}
+
+template <typename T> bool operator==(const Optional<T> &X, const T &Y) {
+ return X && *X == Y;
+}
+
+template <typename T> bool operator==(const T &X, const Optional<T> &Y) {
+ return Y && X == *Y;
+}
+
+template <typename T> bool operator!=(const Optional<T> &X, const T &Y) {
+ return !(X == Y);
+}
+
+template <typename T> bool operator!=(const T &X, const Optional<T> &Y) {
+ return !(X == Y);
+}
+
+template <typename T> bool operator<(const Optional<T> &X, const T &Y) {
+ return !X || *X < Y;
+}
+
+template <typename T> bool operator<(const T &X, const Optional<T> &Y) {
+ return Y && X < *Y;
+}
+
+template <typename T> bool operator<=(const Optional<T> &X, const T &Y) {
+ return !(Y < X);
+}
+
+template <typename T> bool operator<=(const T &X, const Optional<T> &Y) {
+ return !(Y < X);
+}
+
+template <typename T> bool operator>(const Optional<T> &X, const T &Y) {
+ return Y < X;
+}
+
+template <typename T> bool operator>(const T &X, const Optional<T> &Y) {
+ return Y < X;
+}
+
+template <typename T> bool operator>=(const Optional<T> &X, const T &Y) {
+ return !(X < Y);
+}
+
+template <typename T> bool operator>=(const T &X, const Optional<T> &Y) {
+ return !(X < Y);
+}
} // end llvm namespace
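
A sketch (not part of the patch) of how the formerly poisoned comparisons now behave: an empty Optional orders before any engaged one, and Optional<T> can be compared against T directly:

#include "llvm/ADT/Optional.h"

void useOptional() {
  llvm::Optional<int> A = 1, B = 2, Empty;
  bool Less = A < B;           // true: both engaged, compares the contained values
  bool EmptyFirst = Empty < A; // true: empty orders before engaged
  bool Mixed = (A == 1);       // true: mixed Optional<T>/T comparison is allowed
  (void)Less; (void)EmptyFirst; (void)Mixed;
}
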
diff --git a/include/llvm/ADT/PackedVector.h b/include/llvm/ADT/PackedVector.h
index 09267173fd77..8f925f1ff5cb 100644
--- a/include/llvm/ADT/PackedVector.h
+++ b/include/llvm/ADT/PackedVector.h
@@ -15,6 +15,7 @@
#define LLVM_ADT_PACKEDVECTOR_H
#include "llvm/ADT/BitVector.h"
+#include <cassert>
#include <limits>
namespace llvm {
@@ -83,14 +84,15 @@ public:
PackedVector &Vec;
const unsigned Idx;
- reference(); // Undefined
public:
+ reference() = delete;
reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) {}
reference &operator=(T val) {
Vec.setValue(Vec.Bits, Idx, val);
return *this;
}
+
operator T() const {
return Vec.getValue(Vec.Bits, Idx);
}
@@ -144,6 +146,6 @@ public:
// Leave BitNum=0 undefined.
template <typename T> class PackedVector<T, 0>;
-} // end llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_PACKEDVECTOR_H
diff --git a/include/llvm/ADT/PointerSumType.h b/include/llvm/ADT/PointerSumType.h
index 6b8618fc5a17..005b1c645c93 100644
--- a/include/llvm/ADT/PointerSumType.h
+++ b/include/llvm/ADT/PointerSumType.h
@@ -54,7 +54,7 @@ struct PointerSumTypeHelper;
///
/// It also default constructs to a zero tag with a null pointer, whatever that
/// would be. This means that the zero value for the tag type is significant
-/// and may be desireable to set to a state that is particularly desirable to
+/// and may be desirable to set to a state that is particularly desirable to
/// default construct.
///
/// There is no support for constructing or accessing with a dynamic tag as
diff --git a/include/llvm/ADT/PointerUnion.h b/include/llvm/ADT/PointerUnion.h
index 6b3fe5749ad5..a8ac18645f3a 100644
--- a/include/llvm/ADT/PointerUnion.h
+++ b/include/llvm/ADT/PointerUnion.h
@@ -17,7 +17,10 @@
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstdint>
+#include <cstddef>
namespace llvm {
@@ -57,6 +60,7 @@ template <typename PT1, typename PT2> class PointerUnionUIntTraits {
public:
static inline void *getAsVoidPointer(void *P) { return P; }
static inline void *getFromVoidPointer(void *P) { return P; }
+
enum {
PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
@@ -97,7 +101,7 @@ private:
template <typename T> struct UNION_DOESNT_CONTAIN_TYPE {};
public:
- PointerUnion() {}
+ PointerUnion() = default;
PointerUnion(PT1 V)
: Val(const_cast<void *>(
@@ -208,6 +212,7 @@ public:
static inline void *getAsVoidPointer(const PointerUnion<PT1, PT2> &P) {
return P.getOpaqueValue();
}
+
static inline PointerUnion<PT1, PT2> getFromVoidPointer(void *P) {
return PointerUnion<PT1, PT2>::getFromOpaqueValue(P);
}
@@ -249,7 +254,7 @@ private:
};
public:
- PointerUnion3() {}
+ PointerUnion3() = default;
PointerUnion3(PT1 V) { Val = InnerUnion(V); }
PointerUnion3(PT2 V) { Val = InnerUnion(V); }
@@ -328,6 +333,7 @@ public:
static inline void *getAsVoidPointer(const PointerUnion3<PT1, PT2, PT3> &P) {
return P.getOpaqueValue();
}
+
static inline PointerUnion3<PT1, PT2, PT3> getFromVoidPointer(void *P) {
return PointerUnion3<PT1, PT2, PT3>::getFromOpaqueValue(P);
}
@@ -352,7 +358,7 @@ private:
ValTy Val;
public:
- PointerUnion4() {}
+ PointerUnion4() = default;
PointerUnion4(PT1 V) { Val = InnerUnion1(V); }
PointerUnion4(PT2 V) { Val = InnerUnion1(V); }
@@ -435,6 +441,7 @@ public:
getAsVoidPointer(const PointerUnion4<PT1, PT2, PT3, PT4> &P) {
return P.getOpaqueValue();
}
+
static inline PointerUnion4<PT1, PT2, PT3, PT4> getFromVoidPointer(void *P) {
return PointerUnion4<PT1, PT2, PT3, PT4>::getFromOpaqueValue(P);
}
@@ -469,6 +476,6 @@ template <typename T, typename U> struct DenseMapInfo<PointerUnion<T, U>> {
}
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_POINTERUNION_H
diff --git a/include/llvm/ADT/PostOrderIterator.h b/include/llvm/ADT/PostOrderIterator.h
index 0cc504b5c39e..e519b5c07964 100644
--- a/include/llvm/ADT/PostOrderIterator.h
+++ b/include/llvm/ADT/PostOrderIterator.h
@@ -17,9 +17,12 @@
#define LLVM_ADT_POSTORDERITERATOR_H
#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include <iterator>
#include <set>
+#include <utility>
#include <vector>
namespace llvm {
@@ -54,22 +57,23 @@ namespace llvm {
template<class SetType, bool External>
class po_iterator_storage {
SetType Visited;
+
public:
// Return true if edge destination should be visited.
- template<typename NodeType>
- bool insertEdge(NodeType *From, NodeType *To) {
+ template <typename NodeRef>
+ bool insertEdge(Optional<NodeRef> From, NodeRef To) {
return Visited.insert(To).second;
}
// Called after all children of BB have been visited.
- template<typename NodeType>
- void finishPostorder(NodeType *BB) {}
+ template <typename NodeRef> void finishPostorder(NodeRef BB) {}
};
/// Specialization of po_iterator_storage that references an external set.
template<class SetType>
class po_iterator_storage<SetType, true> {
SetType &Visited;
+
public:
po_iterator_storage(SetType &VSet) : Visited(VSet) {}
po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
@@ -77,51 +81,50 @@ public:
// Return true if edge destination should be visited, called with From = 0 for
// the root node.
// Graph edges can be pruned by specializing this function.
- template <class NodeType> bool insertEdge(NodeType *From, NodeType *To) {
+ template <class NodeRef> bool insertEdge(Optional<NodeRef> From, NodeRef To) {
return Visited.insert(To).second;
}
// Called after all children of BB have been visited.
- template<class NodeType>
- void finishPostorder(NodeType *BB) {}
+ template <class NodeRef> void finishPostorder(NodeRef BB) {}
};
-template<class GraphT,
- class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
- bool ExtStorage = false,
- class GT = GraphTraits<GraphT> >
-class po_iterator : public std::iterator<std::forward_iterator_tag,
- typename GT::NodeType, ptrdiff_t>,
- public po_iterator_storage<SetType, ExtStorage> {
- typedef std::iterator<std::forward_iterator_tag,
- typename GT::NodeType, ptrdiff_t> super;
- typedef typename GT::NodeType NodeType;
+template <class GraphT,
+ class SetType =
+ SmallPtrSet<typename GraphTraits<GraphT>::NodeRef, 8>,
+ bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class po_iterator
+ : public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
+ public po_iterator_storage<SetType, ExtStorage> {
+ typedef std::iterator<std::forward_iterator_tag, typename GT::NodeRef> super;
+ typedef typename GT::NodeRef NodeRef;
typedef typename GT::ChildIteratorType ChildItTy;
// VisitStack - Used to maintain the ordering. Top = current block
// First element is basic block pointer, second is the 'next child' to visit
- std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
+ std::vector<std::pair<NodeRef, ChildItTy>> VisitStack;
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
- NodeType *BB = *VisitStack.back().second++;
- if (this->insertEdge(VisitStack.back().first, BB)) {
+ NodeRef BB = *VisitStack.back().second++;
+ if (this->insertEdge(Optional<NodeRef>(VisitStack.back().first), BB)) {
// If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
- po_iterator(NodeType *BB) {
- this->insertEdge((NodeType*)nullptr, BB);
+ po_iterator(NodeRef BB) {
+ this->insertEdge(Optional<NodeRef>(), BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
- po_iterator() {} // End is when stack is empty.
- po_iterator(NodeType *BB, SetType &S)
+ po_iterator() = default; // End is when stack is empty.
+
+ po_iterator(NodeRef BB, SetType &S)
: po_iterator_storage<SetType, ExtStorage>(S) {
- if (this->insertEdge((NodeType*)nullptr, BB)) {
+ if (this->insertEdge(Optional<NodeRef>(), BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@@ -130,6 +133,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
po_iterator(SetType &S)
: po_iterator_storage<SetType, ExtStorage>(S) {
} // End is when stack is empty.
+
public:
typedef typename super::pointer pointer;
@@ -149,13 +153,13 @@ public:
}
bool operator!=(const po_iterator &x) const { return !(*this == x); }
- pointer operator*() const { return VisitStack.back().first; }
+ const NodeRef &operator*() const { return VisitStack.back().first; }
// This is a nonstandard operator-> that dereferences the pointer an extra
// time... so that you can actually call methods ON the BasicBlock, because
// the contained type is a pointer. This allows BBIt->getTerminator() f.e.
//
- NodeType *operator->() const { return **this; }
+ NodeRef operator->() const { return **this; }
po_iterator &operator++() { // Preincrement
this->finishPostorder(VisitStack.back().first);
@@ -184,7 +188,7 @@ template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
}
// Provide global definitions of external postorder iterators...
-template<class T, class SetType=std::set<typename GraphTraits<T>::NodeType*> >
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
struct po_ext_iterator : public po_iterator<T, SetType, true> {
po_ext_iterator(const po_iterator<T, SetType, true> &V) :
po_iterator<T, SetType, true>(V) {}
@@ -206,10 +210,9 @@ iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &
}
// Provide global definitions of inverse post order iterators...
-template <class T,
- class SetType = std::set<typename GraphTraits<T>::NodeType*>,
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>,
bool External = false>
-struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External > {
+struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External> {
ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
po_iterator<Inverse<T>, SetType, External> (V) {}
};
@@ -230,8 +233,7 @@ iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
}
// Provide global definitions of external inverse postorder iterators...
-template <class T,
- class SetType = std::set<typename GraphTraits<T>::NodeType*> >
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
ipo_iterator<T, SetType, true>(V) {}
@@ -278,15 +280,17 @@ inverse_post_order_ext(const T &G, SetType &S) {
// }
//
-template<class GraphT, class GT = GraphTraits<GraphT> >
+template<class GraphT, class GT = GraphTraits<GraphT>>
class ReversePostOrderTraversal {
- typedef typename GT::NodeType NodeType;
- std::vector<NodeType*> Blocks; // Block list in normal PO order
- void Initialize(NodeType *BB) {
+ typedef typename GT::NodeRef NodeRef;
+ std::vector<NodeRef> Blocks; // Block list in normal PO order
+
+ void Initialize(NodeRef BB) {
std::copy(po_begin(BB), po_end(BB), std::back_inserter(Blocks));
}
+
public:
- typedef typename std::vector<NodeType*>::reverse_iterator rpo_iterator;
+ typedef typename std::vector<NodeRef>::reverse_iterator rpo_iterator;
ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); }
@@ -295,6 +299,6 @@ public:
rpo_iterator end() { return Blocks.rend(); }
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_POSTORDERITERATOR_H
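
A sketch (not part of the patch) of a post-order walk after the NodeType*-to-NodeRef migration, assuming the usual GraphTraits specialization for Function from llvm/IR/CFG.h; the iterator now yields NodeRef values (here BasicBlock *) directly:

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"

void walkPostOrder(llvm::Function &F) {
  for (llvm::BasicBlock *BB : llvm::post_order(&F)) {
    // BB is visited only after all of its successors have been visited.
    (void)BB;
  }
}
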
diff --git a/include/llvm/ADT/PriorityQueue.h b/include/llvm/ADT/PriorityQueue.h
index 827d0b346e59..8ba871e25304 100644
--- a/include/llvm/ADT/PriorityQueue.h
+++ b/include/llvm/ADT/PriorityQueue.h
@@ -46,8 +46,7 @@ public:
///
void erase_one(const T &t) {
// Linear-search to find the element.
- typename Sequence::size_type i =
- std::find(this->c.begin(), this->c.end(), t) - this->c.begin();
+ typename Sequence::size_type i = find(this->c, t) - this->c.begin();
// Logarithmic-time heap bubble-up.
while (i != 0) {
diff --git a/include/llvm/ADT/PriorityWorklist.h b/include/llvm/ADT/PriorityWorklist.h
index 00549b88fd02..c0b4709e98f8 100644
--- a/include/llvm/ADT/PriorityWorklist.h
+++ b/include/llvm/ADT/PriorityWorklist.h
@@ -17,10 +17,12 @@
#define LLVM_ADT_PRIORITYWORKLIST_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
-#include <utility>
+#include <cstddef>
#include <vector>
namespace llvm {
@@ -59,7 +61,7 @@ public:
typedef typename MapT::size_type size_type;
/// Construct an empty PriorityWorklist
- PriorityWorklist() {}
+ PriorityWorklist() = default;
/// Determine if the PriorityWorklist is empty or not.
bool empty() const {
@@ -115,7 +117,7 @@ public:
} while (!V.empty() && V.back() == T());
}
- T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
+ LLVM_NODISCARD T pop_back_val() {
T Ret = back();
pop_back();
return Ret;
@@ -147,7 +149,7 @@ public:
/// write it:
///
/// \code
- /// V.erase(std::remove_if(V.begin(), V.end(), P), V.end());
+ /// V.erase(remove_if(V, P), V.end());
/// \endcode
///
/// However, PriorityWorklist doesn't expose non-const iterators, making any
@@ -156,8 +158,8 @@ public:
/// \returns true if any element is removed.
template <typename UnaryPredicate>
bool erase_if(UnaryPredicate P) {
- typename VectorT::iterator E = std::remove_if(
- V.begin(), V.end(), TestAndEraseFromMap<UnaryPredicate>(P, M));
+ typename VectorT::iterator E =
+ remove_if(V, TestAndEraseFromMap<UnaryPredicate>(P, M));
if (E == V.end())
return false;
for (auto I = V.begin(); I != E; ++I)
@@ -216,9 +218,9 @@ class SmallPriorityWorklist
: public PriorityWorklist<T, SmallVector<T, N>,
SmallDenseMap<T, ptrdiff_t>> {
public:
- SmallPriorityWorklist() {}
+ SmallPriorityWorklist() = default;
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_PRIORITYWORKLIST_H
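
A sketch (not part of the patch) of erase_if on a SmallPriorityWorklist, the supported way to filter since the worklist exposes no non-const iterators; the integer payload and usePriorityWorklist are arbitrary:

#include "llvm/ADT/PriorityWorklist.h"

void usePriorityWorklist() {
  llvm::SmallPriorityWorklist<int, 4> Worklist;
  Worklist.insert(1);
  Worklist.insert(2);
  Worklist.insert(3);
  // Drop the odd entries in place.
  Worklist.erase_if([](int N) { return N % 2 != 0; });
  int Next = Worklist.pop_back_val(); // 2; pop_back_val() is now LLVM_NODISCARD
  (void)Next;
}
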
diff --git a/include/llvm/ADT/SCCIterator.h b/include/llvm/ADT/SCCIterator.h
index e89345c0f348..9a8a7b168fce 100644
--- a/include/llvm/ADT/SCCIterator.h
+++ b/include/llvm/ADT/SCCIterator.h
@@ -26,6 +26,9 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
#include <vector>
namespace llvm {
@@ -93,7 +96,7 @@ class scc_iterator : public iterator_facade_base<
}
/// End is when the DFS stack is empty.
- scc_iterator() {}
+ scc_iterator() = default;
public:
static scc_iterator begin(const GraphT &G) {
@@ -230,15 +233,15 @@ template <class T> scc_iterator<T> scc_end(const T &G) {
}
/// \brief Construct the begin iterator for a deduced graph type T's Inverse<T>.
-template <class T> scc_iterator<Inverse<T> > scc_begin(const Inverse<T> &G) {
- return scc_iterator<Inverse<T> >::begin(G);
+template <class T> scc_iterator<Inverse<T>> scc_begin(const Inverse<T> &G) {
+ return scc_iterator<Inverse<T>>::begin(G);
}
/// \brief Construct the end iterator for a deduced graph type T's Inverse<T>.
-template <class T> scc_iterator<Inverse<T> > scc_end(const Inverse<T> &G) {
- return scc_iterator<Inverse<T> >::end(G);
+template <class T> scc_iterator<Inverse<T>> scc_end(const Inverse<T> &G) {
+ return scc_iterator<Inverse<T>>::end(G);
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_SCCITERATOR_H
diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h
index 00b796f63818..ec121e0d55cd 100644
--- a/include/llvm/ADT/STLExtras.h
+++ b/include/llvm/ADT/STLExtras.h
@@ -24,18 +24,25 @@
#include <functional>
#include <iterator>
#include <memory>
+#include <tuple>
#include <utility> // for std::pair
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
+
+// Only used by compiler if both template types are the same. Useful when
+// using SFINAE to test for the existence of member functions.
+template <typename T, T> struct SameType;
+
namespace detail {
template <typename RangeT>
-using IterOfRange = decltype(std::begin(std::declval<RangeT>()));
+using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
} // End detail namespace
@@ -208,11 +215,24 @@ inline mapped_iterator<ItTy, FuncTy> map_iterator(const ItTy &I, FuncTy F) {
return mapped_iterator<ItTy, FuncTy>(I, F);
}
-/// \brief Metafunction to determine if type T has a member called rbegin().
-template <typename T> struct has_rbegin {
- template <typename U> static char(&f(const U &, decltype(&U::rbegin)))[1];
- static char(&f(...))[2];
- const static bool value = sizeof(f(std::declval<T>(), nullptr)) == 1;
+/// Helper to determine if type T has a member called rbegin().
+template <typename Ty> class has_rbegin_impl {
+ typedef char yes[1];
+ typedef char no[2];
+
+ template <typename Inner>
+ static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
+
+ template <typename>
+ static no& test(...);
+
+public:
+ static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
+};
+
+/// Metafunction to determine if T& or T has a member called rbegin().
+template <typename Ty>
+struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
};
// Returns an iterator_range over the given container which iterates in reverse.
@@ -327,6 +347,240 @@ make_filter_range(RangeT &&Range, PredicateT Pred) {
FilterIteratorT(std::end(std::forward<RangeT>(Range))));
}
+// forward declarations required by zip_shortest/zip_first
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&range, UnaryPredicate P);
+
+template <size_t... I> struct index_sequence;
+
+template <class... Ts> struct index_sequence_for;
+
+namespace detail {
+template <typename... Iters> class zip_first {
+public:
+ typedef std::input_iterator_tag iterator_category;
+ typedef std::tuple<decltype(*std::declval<Iters>())...> value_type;
+ std::tuple<Iters...> iterators;
+
+private:
+ template <size_t... Ns> value_type deres(index_sequence<Ns...>) {
+ return value_type(*std::get<Ns>(iterators)...);
+ }
+
+ template <size_t... Ns> decltype(iterators) tup_inc(index_sequence<Ns...>) {
+ return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
+ }
+
+public:
+ value_type operator*() { return deres(index_sequence_for<Iters...>{}); }
+
+ void operator++() { iterators = tup_inc(index_sequence_for<Iters...>{}); }
+
+ bool operator!=(const zip_first<Iters...> &other) const {
+ return std::get<0>(iterators) != std::get<0>(other.iterators);
+ }
+ zip_first(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
+};
+
+template <typename... Iters> class zip_shortest : public zip_first<Iters...> {
+ template <size_t... Ns>
+ bool test(const zip_first<Iters...> &other, index_sequence<Ns...>) const {
+ return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
+ std::get<Ns>(other.iterators)...},
+ identity<bool>{});
+ }
+
+public:
+ bool operator!=(const zip_first<Iters...> &other) const {
+ return test(other, index_sequence_for<Iters...>{});
+ }
+ zip_shortest(Iters &&... ts)
+ : zip_first<Iters...>(std::forward<Iters>(ts)...) {}
+};
+
+template <template <typename...> class ItType, typename... Args> class zippy {
+public:
+ typedef ItType<decltype(std::begin(std::declval<Args>()))...> iterator;
+
+private:
+ std::tuple<Args...> ts;
+
+ template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) {
+ return iterator(std::begin(std::get<Ns>(ts))...);
+ }
+ template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) {
+ return iterator(std::end(std::get<Ns>(ts))...);
+ }
+
+public:
+ iterator begin() { return begin_impl(index_sequence_for<Args...>{}); }
+ iterator end() { return end_impl(index_sequence_for<Args...>{}); }
+ zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
+};
+} // End detail namespace
+
+/// zip iterator for two or more iterable types.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
+ Args &&... args) {
+ return detail::zippy<detail::zip_shortest, T, U, Args...>(
+ std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
+
+/// zip iterator that, for the sake of efficiency, assumes the first range to
+/// be the shortest.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
+ Args &&... args) {
+ return detail::zippy<detail::zip_first, T, U, Args...>(
+ std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
+
+/// Iterator wrapper that concatenates sequences together.
+///
+/// This can concatenate different iterators, even with different types, into
+/// a single iterator provided the value types of all the concatenated
+/// iterators expose `reference` and `pointer` types that can be converted to
+/// `ValueT &` and `ValueT *` respectively. It doesn't support more
+/// interesting/customized pointer or reference types.
+///
+/// Currently this only supports forward or higher iterator categories as
+/// inputs and always exposes a forward iterator interface.
+template <typename ValueT, typename... IterTs>
+class concat_iterator
+ : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
+ std::forward_iterator_tag, ValueT> {
+ typedef typename concat_iterator::iterator_facade_base BaseT;
+
+ /// We store both the current and end iterators for each concatenated
+ /// sequence in a tuple of pairs.
+ ///
+ /// Note that something like iterator_range seems nice at first here, but the
+ /// range properties are of little benefit and end up getting in the way
+ /// because we need to do mutation on the current iterators.
+ std::tuple<std::pair<IterTs, IterTs>...> IterPairs;
+
+ /// Attempts to increment a specific iterator.
+ ///
+ /// Returns true if it was able to increment the iterator. Returns false if
+ /// the iterator is already at the end iterator.
+ template <size_t Index> bool incrementHelper() {
+ auto &IterPair = std::get<Index>(IterPairs);
+ if (IterPair.first == IterPair.second)
+ return false;
+
+ ++IterPair.first;
+ return true;
+ }
+
+ /// Increments the first non-end iterator.
+ ///
+ /// It is an error to call this with all iterators at the end.
+ template <size_t... Ns> void increment(index_sequence<Ns...>) {
+ // Build a sequence of functions to increment each iterator if possible.
+ bool (concat_iterator::*IncrementHelperFns[])() = {
+ &concat_iterator::incrementHelper<Ns>...};
+
+ // Loop over them, and stop as soon as we succeed at incrementing one.
+ for (auto &IncrementHelperFn : IncrementHelperFns)
+ if ((this->*IncrementHelperFn)())
+ return;
+
+ llvm_unreachable("Attempted to increment an end concat iterator!");
+ }
+
+ /// Returns null if the specified iterator is at the end. Otherwise,
+ /// dereferences the iterator and returns the address of the resulting
+ /// reference.
+ template <size_t Index> ValueT *getHelper() const {
+ auto &IterPair = std::get<Index>(IterPairs);
+ if (IterPair.first == IterPair.second)
+ return nullptr;
+
+ return &*IterPair.first;
+ }
+
+ /// Finds the first non-end iterator, dereferences, and returns the resulting
+ /// reference.
+ ///
+ /// It is an error to call this with all iterators at the end.
+ template <size_t... Ns> ValueT &get(index_sequence<Ns...>) const {
+ // Build a sequence of functions to get from iterator if possible.
+ ValueT *(concat_iterator::*GetHelperFns[])() const = {
+ &concat_iterator::getHelper<Ns>...};
+
+ // Loop over them, and return the first result we find.
+ for (auto &GetHelperFn : GetHelperFns)
+ if (ValueT *P = (this->*GetHelperFn)())
+ return *P;
+
+ llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
+ }
+
+public:
+  /// Constructs an iterator from a sequence of ranges.
+ ///
+ /// We need the full range to know how to switch between each of the
+ /// iterators.
+ template <typename... RangeTs>
+ explicit concat_iterator(RangeTs &&... Ranges)
+ : IterPairs({std::begin(Ranges), std::end(Ranges)}...) {}
+
+ using BaseT::operator++;
+ concat_iterator &operator++() {
+ increment(index_sequence_for<IterTs...>());
+ return *this;
+ }
+
+ ValueT &operator*() const { return get(index_sequence_for<IterTs...>()); }
+
+ bool operator==(const concat_iterator &RHS) const {
+ return IterPairs == RHS.IterPairs;
+ }
+};
+
+namespace detail {
+/// Helper to store a sequence of ranges being concatenated and access them.
+///
+/// This is designed to facilitate providing actual storage when temporaries
+/// are passed into the constructor such that we can use it as part of range
+/// based for loops.
+template <typename ValueT, typename... RangeTs> class concat_range {
+public:
+ typedef concat_iterator<ValueT,
+ decltype(std::begin(std::declval<RangeTs &>()))...>
+ iterator;
+
+private:
+ std::tuple<RangeTs...> Ranges;
+
+ template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) {
+ return iterator(std::get<Ns>(Ranges)...);
+ }
+ template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) {
+ return iterator(make_range(std::end(std::get<Ns>(Ranges)),
+ std::end(std::get<Ns>(Ranges)))...);
+ }
+
+public:
+ iterator begin() { return begin_impl(index_sequence_for<RangeTs...>{}); }
+ iterator end() { return end_impl(index_sequence_for<RangeTs...>{}); }
+ concat_range(RangeTs &&... Ranges)
+ : Ranges(std::forward<RangeTs>(Ranges)...) {}
+};
+}
+
+/// Concatenated range across two or more ranges.
+///
+/// The desired value type must be explicitly specified.
+template <typename ValueT, typename... RangeTs>
+detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
+ static_assert(sizeof...(RangeTs) > 1,
+ "Need more than one range to concatenate!");
+ return detail::concat_range<ValueT, RangeTs...>(
+ std::forward<RangeTs>(Ranges)...);
+}
+
//===----------------------------------------------------------------------===//
// Extra additions to <utility>
//===----------------------------------------------------------------------===//
@@ -353,7 +607,7 @@ struct less_second {
template <class T, T... I> struct integer_sequence {
typedef T value_type;
- static LLVM_CONSTEXPR size_t size() { return sizeof...(I); }
+ static constexpr size_t size() { return sizeof...(I); }
};
/// \brief Alias for the common case of a sequence of size_ts.
@@ -369,13 +623,30 @@ struct build_index_impl<0, I...> : index_sequence<I...> {};
template <class... Ts>
struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
+/// Utility type to build an inheritance chain that makes it easy to rank
+/// overload candidates.
+template <int N> struct rank : rank<N - 1> {};
+template <> struct rank<0> {};
+
+/// \brief traits class for checking whether type T is one of any of the given
+/// types in the variadic list.
+template <typename T, typename... Ts> struct is_one_of {
+ static const bool value = false;
+};
+
+template <typename T, typename U, typename... Ts>
+struct is_one_of<T, U, Ts...> {
+ static const bool value =
+ std::is_same<T, U>::value || is_one_of<T, Ts...>::value;
+};
+
//===----------------------------------------------------------------------===//
// Extra additions for arrays
//===----------------------------------------------------------------------===//
/// Find the length of an array.
template <class T, std::size_t N>
-LLVM_CONSTEXPR inline size_t array_lengthof(T (&)[N]) {
+constexpr inline size_t array_lengthof(T (&)[N]) {
return N;
}
@@ -445,8 +716,8 @@ inline void array_pod_sort(
/// container.
template<typename Container>
void DeleteContainerPointers(Container &C) {
- for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
- delete *I;
+ for (auto V : C)
+ delete V;
C.clear();
}
@@ -454,77 +725,106 @@ void DeleteContainerPointers(Container &C) {
/// deletes the second elements and then clears the container.
template<typename Container>
void DeleteContainerSeconds(Container &C) {
- for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
- delete I->second;
+ for (auto &V : C)
+ delete V.second;
C.clear();
}
/// Provide wrappers to std::all_of which take ranges instead of having to pass
/// begin/end explicitly.
-template<typename R, class UnaryPredicate>
-bool all_of(R &&Range, UnaryPredicate &&P) {
- return std::all_of(Range.begin(), Range.end(),
- std::forward<UnaryPredicate>(P));
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&Range, UnaryPredicate P) {
+ return std::all_of(std::begin(Range), std::end(Range), P);
}
/// Provide wrappers to std::any_of which take ranges instead of having to pass
/// begin/end explicitly.
-template <typename R, class UnaryPredicate>
-bool any_of(R &&Range, UnaryPredicate &&P) {
- return std::any_of(Range.begin(), Range.end(),
- std::forward<UnaryPredicate>(P));
+template <typename R, typename UnaryPredicate>
+bool any_of(R &&Range, UnaryPredicate P) {
+ return std::any_of(std::begin(Range), std::end(Range), P);
}
/// Provide wrappers to std::none_of which take ranges instead of having to pass
/// begin/end explicitly.
-template <typename R, class UnaryPredicate>
-bool none_of(R &&Range, UnaryPredicate &&P) {
- return std::none_of(Range.begin(), Range.end(),
- std::forward<UnaryPredicate>(P));
+template <typename R, typename UnaryPredicate>
+bool none_of(R &&Range, UnaryPredicate P) {
+ return std::none_of(std::begin(Range), std::end(Range), P);
}
/// Provide wrappers to std::find which take ranges instead of having to pass
/// begin/end explicitly.
-template<typename R, class T>
-auto find(R &&Range, const T &val) -> decltype(Range.begin()) {
- return std::find(Range.begin(), Range.end(), val);
+template <typename R, typename T>
+auto find(R &&Range, const T &Val) -> decltype(std::begin(Range)) {
+ return std::find(std::begin(Range), std::end(Range), Val);
}
/// Provide wrappers to std::find_if which take ranges instead of having to pass
/// begin/end explicitly.
-template <typename R, class T>
-auto find_if(R &&Range, const T &Pred) -> decltype(Range.begin()) {
- return std::find_if(Range.begin(), Range.end(), Pred);
+template <typename R, typename UnaryPredicate>
+auto find_if(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range)) {
+ return std::find_if(std::begin(Range), std::end(Range), P);
+}
+
+template <typename R, typename UnaryPredicate>
+auto find_if_not(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range)) {
+ return std::find_if_not(std::begin(Range), std::end(Range), P);
}
/// Provide wrappers to std::remove_if which take ranges instead of having to
/// pass begin/end explicitly.
-template<typename R, class UnaryPredicate>
-auto remove_if(R &&Range, UnaryPredicate &&P) -> decltype(Range.begin()) {
- return std::remove_if(Range.begin(), Range.end(), P);
+template <typename R, typename UnaryPredicate>
+auto remove_if(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range)) {
+ return std::remove_if(std::begin(Range), std::end(Range), P);
}
/// Wrapper function around std::find to detect if an element exists
/// in a container.
template <typename R, typename E>
bool is_contained(R &&Range, const E &Element) {
- return std::find(Range.begin(), Range.end(), Element) != Range.end();
+ return std::find(std::begin(Range), std::end(Range), Element) !=
+ std::end(Range);
+}
+
+/// Wrapper function around std::count to count the number of times an element
+/// \p Element occurs in the given range \p Range.
+template <typename R, typename E>
+auto count(R &&Range, const E &Element) -> typename std::iterator_traits<
+ decltype(std::begin(Range))>::difference_type {
+ return std::count(std::begin(Range), std::end(Range), Element);
}
/// Wrapper function around std::count_if to count the number of times an
/// element satisfying a given predicate occurs in a range.
template <typename R, typename UnaryPredicate>
-auto count_if(R &&Range, UnaryPredicate &&P)
- -> typename std::iterator_traits<decltype(Range.begin())>::difference_type {
- return std::count_if(Range.begin(), Range.end(), P);
+auto count_if(R &&Range, UnaryPredicate P) -> typename std::iterator_traits<
+ decltype(std::begin(Range))>::difference_type {
+ return std::count_if(std::begin(Range), std::end(Range), P);
}
/// Wrapper function around std::transform to apply a function to a range and
/// store the result elsewhere.
-template <typename R, class OutputIt, typename UnaryPredicate>
-OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate &&P) {
- return std::transform(Range.begin(), Range.end(), d_first,
- std::forward<UnaryPredicate>(P));
+template <typename R, typename OutputIt, typename UnaryPredicate>
+OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P) {
+ return std::transform(std::begin(Range), std::end(Range), d_first, P);
+}
+
+/// Provide wrappers to std::partition which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto partition(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range)) {
+ return std::partition(std::begin(Range), std::end(Range), P);
+}
+
+/// Provide a container algorithm similar to C++ Library Fundamentals v2's
+/// `erase_if` which is equivalent to:
+///
+/// C.erase(remove_if(C, pred), C.end());
+///
+/// This version works for any container with an erase method accepting
+/// two iterators.
+template <typename Container, typename UnaryPredicate>
+void erase_if(Container &C, UnaryPredicate P) {
+ C.erase(remove_if(C, P), C.end());
}
//===----------------------------------------------------------------------===//
@@ -608,6 +908,92 @@ template <typename T> struct deref {
}
};
+namespace detail {
+template <typename R> class enumerator_impl {
+public:
+ template <typename X> struct result_pair {
+ result_pair(std::size_t Index, X Value) : Index(Index), Value(Value) {}
+
+ const std::size_t Index;
+ X Value;
+ };
+
+ class iterator {
+ typedef
+ typename std::iterator_traits<IterOfRange<R>>::reference iter_reference;
+ typedef result_pair<iter_reference> result_type;
+
+ public:
+ iterator(IterOfRange<R> &&Iter, std::size_t Index)
+ : Iter(Iter), Index(Index) {}
+
+ result_type operator*() const { return result_type(Index, *Iter); }
+
+ iterator &operator++() {
+ ++Iter;
+ ++Index;
+ return *this;
+ }
+
+ bool operator!=(const iterator &RHS) const { return Iter != RHS.Iter; }
+
+ private:
+ IterOfRange<R> Iter;
+ std::size_t Index;
+ };
+
+public:
+ explicit enumerator_impl(R &&Range) : Range(std::forward<R>(Range)) {}
+
+ iterator begin() { return iterator(std::begin(Range), 0); }
+ iterator end() { return iterator(std::end(Range), std::size_t(-1)); }
+
+private:
+ R Range;
+};
+}
+
+/// Given an input range, returns a new range whose values are pairs (A, B)
+/// such that A is the 0-based index of the item in the sequence, and B is
+/// the value from the original sequence. Example:
+///
+/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
+/// for (auto X : enumerate(Items)) {
+/// printf("Item %d - %c\n", X.Index, X.Value);
+/// }
+///
+/// Output:
+/// Item 0 - A
+/// Item 1 - B
+/// Item 2 - C
+/// Item 3 - D
+///
+template <typename R> detail::enumerator_impl<R> enumerate(R &&Range) {
+ return detail::enumerator_impl<R>(std::forward<R>(Range));
+}
+
+namespace detail {
+template <typename F, typename Tuple, std::size_t... I>
+auto apply_tuple_impl(F &&f, Tuple &&t, index_sequence<I...>)
+ -> decltype(std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...)) {
+ return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
+}
+}
+
+/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
+/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
+/// return the result.
+template <typename F, typename Tuple>
+auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl(
+ std::forward<F>(f), std::forward<Tuple>(t),
+ build_index_impl<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>{})) {
+ using Indices = build_index_impl<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>;
+
+ return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
+ Indices{});
+}
} // End llvm namespace
#endif
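
A sketch (not part of the patch) exercising a few of the STLExtras additions above: zip, enumerate, and the range-based algorithm wrappers; the containers, values, and useSTLExtras are arbitrary:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <tuple>

void useSTLExtras() {
  llvm::SmallVector<int, 4> A = {1, 2, 3};
  llvm::SmallVector<char, 4> B = {'x', 'y', 'z'};

  // zip() stops at the end of the shortest range; each element is a tuple of
  // references into the underlying ranges.
  for (auto T : llvm::zip(A, B))
    (void)std::get<0>(T);

  // enumerate() pairs every value with its 0-based index.
  for (auto E : llvm::enumerate(B))
    (void)E.Index;

  // The range wrappers drop the begin()/end() boilerplate.
  bool AnyNegative = llvm::any_of(A, [](int V) { return V < 0; });
  (void)AnyNegative;
}
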
diff --git a/include/llvm/ADT/ScopeExit.h b/include/llvm/ADT/ScopeExit.h
new file mode 100644
index 000000000000..4e64352c77df
--- /dev/null
+++ b/include/llvm/ADT/ScopeExit.h
@@ -0,0 +1,54 @@
+//===- llvm/ADT/ScopeExit.h - Execute code at scope exit --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the make_scope_exit function, which executes user-defined
+// cleanup logic at scope exit.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPE_EXIT_H
+#define LLVM_ADT_SCOPE_EXIT_H
+
+#include "llvm/Support/Compiler.h"
+
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+namespace detail {
+
+template <typename Callable> class scope_exit {
+ Callable ExitFunction;
+
+public:
+ template <typename Fp>
+ explicit scope_exit(Fp &&F) : ExitFunction(std::forward<Fp>(F)) {}
+
+ scope_exit(scope_exit &&Rhs) : ExitFunction(std::move(Rhs.ExitFunction)) {}
+
+ ~scope_exit() { ExitFunction(); }
+};
+
+} // end namespace detail
+
+// Keeps the callable object that is passed in, and executes it at the
+// destruction of the returned object (usually at the scope exit where the
+// returned object is kept).
+//
+// Interface is specified by p0052r2.
+template <typename Callable>
+LLVM_NODISCARD detail::scope_exit<typename std::decay<Callable>::type>
+make_scope_exit(Callable &&F) {
+ return detail::scope_exit<typename std::decay<Callable>::type>(
+ std::forward<Callable>(F));
+}
+
+} // end namespace llvm
+
+#endif
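
A minimal sketch (not part of the patch) of make_scope_exit; the file name and writeGreeting are arbitrary and only illustrate the cleanup-on-every-exit-path pattern:

#include "llvm/ADT/ScopeExit.h"
#include <cstdio>

void writeGreeting() {
  std::FILE *F = std::fopen("/tmp/greeting.txt", "w");
  if (!F)
    return;
  // The callable runs when Closer is destroyed, on every exit path from here on.
  auto Closer = llvm::make_scope_exit([&] { std::fclose(F); });
  std::fputs("hello\n", F);
} // Closer is destroyed here: fclose(F) runs
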
diff --git a/include/llvm/ADT/ScopedHashTable.h b/include/llvm/ADT/ScopedHashTable.h
index 4af3d6d37e33..ad805b0991c1 100644
--- a/include/llvm/ADT/ScopedHashTable.h
+++ b/include/llvm/ADT/ScopedHashTable.h
@@ -32,7 +32,10 @@
#define LLVM_ADT_SCOPEDHASHTABLE_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <new>
namespace llvm {
@@ -46,6 +49,7 @@ class ScopedHashTableVal {
ScopedHashTableVal *NextForKey;
K Key;
V Val;
+
ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
public:
@@ -89,11 +93,11 @@ class ScopedHashTableScope {
/// LastValInScope - This is the last value that was inserted for this scope
/// or null if none have been inserted yet.
ScopedHashTableVal<K, V> *LastValInScope;
- void operator=(ScopedHashTableScope &) = delete;
- ScopedHashTableScope(ScopedHashTableScope &) = delete;
public:
ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
+ ScopedHashTableScope(ScopedHashTableScope &) = delete;
+ ScopedHashTableScope &operator=(ScopedHashTableScope &) = delete;
~ScopedHashTableScope();
ScopedHashTableScope *getParentScope() { return PrevScope; }
@@ -101,6 +105,7 @@ public:
private:
friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
+
ScopedHashTableVal<K, V> *getLastValInScope() {
return LastValInScope;
}
@@ -150,19 +155,20 @@ public:
typedef unsigned size_type;
private:
+ friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+
typedef ScopedHashTableVal<K, V> ValTy;
DenseMap<K, ValTy*, KInfo> TopLevelMap;
- ScopeTy *CurScope;
+ ScopeTy *CurScope = nullptr;
AllocatorTy Allocator;
- ScopedHashTable(const ScopedHashTable &); // NOT YET IMPLEMENTED
- void operator=(const ScopedHashTable &); // NOT YET IMPLEMENTED
- friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
-
public:
- ScopedHashTable() : CurScope(nullptr) {}
+ ScopedHashTable() = default;
ScopedHashTable(AllocatorTy A) : CurScope(0), Allocator(A) {}
+ ScopedHashTable(const ScopedHashTable &) = delete;
+ ScopedHashTable &operator=(const ScopedHashTable &) = delete;
+
~ScopedHashTable() {
assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
}
@@ -253,4 +259,4 @@ ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_SCOPEDHASHTABLE_H
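
A sketch (not part of the patch) of the scope-balancing contract the "Scope imbalance!" assert above enforces; the unsigned keys, int values, and useScopedHashTable are arbitrary:

#include "llvm/ADT/ScopedHashTable.h"

void useScopedHashTable() {
  llvm::ScopedHashTable<unsigned, int> Table;
  llvm::ScopedHashTable<unsigned, int>::ScopeTy Outer(Table);
  Table.insert(7, 1);
  {
    llvm::ScopedHashTable<unsigned, int>::ScopeTy Inner(Table);
    Table.insert(7, 2);
    int V = Table.lookup(7); // 2: the innermost binding shadows the outer one
    (void)V;
  } // Inner's insertions are popped here
  int V = Table.lookup(7);   // back to 1
  (void)V;
} // Outer is popped before Table is destroyed, keeping the scopes balanced
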
diff --git a/include/llvm/ADT/SetVector.h b/include/llvm/ADT/SetVector.h
index 2bb0fdbd3370..4dc18bc52178 100644
--- a/include/llvm/ADT/SetVector.h
+++ b/include/llvm/ADT/SetVector.h
@@ -20,11 +20,13 @@
#ifndef LLVM_ADT_SETVECTOR_H
#define LLVM_ADT_SETVECTOR_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
-#include <utility>
+#include <iterator>
#include <vector>
namespace llvm {
@@ -51,7 +53,7 @@ public:
typedef typename vector_type::size_type size_type;
/// \brief Construct an empty SetVector
- SetVector() {}
+ SetVector() = default;
/// \brief Initialize a SetVector with a range of elements
template<typename It>
@@ -61,6 +63,12 @@ public:
ArrayRef<T> getArrayRef() const { return vector_; }
+ /// Clear the SetVector and return the underlying vector.
+ Vector takeVector() {
+ set_.clear();
+ return std::move(vector_);
+ }
+
/// \brief Determine if the SetVector is empty or not.
bool empty() const {
return vector_.empty();
@@ -143,8 +151,7 @@ public:
/// \brief Remove an item from the set vector.
bool remove(const value_type& X) {
if (set_.erase(X)) {
- typename vector_type::iterator I =
- std::find(vector_.begin(), vector_.end(), X);
+ typename vector_type::iterator I = find(vector_, X);
assert(I != vector_.end() && "Corrupted SetVector instances!");
vector_.erase(I);
return true;
@@ -176,7 +183,7 @@ public:
/// write it:
///
/// \code
- /// V.erase(std::remove_if(V.begin(), V.end(), P), V.end());
+ /// V.erase(remove_if(V, P), V.end());
/// \endcode
///
/// However, SetVector doesn't expose non-const iterators, making any
@@ -185,9 +192,8 @@ public:
/// \returns true if any element is removed.
template <typename UnaryPredicate>
bool remove_if(UnaryPredicate P) {
- typename vector_type::iterator I
- = std::remove_if(vector_.begin(), vector_.end(),
- TestAndEraseFromSet<UnaryPredicate>(P, set_));
+ typename vector_type::iterator I =
+ llvm::remove_if(vector_, TestAndEraseFromSet<UnaryPredicate>(P, set_));
if (I == vector_.end())
return false;
vector_.erase(I, vector_.end());
@@ -213,7 +219,7 @@ public:
vector_.pop_back();
}
- T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
+ LLVM_NODISCARD T pop_back_val() {
T Ret = back();
pop_back();
return Ret;
@@ -283,9 +289,10 @@ private:
/// \brief A SetVector that performs no allocations if smaller than
/// a certain size.
template <typename T, unsigned N>
-class SmallSetVector : public SetVector<T, SmallVector<T, N>, SmallSet<T, N> > {
+class SmallSetVector
+ : public SetVector<T, SmallVector<T, N>, SmallDenseSet<T, N>> {
public:
- SmallSetVector() {}
+ SmallSetVector() = default;
/// \brief Initialize a SmallSetVector with a range of elements
template<typename It>
@@ -294,7 +301,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-// vim: sw=2 ai
-#endif
+#endif // LLVM_ADT_SETVECTOR_H
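
A brief sketch of the new SetVector::takeVector() together with the remove_if() changed above; the element values are arbitrary:

#include "llvm/ADT/SetVector.h"

void demo() {
  llvm::SmallSetVector<int, 8> SV;
  for (int I : {3, 1, 4, 1, 5})
    SV.insert(I);                                  // duplicate 1 is ignored

  SV.remove_if([](int V) { return V % 2 == 0; });  // drops 4

  // New in this change: move the uniqued, insertion-ordered elements out,
  // leaving the SetVector empty.
  llvm::SmallVector<int, 8> Vec = SV.takeVector();
  (void)Vec;                                       // {3, 1, 5}
}
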
diff --git a/include/llvm/ADT/SmallPtrSet.h b/include/llvm/ADT/SmallPtrSet.h
index eaed6aa05dcb..49feb9da897a 100644
--- a/include/llvm/ADT/SmallPtrSet.h
+++ b/include/llvm/ADT/SmallPtrSet.h
@@ -15,19 +15,25 @@
#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H
+#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <cstddef>
#include <cstring>
#include <cstdlib>
+#include <initializer_list>
#include <iterator>
#include <utility>
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
namespace llvm {
+template <class T = void> struct ReverseIterate { static bool value; };
+template <class T> bool ReverseIterate<T>::value = false;
+}
+#endif
-class SmallPtrSetIteratorImpl;
+namespace llvm {
/// SmallPtrSetImplBase - This is the common code shared among all the
/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
@@ -71,12 +77,14 @@ protected:
const SmallPtrSetImplBase &that);
SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
SmallPtrSetImplBase &&that);
+
explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
: SmallArray(SmallStorage), CurArray(SmallStorage),
CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
"Initial size must be a power of two!");
}
+
~SmallPtrSetImplBase() {
if (!isSmall())
free(CurArray);
@@ -84,7 +92,10 @@ protected:
public:
typedef unsigned size_type;
- bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { return size() == 0; }
+
+ SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete;
+
+ LLVM_NODISCARD bool empty() const { return size() == 0; }
size_type size() const { return NumNonEmpty - NumTombstones; }
void clear() {
@@ -103,6 +114,7 @@ public:
protected:
static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
+
static void *getEmptyMarker() {
// Note that -1 is chosen to make clear() efficiently implementable with
// memset and because it's not a valid pointer value.
@@ -150,22 +162,38 @@ protected:
/// return true, otherwise return false. This is hidden from the client so
/// that the derived class can check that the right type of pointer is passed
/// in.
- bool erase_imp(const void * Ptr);
+ bool erase_imp(const void * Ptr) {
+ const void *const *P = find_imp(Ptr);
+ if (P == EndPointer())
+ return false;
+
+ const void ** Loc = const_cast<const void **>(P);
+ assert(*Loc == Ptr && "broken find!");
+ *Loc = getTombstoneMarker();
+ NumTombstones++;
+ return true;
+ }
- bool count_imp(const void * Ptr) const {
+ /// Returns the raw pointer needed to construct an iterator. If the element
+ /// is not found, this will be EndPointer. Otherwise, it will be a pointer
+ /// to the slot which stores Ptr.
+ const void *const * find_imp(const void * Ptr) const {
if (isSmall()) {
// Linear search for the item.
for (const void *const *APtr = SmallArray,
*const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
if (*APtr == Ptr)
- return true;
- return false;
+ return APtr;
+ return EndPointer();
}
// Big set case.
- return *FindBucketFor(Ptr) == Ptr;
+ auto *Bucket = FindBucketFor(Ptr);
+ if (*Bucket == Ptr)
+ return Bucket;
+ return EndPointer();
}
-
+
private:
bool isSmall() const { return CurArray == SmallArray; }
@@ -177,8 +205,6 @@ private:
/// Grow - Allocate a larger backing store for the buckets and move it over.
void Grow(unsigned NewSize);
- void operator=(const SmallPtrSetImplBase &RHS) = delete;
-
protected:
/// swap - Swaps the elements of two sets.
/// Note: This method assumes that both sets have the same small size.
@@ -204,6 +230,12 @@ protected:
public:
explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
: Bucket(BP), End(E) {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ if (ReverseIterate<bool>::value) {
+ RetreatIfNotValid();
+ return;
+ }
+#endif
AdvanceIfNotValid();
}
@@ -225,6 +257,17 @@ protected:
*Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
++Bucket;
}
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ void RetreatIfNotValid() {
+ --Bucket;
+ assert(Bucket <= End);
+ while (Bucket != End &&
+ (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
+ *Bucket == SmallPtrSetImplBase::getTombstoneMarker())) {
+ --Bucket;
+ }
+ }
+#endif
};
/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
@@ -250,13 +293,21 @@ public:
}
inline SmallPtrSetIterator& operator++() { // Preincrement
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ if (ReverseIterate<bool>::value) {
+ RetreatIfNotValid();
+ return *this;
+ }
+#endif
++Bucket;
AdvanceIfNotValid();
return *this;
}
SmallPtrSetIterator operator++(int) { // Postincrement
- SmallPtrSetIterator tmp = *this; ++*this; return tmp;
+ SmallPtrSetIterator tmp = *this;
+ ++*this;
+ return tmp;
}
};
@@ -294,8 +345,6 @@ template <typename PtrType>
class SmallPtrSetImpl : public SmallPtrSetImplBase {
typedef PointerLikeTypeTraits<PtrType> PtrTraits;
- SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
-
protected:
// Constructors that forward to the base.
SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that)
@@ -310,6 +359,8 @@ public:
typedef SmallPtrSetIterator<PtrType> iterator;
typedef SmallPtrSetIterator<PtrType> const_iterator;
+ SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
+
/// Inserts Ptr if and only if there is no element in the container equal to
/// Ptr. The bool component of the returned pair is true if and only if the
/// insertion takes place, and the iterator component of the pair points to
@@ -327,7 +378,11 @@ public:
/// count - Return 1 if the specified pointer is in the set, 0 otherwise.
size_type count(PtrType Ptr) const {
- return count_imp(PtrTraits::getAsVoidPointer(Ptr)) ? 1 : 0;
+ return find(Ptr) != endPtr() ? 1 : 0;
+ }
+ iterator find(PtrType Ptr) const {
+ auto *P = find_imp(PtrTraits::getAsVoidPointer(Ptr));
+ return iterator(P, EndPointer());
}
template <typename IterT>
@@ -336,10 +391,27 @@ public:
insert(*I);
}
+ void insert(std::initializer_list<PtrType> IL) {
+ insert(IL.begin(), IL.end());
+ }
+
inline iterator begin() const {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ if (ReverseIterate<bool>::value)
+ return endPtr();
+#endif
return iterator(CurArray, EndPointer());
}
inline iterator end() const {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ if (ReverseIterate<bool>::value)
+ return iterator(CurArray, CurArray);
+#endif
+ return endPtr();
+ }
+
+private:
+ inline iterator endPtr() const {
const void *const *End = EndPointer();
return iterator(End, End);
}
@@ -374,6 +446,11 @@ public:
this->insert(I, E);
}
+ SmallPtrSet(std::initializer_list<PtrType> IL)
+ : BaseT(SmallStorage, SmallSizePowTwo) {
+ this->insert(IL.begin(), IL.end());
+ }
+
SmallPtrSet<PtrType, SmallSize> &
operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
if (&RHS != this)
@@ -381,26 +458,36 @@ public:
return *this;
}
- SmallPtrSet<PtrType, SmallSize>&
+ SmallPtrSet<PtrType, SmallSize> &
operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
if (&RHS != this)
this->MoveFrom(SmallSizePowTwo, std::move(RHS));
return *this;
}
+ SmallPtrSet<PtrType, SmallSize> &
+ operator=(std::initializer_list<PtrType> IL) {
+ this->clear();
+ this->insert(IL.begin(), IL.end());
+ return *this;
+ }
+
/// swap - Swaps the elements of two sets.
void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
SmallPtrSetImplBase::swap(RHS);
}
};
-}
+
+} // end namespace llvm
namespace std {
+
/// Implement std::swap in terms of SmallPtrSet swap.
template<class T, unsigned N>
inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
LHS.swap(RHS);
}
-}
-#endif
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLPTRSET_H
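
A short sketch of the SmallPtrSet additions above (initializer_list construction and insertion, plus the new find()); the pointer arguments are placeholders:

#include "llvm/ADT/SmallPtrSet.h"

void demo(int *A, int *B, int *C) {
  llvm::SmallPtrSet<int *, 4> Set = {A, B};   // initializer_list constructor
  Set.insert({C});                            // initializer_list insert

  // find() returns an iterator, so membership tests no longer need count().
  if (Set.find(A) != Set.end())
    Set.erase(A);
}
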
diff --git a/include/llvm/ADT/SmallSet.h b/include/llvm/ADT/SmallSet.h
index aaa5ff0ae939..6dac1677b7a2 100644
--- a/include/llvm/ADT/SmallSet.h
+++ b/include/llvm/ADT/SmallSet.h
@@ -17,7 +17,11 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include <cstddef>
+#include <functional>
#include <set>
+#include <utility>
namespace llvm {
@@ -28,7 +32,7 @@ namespace llvm {
///
/// Note that this set does not provide a way to iterate over members in the
/// set.
-template <typename T, unsigned N, typename C = std::less<T> >
+template <typename T, unsigned N, typename C = std::less<T>>
class SmallSet {
/// Use a SmallVector to hold the elements here (even though it will never
/// reach its 'large' stage) to avoid calling the default ctors of elements
@@ -45,9 +49,10 @@ class SmallSet {
public:
typedef size_t size_type;
- SmallSet() {}
- bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
+ SmallSet() = default;
+
+ LLVM_NODISCARD bool empty() const {
return Vector.empty() && Set.empty();
}
@@ -133,4 +138,4 @@ class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_SMALLSET_H
diff --git a/include/llvm/ADT/SmallString.h b/include/llvm/ADT/SmallString.h
index e569f54481a2..ff46e85ccb09 100644
--- a/include/llvm/ADT/SmallString.h
+++ b/include/llvm/ADT/SmallString.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include <cstddef>
namespace llvm {
@@ -25,7 +26,7 @@ template<unsigned InternalLen>
class SmallString : public SmallVector<char, InternalLen> {
public:
/// Default ctor - Initialize to empty.
- SmallString() {}
+ SmallString() = default;
/// Initialize from a StringRef.
SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
@@ -79,7 +80,6 @@ public:
SmallVectorImpl<char>::append(NumInputs, Elt);
}
-
/// Append from a StringRef.
void append(StringRef RHS) {
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
@@ -292,6 +292,6 @@ public:
}
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_SMALLSTRING_H
diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h
index 42eedc63e079..b9588214023c 100644
--- a/include/llvm/ADT/SmallVector.h
+++ b/include/llvm/ADT/SmallVector.h
@@ -27,6 +27,9 @@
#include <initializer_list>
#include <iterator>
#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
namespace llvm {
@@ -54,11 +57,9 @@ public:
return size_t((char*)CapacityX - (char*)BeginX);
}
- bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { return BeginX == EndX; }
+ LLVM_NODISCARD bool empty() const { return BeginX == EndX; }
};
-template <typename T, unsigned N> struct SmallVectorStorage;
-
/// This is the part of SmallVectorTemplateBase which does not depend on whether
/// the type T is a POD. The extra dummy template argument is used by ArrayRef
/// to avoid unnecessarily requiring T to be complete.
@@ -70,7 +71,7 @@ private:
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. Use an array of char of sufficient alignment.
- typedef llvm::AlignedCharArrayUnion<T> U;
+ typedef AlignedCharArrayUnion<T> U;
U FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
@@ -93,6 +94,7 @@ protected:
}
void setEnd(T *P) { this->EndX = P; }
+
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
@@ -117,11 +119,12 @@ public:
iterator end() { return (iterator)this->EndX; }
LLVM_ATTRIBUTE_ALWAYS_INLINE
const_iterator end() const { return (const_iterator)this->EndX; }
+
protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
-public:
+public:
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
@@ -298,6 +301,7 @@ protected:
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
+
public:
void push_back(const T &Elt) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
@@ -311,14 +315,12 @@ public:
}
};
-
/// This class consists of common code factored out of the SmallVector class to
/// reduce code duplication based on the SmallVector 'N' template parameter.
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
- SmallVectorImpl(const SmallVectorImpl&) = delete;
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::const_iterator const_iterator;
@@ -331,6 +333,8 @@ protected:
}
public:
+ SmallVectorImpl(const SmallVectorImpl &) = delete;
+
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
@@ -340,7 +344,6 @@ public:
free(this->begin());
}
-
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
@@ -376,7 +379,7 @@ public:
this->grow(N);
}
- T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
+ LLVM_NODISCARD T pop_back_val() {
T Result = ::std::move(this->back());
this->pop_back();
return Result;
@@ -668,7 +671,6 @@ public:
}
};
-
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
if (this == &RHS) return;
@@ -841,6 +843,7 @@ template <typename T, unsigned N>
class SmallVector : public SmallVectorImpl<T> {
/// Inline space for elements which aren't stored in the base class.
SmallVectorStorage<T, N> Storage;
+
public:
SmallVector() : SmallVectorImpl<T>(N) {
}
@@ -856,7 +859,7 @@ public:
}
template <typename RangeTy>
- explicit SmallVector(const llvm::iterator_range<RangeTy> R)
+ explicit SmallVector(const iterator_range<RangeTy> &R)
: SmallVectorImpl<T>(N) {
this->append(R.begin(), R.end());
}
@@ -906,9 +909,10 @@ static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
return X.capacity_in_bytes();
}
-} // End llvm namespace
+} // end namespace llvm
namespace std {
+
/// Implement std::swap in terms of SmallVector swap.
template<typename T>
inline void
@@ -922,6 +926,7 @@ namespace std {
swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
LHS.swap(RHS);
}
-}
-#endif
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLVECTOR_H
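
Two of the SmallVector changes above are visible to callers: pop_back_val() is now marked LLVM_NODISCARD, and the range constructor takes the iterator_range by const reference. A small, assumed usage sketch:

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"

void demo() {
  llvm::SmallVector<int, 4> V{1, 2, 3};
  int Last = V.pop_back_val();   // discarding the result now warns
  (void)Last;

  llvm::SmallVector<int, 4> Copy(llvm::make_range(V.begin(), V.end()));
  (void)Copy;                    // {1, 2}
}
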
diff --git a/include/llvm/ADT/SparseBitVector.h b/include/llvm/ADT/SparseBitVector.h
index e6e72413da4e..e2822c46e266 100644
--- a/include/llvm/ADT/SparseBitVector.h
+++ b/include/llvm/ADT/SparseBitVector.h
@@ -15,14 +15,14 @@
#ifndef LLVM_ADT_SPARSEBITVECTOR_H
#define LLVM_ADT_SPARSEBITVECTOR_H
-#include "llvm/ADT/ilist.h"
-#include "llvm/ADT/ilist_node.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <climits>
+#include <cstring>
+#include <iterator>
+#include <list>
namespace llvm {
@@ -39,9 +39,7 @@ namespace llvm {
/// etc) do not perform as well in practice as a linked list with this iterator
/// kept up to date. They are also significantly more memory intensive.
-template <unsigned ElementSize = 128>
-struct SparseBitVectorElement
- : public ilist_node<SparseBitVectorElement<ElementSize> > {
+template <unsigned ElementSize = 128> struct SparseBitVectorElement {
public:
typedef unsigned long BitWord;
typedef unsigned size_type;
@@ -55,8 +53,7 @@ private:
// Index of Element in terms of where first bit starts.
unsigned ElementIndex;
BitWord Bits[BITWORDS_PER_ELEMENT];
- // Needed for sentinels
- friend struct ilist_sentinel_traits<SparseBitVectorElement>;
+
SparseBitVectorElement() {
ElementIndex = ~0U;
memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
@@ -84,7 +81,7 @@ public:
// Return the bits that make up word Idx in our element.
BitWord word(unsigned Idx) const {
- assert (Idx < BITWORDS_PER_ELEMENT);
+ assert(Idx < BITWORDS_PER_ELEMENT);
return Bits[Idx];
}
@@ -144,8 +141,8 @@ public:
unsigned WordPos = Curr / BITWORD_SIZE;
unsigned BitPos = Curr % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
- assert (WordPos <= BITWORDS_PER_ELEMENT
- && "Word Position outside of element");
+ assert(WordPos <= BITWORDS_PER_ELEMENT
+ && "Word Position outside of element");
// Mask off previous bits.
Copy &= ~0UL << BitPos;
@@ -244,25 +241,9 @@ public:
}
};
-template <unsigned ElementSize>
-struct ilist_traits<SparseBitVectorElement<ElementSize> >
- : public ilist_default_traits<SparseBitVectorElement<ElementSize> > {
- typedef SparseBitVectorElement<ElementSize> Element;
-
- Element *createSentinel() const { return static_cast<Element *>(&Sentinel); }
- static void destroySentinel(Element *) {}
-
- Element *provideInitialHead() const { return createSentinel(); }
- Element *ensureHead(Element *) const { return createSentinel(); }
- static void noteHead(Element *, Element *) {}
-
-private:
- mutable ilist_half_node<Element> Sentinel;
-};
-
template <unsigned ElementSize = 128>
class SparseBitVector {
- typedef ilist<SparseBitVectorElement<ElementSize> > ElementList;
+ typedef std::list<SparseBitVectorElement<ElementSize>> ElementList;
typedef typename ElementList::iterator ElementListIter;
typedef typename ElementList::const_iterator ElementListConstIter;
enum {
@@ -310,7 +291,7 @@ class SparseBitVector {
private:
bool AtEnd;
- const SparseBitVector<ElementSize> *BitVector;
+ const SparseBitVector<ElementSize> *BitVector = nullptr;
// Current element inside of bitmap.
ElementListConstIter Iter;
@@ -380,7 +361,20 @@ class SparseBitVector {
}
}
}
+
public:
+ SparseBitVectorIterator() = default;
+
+ SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
+ bool end = false) : BitVector(RHS) {
+ Iter = BitVector->Elements.begin();
+ BitNumber = 0;
+ Bits = 0;
+ WordNumber = ~0;
+ AtEnd = end;
+ AdvanceToFirstNonZero();
+ }
+
// Preincrement.
inline SparseBitVectorIterator& operator++() {
++BitNumber;
@@ -413,29 +407,16 @@ class SparseBitVector {
bool operator!=(const SparseBitVectorIterator &RHS) const {
return !(*this == RHS);
}
-
- SparseBitVectorIterator(): BitVector(nullptr) {
- }
-
- SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
- bool end = false):BitVector(RHS) {
- Iter = BitVector->Elements.begin();
- BitNumber = 0;
- Bits = 0;
- WordNumber = ~0;
- AtEnd = end;
- AdvanceToFirstNonZero();
- }
};
+
public:
typedef SparseBitVectorIterator iterator;
- SparseBitVector () {
- CurrElementIter = Elements.begin ();
+ SparseBitVector() {
+ CurrElementIter = Elements.begin();
}
- ~SparseBitVector() {
- }
+ ~SparseBitVector() = default;
// SparseBitVector copy ctor.
SparseBitVector(const SparseBitVector &RHS) {
@@ -510,26 +491,21 @@ public:
void set(unsigned Idx) {
unsigned ElementIndex = Idx / ElementSize;
- SparseBitVectorElement<ElementSize> *Element;
ElementListIter ElementIter;
if (Elements.empty()) {
- Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
- ElementIter = Elements.insert(Elements.end(), Element);
-
+ ElementIter = Elements.emplace(Elements.end(), ElementIndex);
} else {
ElementIter = FindLowerBound(ElementIndex);
if (ElementIter == Elements.end() ||
ElementIter->index() != ElementIndex) {
- Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
// We may have hit the beginning of our SparseBitVector, in which case,
// we may need to insert right after this element, which requires moving
// the current iterator forward one, because insert does insert before.
if (ElementIter != Elements.end() &&
ElementIter->index() < ElementIndex)
- ElementIter = Elements.insert(++ElementIter, Element);
- else
- ElementIter = Elements.insert(ElementIter, Element);
+ ++ElementIter;
+ ElementIter = Elements.emplace(ElementIter, ElementIndex);
}
}
CurrElementIter = ElementIter;
@@ -537,7 +513,7 @@ public:
ElementIter->set(Idx % ElementSize);
}
- bool test_and_set (unsigned Idx) {
+ bool test_and_set(unsigned Idx) {
bool old = test(Idx);
if (!old) {
set(Idx);
@@ -577,8 +553,7 @@ public:
while (Iter2 != RHS.Elements.end()) {
if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
- Elements.insert(Iter1,
- new SparseBitVectorElement<ElementSize>(*Iter2));
+ Elements.insert(Iter1, *Iter2);
++Iter2;
changed = true;
} else if (Iter1->index() == Iter2->index()) {
@@ -725,31 +700,19 @@ public:
++Iter2;
} else if (Iter1->index() == Iter2->index()) {
bool BecameZero = false;
- SparseBitVectorElement<ElementSize> *NewElement =
- new SparseBitVectorElement<ElementSize>(Iter1->index());
- NewElement->intersectWithComplement(*Iter1, *Iter2, BecameZero);
- if (!BecameZero) {
- Elements.push_back(NewElement);
- }
- else
- delete NewElement;
+ Elements.emplace_back(Iter1->index());
+ Elements.back().intersectWithComplement(*Iter1, *Iter2, BecameZero);
+ if (BecameZero)
+ Elements.pop_back();
++Iter1;
++Iter2;
} else {
- SparseBitVectorElement<ElementSize> *NewElement =
- new SparseBitVectorElement<ElementSize>(*Iter1);
- Elements.push_back(NewElement);
- ++Iter1;
+ Elements.push_back(*Iter1++);
}
}
// copy the remaining elements
- while (Iter1 != RHS1.Elements.end()) {
- SparseBitVectorElement<ElementSize> *NewElement =
- new SparseBitVectorElement<ElementSize>(*Iter1);
- Elements.push_back(NewElement);
- ++Iter1;
- }
+ std::copy(Iter1, RHS1.Elements.end(), std::back_inserter(Elements));
}
void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
@@ -819,6 +782,7 @@ public:
return BitCount;
}
+
iterator begin() const {
return iterator(this);
}
@@ -899,6 +863,7 @@ void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
}
out << "]\n";
}
+
} // end namespace llvm
#endif // LLVM_ADT_SPARSEBITVECTOR_H
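
The change above swaps the element storage from an ilist of heap-allocated nodes to a std::list holding the elements by value; the public interface is untouched. An illustrative sketch of that interface, with arbitrary bit indices:

#include "llvm/ADT/SparseBitVector.h"

void demo() {
  llvm::SparseBitVector<> Bits;
  Bits.set(5);
  Bits.set(300);                         // stored in a different 128-bit element
  bool Inserted = Bits.test_and_set(5);  // 5 is already set, so returns false
  (void)Inserted;
  for (unsigned Idx : Bits)              // visits the set bits: 5, then 300
    (void)Idx;
}
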
diff --git a/include/llvm/ADT/SparseMultiSet.h b/include/llvm/ADT/SparseMultiSet.h
index e3aa2589b79f..08da4b68ebaa 100644
--- a/include/llvm/ADT/SparseMultiSet.h
+++ b/include/llvm/ADT/SparseMultiSet.h
@@ -21,7 +21,15 @@
#ifndef LLVM_ADT_SPARSEMULTISET_H
#define LLVM_ADT_SPARSEMULTISET_H
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <iterator>
+#include <limits>
+#include <utility>
namespace llvm {
@@ -73,7 +81,7 @@ namespace llvm {
/// @tparam SparseT An unsigned integer type. See above.
///
template<typename ValueT,
- typename KeyFunctorT = llvm::identity<unsigned>,
+ typename KeyFunctorT = identity<unsigned>,
typename SparseT = uint8_t>
class SparseMultiSet {
static_assert(std::numeric_limits<SparseT>::is_integer &&
@@ -113,16 +121,16 @@ class SparseMultiSet {
typedef typename KeyFunctorT::argument_type KeyT;
typedef SmallVector<SMSNode, 8> DenseT;
DenseT Dense;
- SparseT *Sparse;
- unsigned Universe;
+ SparseT *Sparse = nullptr;
+ unsigned Universe = 0;
KeyFunctorT KeyIndexOf;
SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
/// We have a built-in recycler for reusing tombstone slots. This recycler
/// puts a singly-linked free list into tombstone slots, allowing us quick
/// erasure, iterator preservation, and dense size.
- unsigned FreelistIdx;
- unsigned NumFree;
+ unsigned FreelistIdx = SMSNode::INVALID;
+ unsigned NumFree = 0;
unsigned sparseIndex(const ValueT &Val) const {
assert(ValIndexOf(Val) < Universe &&
@@ -131,11 +139,6 @@ class SparseMultiSet {
}
unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
- // Disable copy construction and assignment.
- // This data structure is not meant to be used that way.
- SparseMultiSet(const SparseMultiSet&) = delete;
- SparseMultiSet &operator=(const SparseMultiSet&) = delete;
-
/// Whether the given entry is the head of the list. List heads's previous
/// pointers are to the tail of the list, allowing for efficient access to the
/// list tail. D must be a valid entry node.
@@ -187,9 +190,9 @@ public:
typedef const ValueT *const_pointer;
typedef unsigned size_type;
- SparseMultiSet()
- : Sparse(nullptr), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) {}
-
+ SparseMultiSet() = default;
+ SparseMultiSet(const SparseMultiSet &) = delete;
+ SparseMultiSet &operator=(const SparseMultiSet &) = delete;
~SparseMultiSet() { free(Sparse); }
/// Set the universe size which determines the largest key the set can hold.
@@ -218,6 +221,7 @@ public:
class iterator_base : public std::iterator<std::bidirectional_iterator_tag,
ValueT> {
friend class SparseMultiSet;
+
SMSPtrTy SMS;
unsigned Idx;
unsigned SparseIdx;
@@ -515,4 +519,4 @@ private:
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_SPARSEMULTISET_H
diff --git a/include/llvm/ADT/SparseSet.h b/include/llvm/ADT/SparseSet.h
index 5b6494d17129..00c18c743219 100644
--- a/include/llvm/ADT/SparseSet.h
+++ b/include/llvm/ADT/SparseSet.h
@@ -22,8 +22,11 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
#include <limits>
+#include <utility>
namespace llvm {
@@ -115,7 +118,7 @@ struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
/// @tparam SparseT An unsigned integer type. See above.
///
template<typename ValueT,
- typename KeyFunctorT = llvm::identity<unsigned>,
+ typename KeyFunctorT = identity<unsigned>,
typename SparseT = uint8_t>
class SparseSet {
static_assert(std::numeric_limits<SparseT>::is_integer &&
@@ -126,16 +129,11 @@ class SparseSet {
typedef SmallVector<ValueT, 8> DenseT;
typedef unsigned size_type;
DenseT Dense;
- SparseT *Sparse;
- unsigned Universe;
+ SparseT *Sparse = nullptr;
+ unsigned Universe = 0;
KeyFunctorT KeyIndexOf;
SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
- // Disable copy construction and assignment.
- // This data structure is not meant to be used that way.
- SparseSet(const SparseSet&) = delete;
- SparseSet &operator=(const SparseSet&) = delete;
-
public:
typedef ValueT value_type;
typedef ValueT &reference;
@@ -143,7 +141,9 @@ public:
typedef ValueT *pointer;
typedef const ValueT *const_pointer;
- SparseSet() : Sparse(nullptr), Universe(0) {}
+ SparseSet() = default;
+ SparseSet(const SparseSet &) = delete;
+ SparseSet &operator=(const SparseSet &) = delete;
~SparseSet() { free(Sparse); }
/// setUniverse - Set the universe size which determines the largest key the
@@ -308,9 +308,8 @@ public:
erase(I);
return true;
}
-
};
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_SPARSESET_H
diff --git a/include/llvm/ADT/Statistic.h b/include/llvm/ADT/Statistic.h
index 32175fdc7c5c..53fa2a50fcba 100644
--- a/include/llvm/ADT/Statistic.h
+++ b/include/llvm/ADT/Statistic.h
@@ -32,6 +32,7 @@
#include <memory>
namespace llvm {
+
class raw_ostream;
class raw_fd_ostream;
@@ -140,16 +141,17 @@ protected:
TsanHappensAfter(this);
return *this;
}
+
void RegisterStatistic();
};
// STATISTIC - A macro to make definition of statistics really simple. This
// automatically passes the DEBUG_TYPE of the file into the statistic.
#define STATISTIC(VARNAME, DESC) \
- static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, 0}
+ static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, false}
/// \brief Enable the collection and printing of statistics.
-void EnableStatistics();
+void EnableStatistics(bool PrintOnExit = true);
/// \brief Check if statistics are enabled.
bool AreStatisticsEnabled();
@@ -163,9 +165,12 @@ void PrintStatistics();
/// \brief Print statistics to the given output stream.
void PrintStatistics(raw_ostream &OS);
-/// Print statistics in JSON format.
+/// Print statistics in JSON format. This does include all global timers (\see
+/// Timer, TimerGroup). Note that the timers are cleared after printing and will
+/// not be printed in human-readable form or by a second call to
+/// PrintStatisticsJSON().
void PrintStatisticsJSON(raw_ostream &OS);
-} // end llvm namespace
+} // end namespace llvm
#endif // LLVM_ADT_STATISTIC_H
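
A hedged sketch of the two Statistic changes above: the STATISTIC initializer now uses false for the initialized flag, and EnableStatistics() takes a PrintOnExit parameter. The DEBUG_TYPE, the counter name, and both functions are invented:

#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "demo"
STATISTIC(NumWidgets, "Number of widgets processed");

void countWidget() { ++NumWidgets; }

void report() {
  // Collect statistics but do not print them automatically at exit.
  llvm::EnableStatistics(/*PrintOnExit=*/false);
  llvm::PrintStatisticsJSON(llvm::errs());   // also clears the global timers
}
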
diff --git a/include/llvm/ADT/StringExtras.h b/include/llvm/ADT/StringExtras.h
index bdbb4d3f5932..488748a5f605 100644
--- a/include/llvm/ADT/StringExtras.h
+++ b/include/llvm/ADT/StringExtras.h
@@ -19,6 +19,7 @@
#include <iterator>
namespace llvm {
+class raw_ostream;
template<typename T> class SmallVectorImpl;
/// hexdigit - Return the hexadecimal character for the
@@ -150,6 +151,12 @@ static inline StringRef getOrdinalSuffix(unsigned Val) {
}
}
+/// PrintEscapedString - Print each character of the specified string, escaping
+/// it if it is not printable or if it is an escape char.
+void PrintEscapedString(StringRef Name, raw_ostream &Out);
+
+namespace detail {
+
template <typename IteratorT>
inline std::string join_impl(IteratorT Begin, IteratorT End,
StringRef Separator, std::input_iterator_tag) {
@@ -184,12 +191,64 @@ inline std::string join_impl(IteratorT Begin, IteratorT End,
return S;
}
+template <typename Sep>
+inline void join_items_impl(std::string &Result, Sep Separator) {}
+
+template <typename Sep, typename Arg>
+inline void join_items_impl(std::string &Result, Sep Separator,
+ const Arg &Item) {
+ Result += Item;
+}
+
+template <typename Sep, typename Arg1, typename... Args>
+inline void join_items_impl(std::string &Result, Sep Separator, const Arg1 &A1,
+ Args &&... Items) {
+ Result += A1;
+ Result += Separator;
+ join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+}
+
+inline size_t join_one_item_size(char C) { return 1; }
+inline size_t join_one_item_size(const char *S) { return S ? ::strlen(S) : 0; }
+
+template <typename T> inline size_t join_one_item_size(const T &Str) {
+ return Str.size();
+}
+
+inline size_t join_items_size() { return 0; }
+
+template <typename A1> inline size_t join_items_size(const A1 &A) {
+ return join_one_item_size(A);
+}
+template <typename A1, typename... Args>
+inline size_t join_items_size(const A1 &A, Args &&... Items) {
+ return join_one_item_size(A) + join_items_size(std::forward<Args>(Items)...);
+}
+} // end namespace detail
+
/// Joins the strings in the range [Begin, End), adding Separator between
/// the elements.
template <typename IteratorT>
inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
typedef typename std::iterator_traits<IteratorT>::iterator_category tag;
- return join_impl(Begin, End, Separator, tag());
+ return detail::join_impl(Begin, End, Separator, tag());
+}
+
+/// Joins the strings in the parameter pack \p Items, adding \p Separator
+/// between the elements. All arguments must be implicitly convertible to
+/// std::string, or there should be an overload of std::string::operator+=()
+/// that accepts the argument explicitly.
+template <typename Sep, typename... Args>
+inline std::string join_items(Sep Separator, Args &&... Items) {
+ std::string Result;
+ if (sizeof...(Items) == 0)
+ return Result;
+
+ size_t NS = detail::join_one_item_size(Separator);
+ size_t NI = detail::join_items_size(std::forward<Args>(Items)...);
+ Result.reserve(NI + (sizeof...(Items) - 1) * NS + 1);
+ detail::join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+ return Result;
}
} // End llvm namespace
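
A minimal example of the new join_items() helper declared above; the separator and items are arbitrary:

#include "llvm/ADT/StringExtras.h"
#include <string>

void demo() {
  // join() concatenates a range; join_items() takes a parameter pack, so the
  // items may mix string literals, C strings, and std::string.
  std::string Path = llvm::join_items('/', "usr", "lib", std::string("clang"));
  (void)Path;   // "usr/lib/clang"
}
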
diff --git a/include/llvm/ADT/StringMap.h b/include/llvm/ADT/StringMap.h
index 260275295c99..24e3ecf71b13 100644
--- a/include/llvm/ADT/StringMap.h
+++ b/include/llvm/ADT/StringMap.h
@@ -17,10 +17,17 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
#include <cstring>
#include <utility>
+#include <initializer_list>
+#include <new>
+#include <utility>
namespace llvm {
+
template<typename ValueT>
class StringMapConstIterator;
template<typename ValueT>
@@ -119,8 +126,6 @@ public:
/// and data.
template<typename ValueTy>
class StringMapEntry : public StringMapEntryBase {
- StringMapEntry(StringMapEntry &E) = delete;
-
public:
ValueTy second;
@@ -129,6 +134,7 @@ public:
template <typename... InitTy>
StringMapEntry(unsigned strLen, InitTy &&... InitVals)
: StringMapEntryBase(strLen), second(std::forward<InitTy>(InitVals)...) {}
+ StringMapEntry(StringMapEntry &E) = delete;
StringRef getKey() const {
return StringRef(getKeyData(), getKeyLength());
@@ -157,7 +163,7 @@ public:
// terminator.
unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
KeyLength+1;
- unsigned Alignment = alignOf<StringMapEntry>();
+ unsigned Alignment = alignof(StringMapEntry);
StringMapEntry *NewItem =
static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
@@ -329,9 +335,7 @@ public:
/// Lookup the ValueTy for the \p Key, or create a default constructed value
/// if the key is not in the map.
- ValueTy &operator[](StringRef Key) {
- return emplace_second(Key).first->second;
- }
+ ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; }
/// count - Return 1 if the element is in the map, 0 otherwise.
size_type count(StringRef Key) const {
@@ -362,7 +366,7 @@ public:
/// if and only if the insertion takes place, and the iterator component of
/// the pair points to the element with key equivalent to the key of the pair.
std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
- return emplace_second(KV.first, std::move(KV.second));
+ return try_emplace(KV.first, std::move(KV.second));
}
/// Emplace a new element for the specified key into the map if the key isn't
@@ -370,7 +374,7 @@ public:
/// if and only if the insertion takes place, and the iterator component of
/// the pair points to the element with key equivalent to the key of the pair.
template <typename... ArgsTy>
- std::pair<iterator, bool> emplace_second(StringRef Key, ArgsTy &&... Args) {
+ std::pair<iterator, bool> try_emplace(StringRef Key, ArgsTy &&... Args) {
unsigned BucketNo = LookupBucketFor(Key);
StringMapEntryBase *&Bucket = TheTable[BucketNo];
if (Bucket && Bucket != getTombstoneVal())
@@ -442,12 +446,12 @@ public:
template <typename ValueTy> class StringMapConstIterator {
protected:
- StringMapEntryBase **Ptr;
+ StringMapEntryBase **Ptr = nullptr;
public:
typedef StringMapEntry<ValueTy> value_type;
- StringMapConstIterator() : Ptr(nullptr) { }
+ StringMapConstIterator() = default;
explicit StringMapConstIterator(StringMapEntryBase **Bucket,
bool NoAdvance = false)
@@ -488,11 +492,13 @@ private:
template<typename ValueTy>
class StringMapIterator : public StringMapConstIterator<ValueTy> {
public:
- StringMapIterator() {}
+ StringMapIterator() = default;
+
explicit StringMapIterator(StringMapEntryBase **Bucket,
bool NoAdvance = false)
: StringMapConstIterator<ValueTy>(Bucket, NoAdvance) {
}
+
StringMapEntry<ValueTy> &operator*() const {
return *static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
}
@@ -500,6 +506,7 @@ public:
return static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
}
};
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGMAP_H
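
A small sketch of the rename of emplace_second() to try_emplace() above; the keys and values are made up:

#include "llvm/ADT/StringMap.h"
#include <string>

void demo() {
  llvm::StringMap<std::string> M;
  // Construct the value in place only if "clang" is not already present.
  auto R = M.try_emplace("clang", 5, 'x');   // value becomes "xxxxx"
  (void)R.second;                            // true if the insertion happened
  M["llvm"] = "tools";                       // operator[] now forwards to try_emplace()
}
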
diff --git a/include/llvm/ADT/StringRef.h b/include/llvm/ADT/StringRef.h
index 398ca6920249..d80a848c44a1 100644
--- a/include/llvm/ADT/StringRef.h
+++ b/include/llvm/ADT/StringRef.h
@@ -10,6 +10,7 @@
#ifndef LLVM_ADT_STRINGREF_H
#define LLVM_ADT_STRINGREF_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
@@ -32,6 +33,10 @@ namespace llvm {
bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);
+ bool consumeUnsignedInteger(StringRef &Str, unsigned Radix,
+ unsigned long long &Result);
+ bool consumeSignedInteger(StringRef &Str, unsigned Radix, long long &Result);
+
/// StringRef - Represent a constant reference to a string, i.e. a character
/// array and a length, which need not be null terminated.
///
@@ -48,10 +53,10 @@ namespace llvm {
private:
/// The start of the string, in an external buffer.
- const char *Data;
+ const char *Data = nullptr;
/// The length of the string.
- size_t Length;
+ size_t Length = 0;
// Workaround memcmp issue with null pointers (undefined behavior)
// by providing a specialized version
@@ -66,28 +71,31 @@ namespace llvm {
/// @{
/// Construct an empty string ref.
- /*implicit*/ StringRef() : Data(nullptr), Length(0) {}
+ /*implicit*/ StringRef() = default;
+
+ /// Disable conversion from nullptr. This prevents things like
+ /// if (S == nullptr)
+ StringRef(std::nullptr_t) = delete;
/// Construct a string ref from a cstring.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
/*implicit*/ StringRef(const char *Str)
- : Data(Str) {
- assert(Str && "StringRef cannot be built from a NULL argument");
- Length = ::strlen(Str); // invoking strlen(NULL) is undefined behavior
- }
+ : Data(Str), Length(Str ? ::strlen(Str) : 0) {}
/// Construct a string ref from a pointer and length.
LLVM_ATTRIBUTE_ALWAYS_INLINE
- /*implicit*/ StringRef(const char *data, size_t length)
- : Data(data), Length(length) {
- assert((data || length == 0) &&
- "StringRef cannot be built from a NULL argument with non-null length");
- }
+ /*implicit*/ constexpr StringRef(const char *data, size_t length)
+ : Data(data), Length(length) {}
/// Construct a string ref from an std::string.
LLVM_ATTRIBUTE_ALWAYS_INLINE
/*implicit*/ StringRef(const std::string &Str)
: Data(Str.data()), Length(Str.length()) {}
+ static StringRef withNullAsEmpty(const char *data) {
+ return StringRef(data ? data : "");
+ }
+
/// @}
/// @name Iterators
/// @{
@@ -112,31 +120,37 @@ namespace llvm {
/// data - Get a pointer to the start of the string (which may not be null
/// terminated).
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
const char *data() const { return Data; }
/// empty - Check if the string is empty.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool empty() const { return Length == 0; }
/// size - Get the string size.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
size_t size() const { return Length; }
/// front - Get the first character in the string.
+ LLVM_NODISCARD
char front() const {
assert(!empty());
return Data[0];
}
/// back - Get the last character in the string.
+ LLVM_NODISCARD
char back() const {
assert(!empty());
return Data[Length-1];
}
// copy - Allocate copy in Allocator and return StringRef to it.
- template <typename Allocator> StringRef copy(Allocator &A) const {
+ template <typename Allocator>
+ LLVM_NODISCARD StringRef copy(Allocator &A) const {
// Don't request a length 0 copy from the allocator.
if (empty())
return StringRef();
@@ -147,6 +161,7 @@ namespace llvm {
/// equals - Check for string equality, this is more efficient than
/// compare() when the relative ordering of inequal strings isn't needed.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool equals(StringRef RHS) const {
return (Length == RHS.Length &&
@@ -154,12 +169,14 @@ namespace llvm {
}
/// equals_lower - Check for string equality, ignoring case.
+ LLVM_NODISCARD
bool equals_lower(StringRef RHS) const {
return Length == RHS.Length && compare_lower(RHS) == 0;
}
/// compare - Compare two strings; the result is -1, 0, or 1 if this string
/// is lexicographically less than, equal to, or greater than the \p RHS.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
int compare(StringRef RHS) const {
// Check the prefix for a mismatch.
@@ -173,10 +190,12 @@ namespace llvm {
}
/// compare_lower - Compare two strings, ignoring case.
+ LLVM_NODISCARD
int compare_lower(StringRef RHS) const;
/// compare_numeric - Compare two strings, treating sequences of digits as
/// numbers.
+ LLVM_NODISCARD
int compare_numeric(StringRef RHS) const;
/// \brief Determine the edit distance between this string and another
@@ -197,10 +216,12 @@ namespace llvm {
/// or (if \p AllowReplacements is \c true) replacements needed to
/// transform one of the given strings into the other. If zero,
/// the strings are identical.
+ LLVM_NODISCARD
unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
unsigned MaxEditDistance = 0) const;
/// str - Get the contents as an std::string.
+ LLVM_NODISCARD
std::string str() const {
if (!Data) return std::string();
return std::string(Data, Length);
@@ -210,11 +231,21 @@ namespace llvm {
/// @name Operator Overloads
/// @{
+ LLVM_NODISCARD
char operator[](size_t Index) const {
assert(Index < Length && "Invalid index!");
return Data[Index];
}
+ /// Disallow accidental assignment from a temporary std::string.
+ ///
+ /// The declaration here is extra complicated so that `stringRef = {}`
+ /// and `stringRef = "abc"` continue to select the move assignment operator.
+ template <typename T>
+ typename std::enable_if<std::is_same<T, std::string>::value,
+ StringRef>::type &
+ operator=(T &&Str) = delete;
+
/// @}
/// @name Type Conversions
/// @{
@@ -228,6 +259,7 @@ namespace llvm {
/// @{
/// Check if this string starts with the given \p Prefix.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool startswith(StringRef Prefix) const {
return Length >= Prefix.Length &&
@@ -235,9 +267,11 @@ namespace llvm {
}
/// Check if this string starts with the given \p Prefix, ignoring case.
+ LLVM_NODISCARD
bool startswith_lower(StringRef Prefix) const;
/// Check if this string ends with the given \p Suffix.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool endswith(StringRef Suffix) const {
return Length >= Suffix.Length &&
@@ -245,6 +279,7 @@ namespace llvm {
}
/// Check if this string ends with the given \p Suffix, ignoring case.
+ LLVM_NODISCARD
bool endswith_lower(StringRef Suffix) const;
/// @}
@@ -255,6 +290,7 @@ namespace llvm {
///
/// \returns The index of the first occurrence of \p C, or npos if not
/// found.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
size_t find(char C, size_t From = 0) const {
size_t FindBegin = std::min(From, Length);
@@ -266,16 +302,58 @@ namespace llvm {
return npos;
}
+ /// Search for the first character \p C in the string, ignoring case.
+ ///
+ /// \returns The index of the first occurrence of \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find_lower(char C, size_t From = 0) const;
+
+ /// Search for the first character satisfying the predicate \p F
+ ///
+ /// \returns The index of the first character satisfying \p F starting from
+ /// \p From, or npos if not found.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ size_t find_if(function_ref<bool(char)> F, size_t From = 0) const {
+ StringRef S = drop_front(From);
+ while (!S.empty()) {
+ if (F(S.front()))
+ return size() - S.size();
+ S = S.drop_front();
+ }
+ return npos;
+ }
+
+ /// Search for the first character not satisfying the predicate \p F
+ ///
+ /// \returns The index of the first character not satisfying \p F starting
+ /// from \p From, or npos if not found.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ size_t find_if_not(function_ref<bool(char)> F, size_t From = 0) const {
+ return find_if([F](char c) { return !F(c); }, From);
+ }
+
/// Search for the first string \p Str in the string.
///
/// \returns The index of the first occurrence of \p Str, or npos if not
/// found.
+ LLVM_NODISCARD
size_t find(StringRef Str, size_t From = 0) const;
+ /// Search for the first string \p Str in the string, ignoring case.
+ ///
+ /// \returns The index of the first occurrence of \p Str, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find_lower(StringRef Str, size_t From = 0) const;
+
/// Search for the last character \p C in the string.
///
/// \returns The index of the last occurrence of \p C, or npos if not
/// found.
+ LLVM_NODISCARD
size_t rfind(char C, size_t From = npos) const {
From = std::min(From, Length);
size_t i = From;
@@ -287,14 +365,30 @@ namespace llvm {
return npos;
}
+ /// Search for the last character \p C in the string, ignoring case.
+ ///
+ /// \returns The index of the last occurrence of \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t rfind_lower(char C, size_t From = npos) const;
+
/// Search for the last string \p Str in the string.
///
/// \returns The index of the last occurrence of \p Str, or npos if not
/// found.
+ LLVM_NODISCARD
size_t rfind(StringRef Str) const;
+ /// Search for the last string \p Str in the string, ignoring case.
+ ///
+ /// \returns The index of the last occurrence of \p Str, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t rfind_lower(StringRef Str) const;
+
/// Find the first character in the string that is \p C, or npos if not
/// found. Same as find.
+ LLVM_NODISCARD
size_t find_first_of(char C, size_t From = 0) const {
return find(C, From);
}
@@ -303,20 +397,24 @@ namespace llvm {
/// not found.
///
/// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
size_t find_first_of(StringRef Chars, size_t From = 0) const;
/// Find the first character in the string that is not \p C or npos if not
/// found.
+ LLVM_NODISCARD
size_t find_first_not_of(char C, size_t From = 0) const;
/// Find the first character in the string that is not in the string
/// \p Chars, or npos if not found.
///
/// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
size_t find_first_not_of(StringRef Chars, size_t From = 0) const;
/// Find the last character in the string that is \p C, or npos if not
/// found.
+ LLVM_NODISCARD
size_t find_last_of(char C, size_t From = npos) const {
return rfind(C, From);
}
@@ -325,23 +423,53 @@ namespace llvm {
/// found.
///
/// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
size_t find_last_of(StringRef Chars, size_t From = npos) const;
/// Find the last character in the string that is not \p C, or npos if not
/// found.
+ LLVM_NODISCARD
size_t find_last_not_of(char C, size_t From = npos) const;
/// Find the last character in the string that is not in \p Chars, or
/// npos if not found.
///
/// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
size_t find_last_not_of(StringRef Chars, size_t From = npos) const;
+ /// Return true if the given string is a substring of *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool contains(StringRef Other) const { return find(Other) != npos; }
+
+ /// Return true if the given character is contained in *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool contains(char C) const { return find_first_of(C) != npos; }
+
+ /// Return true if the given string is a substring of *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool contains_lower(StringRef Other) const {
+ return find_lower(Other) != npos;
+ }
+
+ /// Return true if the given character is contained in *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool contains_lower(char C) const { return find_lower(C) != npos; }
+
/// @}
/// @name Helpful Algorithms
/// @{
/// Return the number of occurrences of \p C in the string.
+ LLVM_NODISCARD
size_t count(char C) const {
size_t Count = 0;
for (size_t i = 0, e = Length; i != e; ++i)
@@ -386,6 +514,37 @@ namespace llvm {
return false;
}
+ /// Parse the current string as an integer of the specified radix. If
+ /// \p Radix is specified as zero, this does radix autosensing using
+ /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+ ///
+ /// If the string does not begin with a number of the specified radix,
+ /// this returns true to signify the error. The string is considered
+ /// erroneous if empty or if it overflows T.
+ /// The portion of the string representing the discovered numeric value
+ /// is removed from the beginning of the string.
+ template <typename T>
+ typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+ consumeInteger(unsigned Radix, T &Result) {
+ long long LLVal;
+ if (consumeSignedInteger(*this, Radix, LLVal) ||
+ static_cast<long long>(static_cast<T>(LLVal)) != LLVal)
+ return true;
+ Result = LLVal;
+ return false;
+ }
+
+ template <typename T>
+ typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+ consumeInteger(unsigned Radix, T &Result) {
+ unsigned long long ULLVal;
+ if (consumeUnsignedInteger(*this, Radix, ULLVal) ||
+ static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+ return true;
+ Result = ULLVal;
+ return false;
+ }
+
/// Parse the current string as an integer of the specified \p Radix, or of
/// an autosensed radix if the \p Radix given is 0. The current value in
/// \p Result is discarded, and the storage is changed to be wide enough to
@@ -403,9 +562,11 @@ namespace llvm {
/// @{
// Convert the given ASCII string to lowercase.
+ LLVM_NODISCARD
std::string lower() const;
/// Convert the given ASCII string to uppercase.
+ LLVM_NODISCARD
std::string upper() const;
/// @}
@@ -421,14 +582,54 @@ namespace llvm {
/// \param N The number of characters to included in the substring. If N
/// exceeds the number of characters remaining in the string, the string
/// suffix (starting with \p Start) will be returned.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef substr(size_t Start, size_t N = npos) const {
Start = std::min(Start, Length);
return StringRef(Data + Start, std::min(N, Length - Start));
}
+ /// Return a StringRef equal to 'this' but with only the first \p N
+ /// elements remaining. If \p N is greater than the length of the
+ /// string, the entire string is returned.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef take_front(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_back(size() - N);
+ }
+
+ /// Return a StringRef equal to 'this' but with only the last \p N
+ /// elements remaining. If \p N is greater than the length of the
+ /// string, the entire string is returned.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef take_back(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_front(size() - N);
+ }
+
+ /// Return the longest prefix of 'this' such that every character
+ /// in the prefix satisfies the given predicate.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef take_while(function_ref<bool(char)> F) const {
+ return substr(0, find_if_not(F));
+ }
+
+ /// Return the longest prefix of 'this' such that no character in
+ /// the prefix satisfies the given predicate.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef take_until(function_ref<bool(char)> F) const {
+ return substr(0, find_if(F));
+ }
+
/// Return a StringRef equal to 'this' but with the first \p N elements
/// dropped.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef drop_front(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
@@ -437,12 +638,51 @@ namespace llvm {
/// Return a StringRef equal to 'this' but with the last \p N elements
/// dropped.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef drop_back(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return substr(0, size()-N);
}
+ /// Return a StringRef equal to 'this', but with all characters satisfying
+ /// the given predicate dropped from the beginning of the string.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef drop_while(function_ref<bool(char)> F) const {
+ return substr(find_if_not(F));
+ }
+
+ /// Return a StringRef equal to 'this', but with all characters not
+ /// satisfying the given predicate dropped from the beginning of the string.
+ LLVM_NODISCARD
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef drop_until(function_ref<bool(char)> F) const {
+ return substr(find_if(F));
+ }
+
+ /// Returns true if this StringRef has the given prefix and removes that
+ /// prefix.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool consume_front(StringRef Prefix) {
+ if (!startswith(Prefix))
+ return false;
+
+ *this = drop_front(Prefix.size());
+ return true;
+ }
+
+ /// Returns true if this StringRef has the given suffix and removes that
+ /// suffix.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool consume_back(StringRef Suffix) {
+ if (!endswith(Suffix))
+ return false;
+
+ *this = drop_back(Suffix.size());
+ return true;
+ }
+
/// Return a reference to the substring from [Start, End).
///
/// \param Start The index of the starting character in the substring; if
@@ -454,6 +694,7 @@ namespace llvm {
/// remaining in the string, the string suffix (starting with \p Start)
/// will be returned. If this is less than \p Start, an empty string will
/// be returned.
+ LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef slice(size_t Start, size_t End) const {
Start = std::min(Start, Length);
@@ -471,6 +712,7 @@ namespace llvm {
///
/// \param Separator The character to split on.
/// \returns The split substrings.
+ LLVM_NODISCARD
std::pair<StringRef, StringRef> split(char Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
@@ -488,6 +730,7 @@ namespace llvm {
///
/// \param Separator - The string to split on.
/// \return - The split substrings.
+ LLVM_NODISCARD
std::pair<StringRef, StringRef> split(StringRef Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
@@ -540,6 +783,7 @@ namespace llvm {
///
/// \param Separator - The character to split on.
/// \return - The split substrings.
+ LLVM_NODISCARD
std::pair<StringRef, StringRef> rsplit(char Separator) const {
size_t Idx = rfind(Separator);
if (Idx == npos)
@@ -549,36 +793,42 @@ namespace llvm {
/// Return string with consecutive \p Char characters starting from the
/// the left removed.
+ LLVM_NODISCARD
StringRef ltrim(char Char) const {
return drop_front(std::min(Length, find_first_not_of(Char)));
}
/// Return string with consecutive characters in \p Chars starting from
/// the left removed.
+ LLVM_NODISCARD
StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_front(std::min(Length, find_first_not_of(Chars)));
}
/// Return string with consecutive \p Char characters starting from the
/// right removed.
+ LLVM_NODISCARD
StringRef rtrim(char Char) const {
return drop_back(Length - std::min(Length, find_last_not_of(Char) + 1));
}
/// Return string with consecutive characters in \p Chars starting from
/// the right removed.
+ LLVM_NODISCARD
StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
}
/// Return string with consecutive \p Char characters starting from the
/// left and right removed.
+ LLVM_NODISCARD
StringRef trim(char Char) const {
return ltrim(Char).rtrim(Char);
}
/// Return string with consecutive characters in \p Chars starting from
/// the left and right removed.
+ LLVM_NODISCARD
StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
return ltrim(Chars).rtrim(Chars);
}
@@ -586,6 +836,28 @@ namespace llvm {
/// @}
};
+ /// A wrapper around a string literal that serves as a proxy for constructing
+ /// global tables of StringRefs with the length computed at compile time.
+ /// In order to avoid the invocation of a global constructor, StringLiteral
+ /// should *only* be used in a constexpr context, as such:
+ ///
+ /// constexpr StringLiteral S("test");
+ ///
+ class StringLiteral : public StringRef {
+ public:
+ template <size_t N>
+ constexpr StringLiteral(const char (&Str)[N])
+#if defined(__clang__) && __has_attribute(enable_if)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wgcc-compat"
+ __attribute((enable_if(__builtin_strlen(Str) == N - 1,
+ "invalid string literal")))
+#pragma clang diagnostic pop
+#endif
+ : StringRef(Str, N - 1) {
+ }
+ };
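+
+  // Example (editor's illustrative sketch, not part of this change): a static
+  // table of StringRefs whose lengths are computed at compile time.
+  //
+  //   static constexpr StringLiteral Keywords[] = {"if", "else", "while"};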
+
/// @name StringRef Comparison Operators
/// @{
@@ -595,9 +867,7 @@ namespace llvm {
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
- inline bool operator!=(StringRef LHS, StringRef RHS) {
- return !(LHS == RHS);
- }
+ inline bool operator!=(StringRef LHS, StringRef RHS) { return !(LHS == RHS); }
inline bool operator<(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) == -1;
@@ -622,6 +892,7 @@ namespace llvm {
/// @}
/// \brief Compute a hash_code for a StringRef.
+ LLVM_NODISCARD
hash_code hash_value(StringRef S);
// StringRefs can be treated like a POD type.
diff --git a/include/llvm/ADT/StringSwitch.h b/include/llvm/ADT/StringSwitch.h
index 42b0fc4bc441..75577b7738ba 100644
--- a/include/llvm/ADT/StringSwitch.h
+++ b/include/llvm/ADT/StringSwitch.h
@@ -53,104 +53,197 @@ public:
explicit StringSwitch(StringRef S)
: Str(S), Result(nullptr) { }
+ // StringSwitch is not copyable.
+ StringSwitch(const StringSwitch &) = delete;
+ void operator=(const StringSwitch &) = delete;
+
+ StringSwitch(StringSwitch &&other) {
+ *this = std::move(other);
+ }
+ StringSwitch &operator=(StringSwitch &&other) {
+ Str = other.Str;
+ Result = other.Result;
+ return *this;
+ }
+
+ ~StringSwitch() = default;
+
+ // Case-sensitive case matchers
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& Case(const char (&S)[N], const T& Value) {
+ assert(N);
if (!Result && N-1 == Str.size() &&
- (std::memcmp(S, Str.data(), N-1) == 0)) {
+ (N == 1 || std::memcmp(S, Str.data(), N-1) == 0)) {
Result = &Value;
}
-
return *this;
}
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& EndsWith(const char (&S)[N], const T &Value) {
+ assert(N);
if (!Result && Str.size() >= N-1 &&
- std::memcmp(S, Str.data() + Str.size() + 1 - N, N-1) == 0) {
+ (N == 1 || std::memcmp(S, Str.data() + Str.size() + 1 - N, N-1) == 0)) {
Result = &Value;
}
-
return *this;
}
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& StartsWith(const char (&S)[N], const T &Value) {
+ assert(N);
if (!Result && Str.size() >= N-1 &&
- std::memcmp(S, Str.data(), N-1) == 0) {
+ (N == 1 || std::memcmp(S, Str.data(), N-1) == 0)) {
Result = &Value;
}
-
return *this;
}
template<unsigned N0, unsigned N1>
LLVM_ATTRIBUTE_ALWAYS_INLINE
- StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const T& Value) {
- if (!Result && (
- (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
- (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0))) {
- Result = &Value;
- }
-
- return *this;
+ return Case(S0, Value).Case(S1, Value);
}
template<unsigned N0, unsigned N1, unsigned N2>
LLVM_ATTRIBUTE_ALWAYS_INLINE
- StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const T& Value) {
- if (!Result && (
- (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
- (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
- (N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0))) {
- Result = &Value;
- }
-
- return *this;
+ return Case(S0, Value).Cases(S1, S2, Value);
}
template<unsigned N0, unsigned N1, unsigned N2, unsigned N3>
LLVM_ATTRIBUTE_ALWAYS_INLINE
- StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const T& Value) {
- if (!Result && (
- (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
- (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
- (N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0) ||
- (N3-1 == Str.size() && std::memcmp(S3, Str.data(), N3-1) == 0))) {
- Result = &Value;
- }
-
- return *this;
+ return Case(S0, Value).Cases(S1, S2, S3, Value);
}
template<unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4>
LLVM_ATTRIBUTE_ALWAYS_INLINE
- StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const T& Value) {
- if (!Result && (
- (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
- (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
- (N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0) ||
- (N3-1 == Str.size() && std::memcmp(S3, Str.data(), N3-1) == 0) ||
- (N4-1 == Str.size() && std::memcmp(S4, Str.data(), N4-1) == 0))) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+ unsigned N5>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const char (&S3)[N3],
+ const char (&S4)[N4], const char (&S5)[N5],
+ const T &Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+ unsigned N5, unsigned N6>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const char (&S3)[N3],
+ const char (&S4)[N4], const char (&S5)[N5],
+ const char (&S6)[N6], const T &Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+ unsigned N5, unsigned N6, unsigned N7>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const char (&S3)[N3],
+ const char (&S4)[N4], const char (&S5)[N5],
+ const char (&S6)[N6], const char (&S7)[N7],
+ const T &Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+ unsigned N5, unsigned N6, unsigned N7, unsigned N8>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const char (&S3)[N3],
+ const char (&S4)[N4], const char (&S5)[N5],
+ const char (&S6)[N6], const char (&S7)[N7],
+ const char (&S8)[N8], const T &Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+ unsigned N5, unsigned N6, unsigned N7, unsigned N8, unsigned N9>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const char (&S3)[N3],
+ const char (&S4)[N4], const char (&S5)[N5],
+ const char (&S6)[N6], const char (&S7)[N7],
+ const char (&S8)[N8], const char (&S9)[N9],
+ const T &Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, S9, Value);
+ }
+
+ // Case-insensitive case matchers.
+ template <unsigned N>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &CaseLower(const char (&S)[N],
+ const T &Value) {
+ if (!Result && Str.equals_lower(StringRef(S, N - 1)))
Result = &Value;
- }
return *this;
}
+ template <unsigned N>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &EndsWithLower(const char (&S)[N],
+ const T &Value) {
+ if (!Result && Str.endswith_lower(StringRef(S, N - 1)))
+ Result = &Value;
+
+ return *this;
+ }
+
+ template <unsigned N>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &StartsWithLower(const char (&S)[N],
+ const T &Value) {
+ if (!Result && Str.startswith_lower(StringRef(S, N - 1)))
+ Result = &Value;
+
+ return *this;
+ }
+ template <unsigned N0, unsigned N1>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
+ CasesLower(const char (&S0)[N0], const char (&S1)[N1], const T &Value) {
+ return CaseLower(S0, Value).CaseLower(S1, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
+ CasesLower(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2],
+ const T &Value) {
+ return CaseLower(S0, Value).CasesLower(S1, S2, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2, unsigned N3>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
+ CasesLower(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2],
+ const char (&S3)[N3], const T &Value) {
+ return CaseLower(S0, Value).CasesLower(S1, S2, S3, Value);
+ }
+
+ template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
+ CasesLower(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2],
+ const char (&S3)[N3], const char (&S4)[N4], const T &Value) {
+ return CaseLower(S0, Value).CasesLower(S1, S2, S3, S4, Value);
+ }
+
LLVM_ATTRIBUTE_ALWAYS_INLINE
- R Default(const T& Value) const {
+ R Default(const T &Value) const {
if (Result)
return *Result;
-
return Value;
}
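+
+  // Example (editor's illustrative sketch, not part of this change; Color and
+  // Name are hypothetical): the new case-insensitive matchers compose with the
+  // existing case-sensitive ones.
+  //
+  //   Color C = StringSwitch<Color>(Name)
+  //                 .Case("red", Color::Red)
+  //                 .CasesLower("grey", "gray", Color::Gray)
+  //                 .Default(Color::Unknown);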
diff --git a/include/llvm/ADT/TinyPtrVector.h b/include/llvm/ADT/TinyPtrVector.h
index 605f0e70a857..ca43b6046193 100644
--- a/include/llvm/ADT/TinyPtrVector.h
+++ b/include/llvm/ADT/TinyPtrVector.h
@@ -11,8 +11,13 @@
#define LLVM_ADT_TINYPTRVECTOR_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
namespace llvm {
@@ -25,15 +30,16 @@ namespace llvm {
template <typename EltTy>
class TinyPtrVector {
public:
- typedef llvm::SmallVector<EltTy, 4> VecTy;
+ typedef SmallVector<EltTy, 4> VecTy;
typedef typename VecTy::value_type value_type;
- typedef llvm::PointerUnion<EltTy, VecTy *> PtrUnion;
+ typedef PointerUnion<EltTy, VecTy *> PtrUnion;
private:
PtrUnion Val;
public:
- TinyPtrVector() {}
+ TinyPtrVector() = default;
+
~TinyPtrVector() {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
delete V;
@@ -43,6 +49,7 @@ public:
if (VecTy *V = Val.template dyn_cast<VecTy*>())
Val = new VecTy(*V);
}
+
TinyPtrVector &operator=(const TinyPtrVector &RHS) {
if (this == &RHS)
return *this;
@@ -74,6 +81,7 @@ public:
TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
RHS.Val = (EltTy)nullptr;
}
+
TinyPtrVector &operator=(TinyPtrVector &&RHS) {
if (this == &RHS)
return *this;
@@ -170,6 +178,7 @@ public:
return Val.template get<VecTy *>()->begin();
}
+
iterator end() {
if (Val.template is<EltTy>())
return begin() + (Val.isNull() ? 0 : 1);
@@ -187,9 +196,11 @@ public:
reverse_iterator rbegin() { return reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
+
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
+
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
@@ -329,6 +340,7 @@ public:
return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
}
};
+
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_TINYPTRVECTOR_H
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index b98f8407d075..d4130e1e85ae 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -64,11 +64,14 @@ public:
ppc64le, // PPC64LE: powerpc64le
r600, // R600: AMD GPUs HD2XXX - HD6XXX
amdgcn, // AMDGCN: AMD GCN GPUs
+ riscv32, // RISC-V (32-bit): riscv32
+ riscv64, // RISC-V (64-bit): riscv64
sparc, // Sparc: sparc
sparcv9, // Sparcv9: Sparcv9
sparcel, // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant
systemz, // SystemZ: s390x
tce, // TCE (http://tce.cs.tut.fi/): tce
+ tcele, // TCE little endian (http://tce.cs.tut.fi/): tcele
thumb, // Thumb (little endian): thumb, thumbv.*
thumbeb, // Thumb (big endian): thumbeb
x86, // X86: i[3-9]86
@@ -99,6 +102,7 @@ public:
ARMSubArch_v8_2a,
ARMSubArch_v8_1a,
ARMSubArch_v8,
+ ARMSubArch_v8r,
ARMSubArch_v8m_baseline,
ARMSubArch_v8m_mainline,
ARMSubArch_v7,
@@ -144,6 +148,7 @@ public:
Darwin,
DragonFly,
FreeBSD,
+ Fuchsia,
IOS,
KFreeBSD,
Linux,
@@ -168,7 +173,8 @@ public:
TvOS, // Apple tvOS
WatchOS, // Apple watchOS
Mesa3D,
- LastOSType = Mesa3D
+ Contiki,
+ LastOSType = Contiki
};
enum EnvironmentType {
UnknownEnvironment,
@@ -191,7 +197,8 @@ public:
Cygnus,
AMDOpenCL,
CoreCLR,
- LastEnvironmentType = CoreCLR
+ OpenCL,
+ LastEnvironmentType = OpenCL
};
enum ObjectFormatType {
UnknownObjectFormat,
@@ -461,6 +468,10 @@ public:
return getOS() == Triple::FreeBSD;
}
+ bool isOSFuchsia() const {
+ return getOS() == Triple::Fuchsia;
+ }
+
bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }
bool isOSSolaris() const {
@@ -482,6 +493,10 @@ public:
Env == Triple::GNUX32;
}
+ bool isOSContiki() const {
+ return getOS() == Triple::Contiki;
+ }
+
/// Checks if the environment could be MSVC.
bool isWindowsMSVCEnvironment() const {
return getOS() == Triple::Win32 &&
@@ -690,7 +705,7 @@ public:
/// @{
/// getArchTypeName - Get the canonical name for the \p Kind architecture.
- static const char *getArchTypeName(ArchType Kind);
+ static StringRef getArchTypeName(ArchType Kind);
/// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind
/// architecture. This is the prefix used by the architecture specific
@@ -698,17 +713,17 @@ public:
/// Intrinsic::getIntrinsicForGCCBuiltin().
///
/// \return - The architecture prefix, or 0 if none is defined.
- static const char *getArchTypePrefix(ArchType Kind);
+ static StringRef getArchTypePrefix(ArchType Kind);
/// getVendorTypeName - Get the canonical name for the \p Kind vendor.
- static const char *getVendorTypeName(VendorType Kind);
+ static StringRef getVendorTypeName(VendorType Kind);
/// getOSTypeName - Get the canonical name for the \p Kind operating system.
- static const char *getOSTypeName(OSType Kind);
+ static StringRef getOSTypeName(OSType Kind);
/// getEnvironmentTypeName - Get the canonical name for the \p Kind
/// environment.
- static const char *getEnvironmentTypeName(EnvironmentType Kind);
+ static StringRef getEnvironmentTypeName(EnvironmentType Kind);
/// @}
/// @name Static helpers for converting alternate architecture names.
diff --git a/include/llvm/ADT/Twine.h b/include/llvm/ADT/Twine.h
index 81b1a6d946fc..f5f00dcfafe5 100644
--- a/include/llvm/ADT/Twine.h
+++ b/include/llvm/ADT/Twine.h
@@ -12,12 +12,14 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
+#include <cstdint>
#include <string>
namespace llvm {
+
+ class formatv_object_base;
class raw_ostream;
/// Twine - A lightweight data structure for efficiently representing the
@@ -101,6 +103,9 @@ namespace llvm {
/// A pointer to a SmallString instance.
SmallStringKind,
+ /// A pointer to a formatv_object_base instance.
+ FormatvObjectKind,
+
/// A char value, to render as a character.
CharKind,
@@ -136,6 +141,7 @@ namespace llvm {
const std::string *stdString;
const StringRef *stringRef;
const SmallVectorImpl<char> *smallString;
+ const formatv_object_base *formatvObject;
char character;
unsigned int decUI;
int decI;
@@ -146,7 +152,6 @@ namespace llvm {
const uint64_t *uHex;
};
- private:
/// LHS - The prefix in the concatenation, which may be uninitialized for
/// Null or Empty kinds.
Child LHS;
@@ -158,7 +163,6 @@ namespace llvm {
/// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
NodeKind RHSKind;
- private:
/// Construct a nullary twine; the kind must be NullKind or EmptyKind.
explicit Twine(NodeKind Kind)
: LHSKind(Kind), RHSKind(EmptyKind) {
@@ -179,10 +183,6 @@ namespace llvm {
assert(isValid() && "Invalid twine!");
}
- /// Since the intended use of twines is as temporary objects, assignments
- /// when concatenating might cause undefined behavior or stack corruptions
- Twine &operator=(const Twine &Other) = delete;
-
/// Check for the null twine.
bool isNull() const {
return getLHSKind() == NullKind;
@@ -295,6 +295,13 @@ namespace llvm {
assert(isValid() && "Invalid twine!");
}
+ /// Construct from a formatv_object_base.
+ /*implicit*/ Twine(const formatv_object_base &Fmt)
+ : LHSKind(FormatvObjectKind), RHSKind(EmptyKind) {
+ LHS.formatvObject = &Fmt;
+ assert(isValid() && "Invalid twine!");
+ }
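+
+    // Example (editor's illustrative sketch, not part of this change; assumes
+    // llvm::formatv from Support/FormatVariadic.h, and reportError is a
+    // hypothetical function taking a Twine):
+    //
+    //   reportError(formatv("unknown section '{0}' at offset {1}", Name, Off));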
+
/// Construct from a char.
explicit Twine(char Val)
: LHSKind(CharKind), RHSKind(EmptyKind) {
@@ -370,6 +377,10 @@ namespace llvm {
assert(isValid() && "Invalid twine!");
}
+ /// Since the intended use of twines is as temporary objects, assignments
+ /// when concatenating might cause undefined behavior or stack corruptions
+ Twine &operator=(const Twine &) = delete;
+
/// Create a 'null' string, which is an empty string that always
/// concatenates to form another empty string.
static Twine createNull() {
@@ -535,6 +546,7 @@ namespace llvm {
}
/// @}
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_ADT_TWINE_H
diff --git a/include/llvm/ADT/ilist.h b/include/llvm/ADT/ilist.h
index 8e4d45dfef22..a788f811e4c6 100644
--- a/include/llvm/ADT/ilist.h
+++ b/include/llvm/ADT/ilist.h
@@ -11,459 +11,270 @@
// (i.e. each node of the list must contain a next and previous field for the
// list.)
//
-// The ilist_traits trait class is used to gain access to the next and previous
-// fields of the node type that the list is instantiated with. If it is not
-// specialized, the list defaults to using the getPrev(), getNext() method calls
-// to get the next and previous pointers.
+// The ilist class itself should be a plug-in replacement for std::list. This
+// list replacement does not provide a constant-time size() method, so be
+// careful to use empty() when you really want to know if it's empty.
//
-// The ilist class itself, should be a plug in replacement for list, assuming
-// that the nodes contain next/prev pointers. This list replacement does not
-// provide a constant time size() method, so be careful to use empty() when you
-// really want to know if it's empty.
-//
-// The ilist class is implemented by allocating a 'tail' node when the list is
-// created (using ilist_traits<>::createSentinel()). This tail node is
-// absolutely required because the user must be able to compute end()-1. Because
-// of this, users of the direct next/prev links will see an extra link on the
-// end of the list, which should be ignored.
-//
-// Requirements for a user of this list:
-//
-// 1. The user must provide {g|s}et{Next|Prev} methods, or specialize
-// ilist_traits to provide an alternate way of getting and setting next and
-// prev links.
+// The ilist class is implemented as a circular list. The list itself contains
+// a sentinel node, whose Next points at begin() and whose Prev points at
+// rbegin(). The sentinel node itself serves as end() and rend().
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ILIST_H
#define LLVM_ADT_ILIST_H
-#include "llvm/Support/Compiler.h"
-#include <algorithm>
+#include "llvm/ADT/simple_ilist.h"
#include <cassert>
#include <cstddef>
#include <iterator>
namespace llvm {
-template<typename NodeTy, typename Traits> class iplist;
-template<typename NodeTy> class ilist_iterator;
-
-/// ilist_nextprev_traits - A fragment for template traits for intrusive list
-/// that provides default next/prev implementations for common operations.
+/// Use delete by default for iplist and ilist.
///
-template<typename NodeTy>
-struct ilist_nextprev_traits {
- static NodeTy *getPrev(NodeTy *N) { return N->getPrev(); }
- static NodeTy *getNext(NodeTy *N) { return N->getNext(); }
- static const NodeTy *getPrev(const NodeTy *N) { return N->getPrev(); }
- static const NodeTy *getNext(const NodeTy *N) { return N->getNext(); }
-
- static void setPrev(NodeTy *N, NodeTy *Prev) { N->setPrev(Prev); }
- static void setNext(NodeTy *N, NodeTy *Next) { N->setNext(Next); }
+/// Specialize this to get different behaviour for ownership-related API. (If
+/// you really want ownership semantics, consider using std::list or building
+/// something like \a BumpPtrList.)
+///
+/// \see ilist_noalloc_traits
+template <typename NodeTy> struct ilist_alloc_traits {
+ static void deleteNode(NodeTy *V) { delete V; }
};
-template<typename NodeTy>
-struct ilist_traits;
-
-/// ilist_sentinel_traits - A fragment for template traits for intrusive list
-/// that provides default sentinel implementations for common operations.
+/// Custom traits to do nothing on deletion.
///
-/// ilist_sentinel_traits implements a lazy dynamic sentinel allocation
-/// strategy. The sentinel is stored in the prev field of ilist's Head.
+/// Specialize ilist_alloc_traits to inherit from this to disable the
+/// non-intrusive deletion in iplist (which implies ownership).
///
-template<typename NodeTy>
-struct ilist_sentinel_traits {
- /// createSentinel - create the dynamic sentinel
- static NodeTy *createSentinel() { return new NodeTy(); }
-
- /// destroySentinel - deallocate the dynamic sentinel
- static void destroySentinel(NodeTy *N) { delete N; }
-
- /// provideInitialHead - when constructing an ilist, provide a starting
- /// value for its Head
- /// @return null node to indicate that it needs to be allocated later
- static NodeTy *provideInitialHead() { return nullptr; }
-
- /// ensureHead - make sure that Head is either already
- /// initialized or assigned a fresh sentinel
- /// @return the sentinel
- static NodeTy *ensureHead(NodeTy *&Head) {
- if (!Head) {
- Head = ilist_traits<NodeTy>::createSentinel();
- ilist_traits<NodeTy>::noteHead(Head, Head);
- ilist_traits<NodeTy>::setNext(Head, nullptr);
- return Head;
- }
- return ilist_traits<NodeTy>::getPrev(Head);
- }
-
- /// noteHead - stash the sentinel into its default location
- static void noteHead(NodeTy *NewHead, NodeTy *Sentinel) {
- ilist_traits<NodeTy>::setPrev(NewHead, Sentinel);
- }
-};
-
-template <typename NodeTy> class ilist_half_node;
-template <typename NodeTy> class ilist_node;
-
-/// Traits with an embedded ilist_node as a sentinel.
+/// If you want purely intrusive semantics with no callbacks, consider using \a
+/// simple_ilist instead.
///
-/// FIXME: The downcast in createSentinel() is UB.
-template <typename NodeTy> struct ilist_embedded_sentinel_traits {
- /// Get hold of the node that marks the end of the list.
- NodeTy *createSentinel() const {
- // Since i(p)lists always publicly derive from their corresponding traits,
- // placing a data member in this class will augment the i(p)list. But since
- // the NodeTy is expected to be publicly derive from ilist_node<NodeTy>,
- // there is a legal viable downcast from it to NodeTy. We use this trick to
- // superimpose an i(p)list with a "ghostly" NodeTy, which becomes the
- // sentinel. Dereferencing the sentinel is forbidden (save the
- // ilist_node<NodeTy>), so no one will ever notice the superposition.
- return static_cast<NodeTy *>(&Sentinel);
- }
- static void destroySentinel(NodeTy *) {}
-
- NodeTy *provideInitialHead() const { return createSentinel(); }
- NodeTy *ensureHead(NodeTy *) const { return createSentinel(); }
- static void noteHead(NodeTy *, NodeTy *) {}
-
-private:
- mutable ilist_node<NodeTy> Sentinel;
+/// \code
+/// template <>
+/// struct ilist_alloc_traits<MyType> : ilist_noalloc_traits<MyType> {};
+/// \endcode
+template <typename NodeTy> struct ilist_noalloc_traits {
+ static void deleteNode(NodeTy *V) {}
};
-/// Trait with an embedded ilist_half_node as a sentinel.
+/// Callbacks do nothing by default in iplist and ilist.
///
-/// FIXME: The downcast in createSentinel() is UB.
-template <typename NodeTy> struct ilist_half_embedded_sentinel_traits {
- /// Get hold of the node that marks the end of the list.
- NodeTy *createSentinel() const {
- // See comment in ilist_embedded_sentinel_traits::createSentinel().
- return static_cast<NodeTy *>(&Sentinel);
- }
- static void destroySentinel(NodeTy *) {}
-
- NodeTy *provideInitialHead() const { return createSentinel(); }
- NodeTy *ensureHead(NodeTy *) const { return createSentinel(); }
- static void noteHead(NodeTy *, NodeTy *) {}
+/// Specialize this to use callbacks when nodes change their list membership.
+template <typename NodeTy> struct ilist_callback_traits {
+ void addNodeToList(NodeTy *) {}
+ void removeNodeFromList(NodeTy *) {}
-private:
- mutable ilist_half_node<NodeTy> Sentinel;
+ /// Callback before transferring nodes to this list.
+ ///
+ /// \pre \c this!=&OldList
+ template <class Iterator>
+ void transferNodesFromList(ilist_callback_traits &OldList, Iterator /*first*/,
+ Iterator /*last*/) {
+ (void)OldList;
+ }
};
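+
+// Example (editor's illustrative sketch, not part of this change): a
+// specialization for a hypothetical node type that mirrors list membership in
+// a flag on the node.
+//
+//   template <> struct ilist_callback_traits<MyNode> {
+//     void addNodeToList(MyNode *N) { N->IsOnList = true; }
+//     void removeNodeFromList(MyNode *N) { N->IsOnList = false; }
+//     template <class Iterator>
+//     void transferNodesFromList(ilist_callback_traits &, Iterator, Iterator) {}
+//   };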
-/// ilist_node_traits - A fragment for template traits for intrusive list
-/// that provides default node related operations.
+/// A fragment for template traits for intrusive list that provides default
+/// node related operations.
///
-template<typename NodeTy>
-struct ilist_node_traits {
- static NodeTy *createNode(const NodeTy &V) { return new NodeTy(V); }
- static void deleteNode(NodeTy *V) { delete V; }
-
- void addNodeToList(NodeTy *) {}
- void removeNodeFromList(NodeTy *) {}
- void transferNodesFromList(ilist_node_traits & /*SrcTraits*/,
- ilist_iterator<NodeTy> /*first*/,
- ilist_iterator<NodeTy> /*last*/) {}
-};
+/// TODO: Remove this layer of indirection. It's not necessary.
+template <typename NodeTy>
+struct ilist_node_traits : ilist_alloc_traits<NodeTy>,
+ ilist_callback_traits<NodeTy> {};
-/// ilist_default_traits - Default template traits for intrusive list.
-/// By inheriting from this, you can easily use default implementations
-/// for all common operations.
+/// Default template traits for intrusive list.
///
-template<typename NodeTy>
-struct ilist_default_traits : public ilist_nextprev_traits<NodeTy>,
- public ilist_sentinel_traits<NodeTy>,
- public ilist_node_traits<NodeTy> {
-};
+/// By inheriting from this, you can easily use default implementations for all
+/// common operations.
+///
+/// TODO: Remove this customization point. Specializing ilist_traits is
+/// already fully general.
+template <typename NodeTy>
+struct ilist_default_traits : public ilist_node_traits<NodeTy> {};
-// Template traits for intrusive list. By specializing this template class, you
-// can change what next/prev fields are used to store the links...
-template<typename NodeTy>
+/// Template traits for intrusive list.
+///
+/// Customize callbacks and allocation semantics.
+template <typename NodeTy>
struct ilist_traits : public ilist_default_traits<NodeTy> {};
-// Const traits are the same as nonconst traits...
-template<typename Ty>
-struct ilist_traits<const Ty> : public ilist_traits<Ty> {};
+/// Const traits should never be instantiated.
+template <typename Ty> struct ilist_traits<const Ty> {};
-//===----------------------------------------------------------------------===//
-// Iterator for intrusive list.
-//
-template <typename NodeTy>
-class ilist_iterator
- : public std::iterator<std::bidirectional_iterator_tag, NodeTy, ptrdiff_t> {
-public:
- typedef ilist_traits<NodeTy> Traits;
- typedef std::iterator<std::bidirectional_iterator_tag, NodeTy, ptrdiff_t>
- super;
+namespace ilist_detail {
- typedef typename super::value_type value_type;
- typedef typename super::difference_type difference_type;
- typedef typename super::pointer pointer;
- typedef typename super::reference reference;
+template <class T> T &make();
-private:
- pointer NodePtr;
+/// Type trait to check for a traits class that has a getNext member (as a
+/// canary for any of the ilist_nextprev_traits API).
+template <class TraitsT, class NodeT> struct HasGetNext {
+ typedef char Yes[1];
+ typedef char No[2];
+ template <size_t N> struct SFINAE {};
-public:
- explicit ilist_iterator(pointer NP) : NodePtr(NP) {}
- explicit ilist_iterator(reference NR) : NodePtr(&NR) {}
- ilist_iterator() : NodePtr(nullptr) {}
-
- // This is templated so that we can allow constructing a const iterator from
- // a nonconst iterator...
- template <class node_ty>
- ilist_iterator(const ilist_iterator<node_ty> &RHS)
- : NodePtr(RHS.getNodePtrUnchecked()) {}
-
- // This is templated so that we can allow assigning to a const iterator from
- // a nonconst iterator...
- template <class node_ty>
- const ilist_iterator &operator=(const ilist_iterator<node_ty> &RHS) {
- NodePtr = RHS.getNodePtrUnchecked();
- return *this;
- }
-
- void reset(pointer NP) { NodePtr = NP; }
+ template <class U>
+ static Yes &test(U *I, decltype(I->getNext(&make<NodeT>())) * = 0);
+ template <class> static No &test(...);
- // Accessors...
- explicit operator pointer() const { return NodePtr; }
- reference operator*() const { return *NodePtr; }
- pointer operator->() const { return &operator*(); }
+public:
+ static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
- // Comparison operators
- template <class Y> bool operator==(const ilist_iterator<Y> &RHS) const {
- return NodePtr == RHS.getNodePtrUnchecked();
- }
- template <class Y> bool operator!=(const ilist_iterator<Y> &RHS) const {
- return NodePtr != RHS.getNodePtrUnchecked();
- }
+/// Type trait to check for a traits class that has a createSentinel member (as
+/// a canary for any of the ilist_sentinel_traits API).
+template <class TraitsT> struct HasCreateSentinel {
+ typedef char Yes[1];
+ typedef char No[2];
- // Increment and decrement operators...
- ilist_iterator &operator--() {
- NodePtr = Traits::getPrev(NodePtr);
- assert(NodePtr && "--'d off the beginning of an ilist!");
- return *this;
- }
- ilist_iterator &operator++() {
- NodePtr = Traits::getNext(NodePtr);
- return *this;
- }
- ilist_iterator operator--(int) {
- ilist_iterator tmp = *this;
- --*this;
- return tmp;
- }
- ilist_iterator operator++(int) {
- ilist_iterator tmp = *this;
- ++*this;
- return tmp;
- }
+ template <class U>
+ static Yes &test(U *I, decltype(I->createSentinel()) * = 0);
+ template <class> static No &test(...);
- // Internal interface, do not use...
- pointer getNodePtrUnchecked() const { return NodePtr; }
+public:
+ static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
};
-// Allow ilist_iterators to convert into pointers to a node automatically when
-// used by the dyn_cast, cast, isa mechanisms...
-
-template<typename From> struct simplify_type;
+/// Type trait to check for a traits class that has a createNode member.
+/// Allocation should be managed in a wrapper class, instead of in
+/// ilist_traits.
+template <class TraitsT, class NodeT> struct HasCreateNode {
+ typedef char Yes[1];
+ typedef char No[2];
+ template <size_t N> struct SFINAE {};
-template<typename NodeTy> struct simplify_type<ilist_iterator<NodeTy> > {
- typedef NodeTy* SimpleType;
+ template <class U>
+ static Yes &test(U *I, decltype(I->createNode(make<NodeT>())) * = 0);
+ template <class> static No &test(...);
- static SimpleType getSimplifiedValue(ilist_iterator<NodeTy> &Node) {
- return &*Node;
- }
+public:
+ static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
};
-template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > {
- typedef /*const*/ NodeTy* SimpleType;
- static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) {
- return &*Node;
- }
+template <class TraitsT, class NodeT> struct HasObsoleteCustomization {
+ static const bool value = HasGetNext<TraitsT, NodeT>::value ||
+ HasCreateSentinel<TraitsT>::value ||
+ HasCreateNode<TraitsT, NodeT>::value;
};
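+
+// For illustration (editor's sketch, not part of this change): a stale
+// pre-refactor specialization for a hypothetical MyNode, such as
+//
+//   template <> struct ilist_traits<MyNode> {
+//     static MyNode *getNext(MyNode *N) { return N->Next; }
+//     // ...
+//   };
+//
+// is caught by HasGetNext and rejected by the static_assert in iplist_impl
+// below, rather than silently changing list behaviour.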
+} // end namespace ilist_detail
//===----------------------------------------------------------------------===//
//
-/// iplist - The subset of list functionality that can safely be used on nodes
-/// of polymorphic types, i.e. a heterogeneous list with a common base class that
-/// holds the next/prev pointers. The only state of the list itself is a single
-/// pointer to the head of the list.
+/// A wrapper around an intrusive list with callbacks and non-intrusive
+/// ownership.
///
-/// This list can be in one of three interesting states:
-/// 1. The list may be completely unconstructed. In this case, the head
-/// pointer is null. When in this form, any query for an iterator (e.g.
-/// begin() or end()) causes the list to transparently change to state #2.
-/// 2. The list may be empty, but contain a sentinel for the end iterator. This
-/// sentinel is created by the Traits::createSentinel method and is a link
-/// in the list. When the list is empty, the pointer in the iplist points
-/// to the sentinel. Once the sentinel is constructed, it
-/// is not destroyed until the list is.
-/// 3. The list may contain actual objects in it, which are stored as a doubly
-/// linked list of nodes. One invariant of the list is that the predecessor
-/// of the first node in the list always points to the last node in the list,
-/// and the successor pointer for the sentinel (which always stays at the
-/// end of the list) is always null.
+/// This wraps a purely intrusive list (like simple_ilist) with a configurable
+/// traits class. The traits can implement callbacks and customize the
+/// ownership semantics.
///
-template<typename NodeTy, typename Traits=ilist_traits<NodeTy> >
-class iplist : public Traits {
- mutable NodeTy *Head;
-
- // Use the prev node pointer of 'head' as the tail pointer. This is really a
- // circularly linked list where we snip the 'next' link from the sentinel node
- // back to the first node in the list (to preserve assertions about going off
- // the end of the list).
- NodeTy *getTail() { return this->ensureHead(Head); }
- const NodeTy *getTail() const { return this->ensureHead(Head); }
- void setTail(NodeTy *N) const { this->noteHead(Head, N); }
-
- /// CreateLazySentinel - This method verifies whether the sentinel for the
- /// list has been created and lazily makes it if not.
- void CreateLazySentinel() const {
- this->ensureHead(Head);
- }
+/// This is a subset of ilist functionality that can safely be used on nodes of
+/// polymorphic types, i.e. a heterogeneous list with a common base class that
+/// holds the next/prev pointers. The only state of the list itself is an
+/// ilist_sentinel, which holds pointers to the first and last nodes in the
+/// list.
+template <class IntrusiveListT, class TraitsT>
+class iplist_impl : public TraitsT, IntrusiveListT {
+ typedef IntrusiveListT base_list_type;
+
+protected:
+ typedef iplist_impl iplist_impl_type;
+
+public:
+ typedef typename base_list_type::pointer pointer;
+ typedef typename base_list_type::const_pointer const_pointer;
+ typedef typename base_list_type::reference reference;
+ typedef typename base_list_type::const_reference const_reference;
+ typedef typename base_list_type::value_type value_type;
+ typedef typename base_list_type::size_type size_type;
+ typedef typename base_list_type::difference_type difference_type;
+ typedef typename base_list_type::iterator iterator;
+ typedef typename base_list_type::const_iterator const_iterator;
+ typedef typename base_list_type::reverse_iterator reverse_iterator;
+ typedef
+ typename base_list_type::const_reverse_iterator const_reverse_iterator;
- static bool op_less(NodeTy &L, NodeTy &R) { return L < R; }
- static bool op_equal(NodeTy &L, NodeTy &R) { return L == R; }
+private:
+ // TODO: Drop this assertion and the transitive type traits anytime after
+  // v4.0 is branched (i.e., keep them for one release to help out-of-tree code
+ // update).
+ static_assert(
+ !ilist_detail::HasObsoleteCustomization<TraitsT, value_type>::value,
+ "ilist customization points have changed!");
- // No fundamental reason why iplist can't be copyable, but the default
- // copy/copy-assign won't do.
- iplist(const iplist &) = delete;
- void operator=(const iplist &) = delete;
+ static bool op_less(const_reference L, const_reference R) { return L < R; }
+ static bool op_equal(const_reference L, const_reference R) { return L == R; }
public:
- typedef NodeTy *pointer;
- typedef const NodeTy *const_pointer;
- typedef NodeTy &reference;
- typedef const NodeTy &const_reference;
- typedef NodeTy value_type;
- typedef ilist_iterator<NodeTy> iterator;
- typedef ilist_iterator<const NodeTy> const_iterator;
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
-
- iplist() : Head(this->provideInitialHead()) {}
- ~iplist() {
- if (!Head) return;
- clear();
- Traits::destroySentinel(getTail());
- }
+ iplist_impl() = default;
- // Iterator creation methods.
- iterator begin() {
- CreateLazySentinel();
- return iterator(Head);
- }
- const_iterator begin() const {
- CreateLazySentinel();
- return const_iterator(Head);
- }
- iterator end() {
- CreateLazySentinel();
- return iterator(getTail());
- }
- const_iterator end() const {
- CreateLazySentinel();
- return const_iterator(getTail());
- }
+ iplist_impl(const iplist_impl &) = delete;
+ iplist_impl &operator=(const iplist_impl &) = delete;
- // reverse iterator creation methods.
- reverse_iterator rbegin() { return reverse_iterator(end()); }
- const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
- reverse_iterator rend() { return reverse_iterator(begin()); }
- const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+ iplist_impl(iplist_impl &&X)
+ : TraitsT(std::move(X)), IntrusiveListT(std::move(X)) {}
+ iplist_impl &operator=(iplist_impl &&X) {
+ *static_cast<TraitsT *>(this) = std::move(X);
+ *static_cast<IntrusiveListT *>(this) = std::move(X);
+ return *this;
+ }
+ ~iplist_impl() { clear(); }
// Miscellaneous inspection routines.
size_type max_size() const { return size_type(-1); }
- bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
- return !Head || Head == getTail();
- }
- // Front and back accessor functions...
- reference front() {
- assert(!empty() && "Called front() on empty list!");
- return *Head;
- }
- const_reference front() const {
- assert(!empty() && "Called front() on empty list!");
- return *Head;
- }
- reference back() {
- assert(!empty() && "Called back() on empty list!");
- return *this->getPrev(getTail());
- }
- const_reference back() const {
- assert(!empty() && "Called back() on empty list!");
- return *this->getPrev(getTail());
- }
+ using base_list_type::begin;
+ using base_list_type::end;
+ using base_list_type::rbegin;
+ using base_list_type::rend;
+ using base_list_type::empty;
+ using base_list_type::front;
+ using base_list_type::back;
- void swap(iplist &RHS) {
+ void swap(iplist_impl &RHS) {
assert(0 && "Swap does not use list traits callback correctly yet!");
- std::swap(Head, RHS.Head);
+ base_list_type::swap(RHS);
}
- iterator insert(iterator where, NodeTy *New) {
- NodeTy *CurNode = where.getNodePtrUnchecked();
- NodeTy *PrevNode = this->getPrev(CurNode);
- this->setNext(New, CurNode);
- this->setPrev(New, PrevNode);
-
- if (CurNode != Head) // Is PrevNode off the beginning of the list?
- this->setNext(PrevNode, New);
- else
- Head = New;
- this->setPrev(CurNode, New);
-
- this->addNodeToList(New); // Notify traits that we added a node...
- return iterator(New);
+ iterator insert(iterator where, pointer New) {
+ this->addNodeToList(New); // Notify traits that we added a node...
+ return base_list_type::insert(where, *New);
}
- iterator insert(iterator where, const NodeTy &New) {
- return this->insert(where, new NodeTy(New));
+ iterator insert(iterator where, const_reference New) {
+ return this->insert(where, new value_type(New));
}
- iterator insertAfter(iterator where, NodeTy *New) {
+ iterator insertAfter(iterator where, pointer New) {
if (empty())
return insert(begin(), New);
else
return insert(++where, New);
}
- NodeTy *remove(iterator &IT) {
- assert(IT != end() && "Cannot remove end of list!");
- NodeTy *Node = &*IT;
- NodeTy *NextNode = this->getNext(Node);
- NodeTy *PrevNode = this->getPrev(Node);
+ /// Clone another list.
+ template <class Cloner> void cloneFrom(const iplist_impl &L2, Cloner clone) {
+ clear();
+ for (const_reference V : L2)
+ push_back(clone(V));
+ }
- if (Node != Head) // Is PrevNode off the beginning of the list?
- this->setNext(PrevNode, NextNode);
- else
- Head = NextNode;
- this->setPrev(NextNode, PrevNode);
- IT.reset(NextNode);
- this->removeNodeFromList(Node); // Notify traits that we removed a node...
-
- // Set the next/prev pointers of the current node to null. This isn't
- // strictly required, but this catches errors where a node is removed from
- // an ilist (and potentially deleted) with iterators still pointing at it.
- // When those iterators are incremented or decremented, they will assert on
- // the null next/prev pointer instead of "usually working".
- this->setNext(Node, nullptr);
- this->setPrev(Node, nullptr);
+ pointer remove(iterator &IT) {
+ pointer Node = &*IT++;
+ this->removeNodeFromList(Node); // Notify traits that we removed a node...
+ base_list_type::remove(*Node);
return Node;
}
- NodeTy *remove(const iterator &IT) {
+ pointer remove(const iterator &IT) {
iterator MutIt = IT;
return remove(MutIt);
}
- NodeTy *remove(NodeTy *IT) { return remove(iterator(IT)); }
- NodeTy *remove(NodeTy &IT) { return remove(iterator(IT)); }
+ pointer remove(pointer IT) { return remove(iterator(IT)); }
+ pointer remove(reference IT) { return remove(iterator(IT)); }
// erase - remove a node from the controlled sequence... and delete it.
iterator erase(iterator where) {
@@ -471,82 +282,36 @@ public:
return where;
}
- iterator erase(NodeTy *IT) { return erase(iterator(IT)); }
- iterator erase(NodeTy &IT) { return erase(iterator(IT)); }
+ iterator erase(pointer IT) { return erase(iterator(IT)); }
+ iterator erase(reference IT) { return erase(iterator(IT)); }
/// Remove all nodes from the list like clear(), but do not call
/// removeNodeFromList() or deleteNode().
///
/// This should only be used immediately before freeing nodes in bulk to
/// avoid traversing the list and bringing all the nodes into cache.
- void clearAndLeakNodesUnsafely() {
- if (Head) {
- Head = getTail();
- this->setPrev(Head, Head);
- }
- }
+ void clearAndLeakNodesUnsafely() { base_list_type::clear(); }
private:
// transfer - The heart of the splice function. Move linked list nodes from
// [first, last) into position.
//
- void transfer(iterator position, iplist &L2, iterator first, iterator last) {
- assert(first != last && "Should be checked by callers");
- // Position cannot be contained in the range to be transferred.
- // Check for the most common mistake.
- assert(position != first &&
- "Insertion point can't be one of the transferred nodes");
-
- if (position != last) {
- // Note: we have to be careful about the case when we move the first node
- // in the list. This node is the list sentinel node and we can't move it.
- NodeTy *ThisSentinel = getTail();
- setTail(nullptr);
- NodeTy *L2Sentinel = L2.getTail();
- L2.setTail(nullptr);
-
- // Remove [first, last) from its old position.
- NodeTy *First = &*first, *Prev = this->getPrev(First);
- NodeTy *Next = last.getNodePtrUnchecked(), *Last = this->getPrev(Next);
- if (Prev)
- this->setNext(Prev, Next);
- else
- L2.Head = Next;
- this->setPrev(Next, Prev);
-
- // Splice [first, last) into its new position.
- NodeTy *PosNext = position.getNodePtrUnchecked();
- NodeTy *PosPrev = this->getPrev(PosNext);
-
- // Fix head of list...
- if (PosPrev)
- this->setNext(PosPrev, First);
- else
- Head = First;
- this->setPrev(First, PosPrev);
-
- // Fix end of list...
- this->setNext(Last, PosNext);
- this->setPrev(PosNext, Last);
-
- this->transferNodesFromList(L2, iterator(First), iterator(PosNext));
-
- // Now that everything is set, restore the pointers to the list sentinels.
- L2.setTail(L2Sentinel);
- setTail(ThisSentinel);
- }
+ void transfer(iterator position, iplist_impl &L2, iterator first, iterator last) {
+ if (position == last)
+ return;
+
+ if (this != &L2) // Notify traits we moved the nodes...
+ this->transferNodesFromList(L2, first, last);
+
+ base_list_type::splice(position, L2, first, last);
}
public:
-
//===----------------------------------------------------------------------===
// Functionality derived from other functions defined above...
//
- size_type LLVM_ATTRIBUTE_UNUSED_RESULT size() const {
- if (!Head) return 0; // Don't require construction of sentinel if empty.
- return std::distance(begin(), end());
- }
+ using base_list_type::size;
iterator erase(iterator first, iterator last) {
while (first != last)
@@ -554,11 +319,11 @@ public:
return last;
}
- void clear() { if (Head) erase(begin(), end()); }
+ void clear() { erase(begin(), end()); }
// Front and back inserters...
- void push_front(NodeTy *val) { insert(begin(), val); }
- void push_back(NodeTy *val) { insert(end(), val); }
+ void push_front(pointer val) { insert(begin(), val); }
+ void push_back(pointer val) { insert(end(), val); }
void pop_front() {
assert(!empty() && "pop_front() on empty list!");
erase(begin());
@@ -574,179 +339,96 @@ public:
}
// Splice members - defined in terms of transfer...
- void splice(iterator where, iplist &L2) {
+ void splice(iterator where, iplist_impl &L2) {
if (!L2.empty())
transfer(where, L2, L2.begin(), L2.end());
}
- void splice(iterator where, iplist &L2, iterator first) {
+ void splice(iterator where, iplist_impl &L2, iterator first) {
iterator last = first; ++last;
if (where == first || where == last) return; // No change
transfer(where, L2, first, last);
}
- void splice(iterator where, iplist &L2, iterator first, iterator last) {
+ void splice(iterator where, iplist_impl &L2, iterator first, iterator last) {
if (first != last) transfer(where, L2, first, last);
}
- void splice(iterator where, iplist &L2, NodeTy &N) {
+ void splice(iterator where, iplist_impl &L2, reference N) {
splice(where, L2, iterator(N));
}
- void splice(iterator where, iplist &L2, NodeTy *N) {
+ void splice(iterator where, iplist_impl &L2, pointer N) {
splice(where, L2, iterator(N));
}
template <class Compare>
- void merge(iplist &Right, Compare comp) {
+ void merge(iplist_impl &Right, Compare comp) {
if (this == &Right)
return;
- iterator First1 = begin(), Last1 = end();
- iterator First2 = Right.begin(), Last2 = Right.end();
- while (First1 != Last1 && First2 != Last2) {
- if (comp(*First2, *First1)) {
- iterator Next = First2;
- transfer(First1, Right, First2, ++Next);
- First2 = Next;
- } else {
- ++First1;
- }
- }
- if (First2 != Last2)
- transfer(Last1, Right, First2, Last2);
+ this->transferNodesFromList(Right, Right.begin(), Right.end());
+ base_list_type::merge(Right, comp);
}
- void merge(iplist &Right) { return merge(Right, op_less); }
+ void merge(iplist_impl &Right) { return merge(Right, op_less); }
- template <class Compare>
- void sort(Compare comp) {
- // The list is empty, vacuously sorted.
- if (empty())
- return;
- // The list has a single element, vacuously sorted.
- if (std::next(begin()) == end())
- return;
- // Find the split point for the list.
- iterator Center = begin(), End = begin();
- while (End != end() && std::next(End) != end()) {
- Center = std::next(Center);
- End = std::next(std::next(End));
- }
- // Split the list into two.
- iplist RightHalf;
- RightHalf.splice(RightHalf.begin(), *this, Center, end());
-
- // Sort the two sublists.
- sort(comp);
- RightHalf.sort(comp);
-
- // Merge the two sublists back together.
- merge(RightHalf, comp);
- }
- void sort() { sort(op_less); }
+ using base_list_type::sort;
/// \brief Get the previous node, or \c nullptr for the list head.
- NodeTy *getPrevNode(NodeTy &N) const {
+ pointer getPrevNode(reference N) const {
auto I = N.getIterator();
if (I == begin())
return nullptr;
return &*std::prev(I);
}
/// \brief Get the previous node, or \c nullptr for the list head.
- const NodeTy *getPrevNode(const NodeTy &N) const {
- return getPrevNode(const_cast<NodeTy &>(N));
+ const_pointer getPrevNode(const_reference N) const {
+    return getPrevNode(const_cast<reference>(N));
}
/// \brief Get the next node, or \c nullptr for the list tail.
- NodeTy *getNextNode(NodeTy &N) const {
+ pointer getNextNode(reference N) const {
auto Next = std::next(N.getIterator());
if (Next == end())
return nullptr;
return &*Next;
}
/// \brief Get the next node, or \c nullptr for the list tail.
- const NodeTy *getNextNode(const NodeTy &N) const {
- return getNextNode(const_cast<NodeTy &>(N));
+ const_pointer getNextNode(const_reference N) const {
+    return getNextNode(const_cast<reference>(N));
}
};
+/// An intrusive list with ownership and callbacks specified/controlled by
+/// ilist_traits, only with API safe for polymorphic types.
+///
+/// The \p Options parameters are the same as those for \a simple_ilist. See
+/// there for a description of what's available.
+template <class T, class... Options>
+class iplist
+ : public iplist_impl<simple_ilist<T, Options...>, ilist_traits<T>> {
+ typedef typename iplist::iplist_impl_type iplist_impl_type;
-template<typename NodeTy>
-struct ilist : public iplist<NodeTy> {
- typedef typename iplist<NodeTy>::size_type size_type;
- typedef typename iplist<NodeTy>::iterator iterator;
-
- ilist() {}
- ilist(const ilist &right) : iplist<NodeTy>() {
- insert(this->begin(), right.begin(), right.end());
- }
- explicit ilist(size_type count) {
- insert(this->begin(), count, NodeTy());
- }
- ilist(size_type count, const NodeTy &val) {
- insert(this->begin(), count, val);
- }
- template<class InIt> ilist(InIt first, InIt last) {
- insert(this->begin(), first, last);
- }
-
- // bring hidden functions into scope
- using iplist<NodeTy>::insert;
- using iplist<NodeTy>::push_front;
- using iplist<NodeTy>::push_back;
-
- // Main implementation here - Insert for a node passed by value...
- iterator insert(iterator where, const NodeTy &val) {
- return insert(where, this->createNode(val));
- }
-
-
- // Front and back inserters...
- void push_front(const NodeTy &val) { insert(this->begin(), val); }
- void push_back(const NodeTy &val) { insert(this->end(), val); }
-
- void insert(iterator where, size_type count, const NodeTy &val) {
- for (; count != 0; --count) insert(where, val);
- }
-
- // Assign special forms...
- void assign(size_type count, const NodeTy &val) {
- iterator I = this->begin();
- for (; I != this->end() && count != 0; ++I, --count)
- *I = val;
- if (count != 0)
- insert(this->end(), val, val);
- else
- erase(I, this->end());
- }
- template<class InIt> void assign(InIt first1, InIt last1) {
- iterator first2 = this->begin(), last2 = this->end();
- for ( ; first1 != last1 && first2 != last2; ++first1, ++first2)
- *first1 = *first2;
- if (first2 == last2)
- erase(first1, last1);
- else
- insert(last1, first2, last2);
- }
-
+public:
+ iplist() = default;
- // Resize members...
- void resize(size_type newsize, NodeTy val) {
- iterator i = this->begin();
- size_type len = 0;
- for ( ; i != this->end() && len < newsize; ++i, ++len) /* empty*/ ;
+ iplist(const iplist &X) = delete;
+ iplist &operator=(const iplist &X) = delete;
- if (len == newsize)
- erase(i, this->end());
- else // i == end()
- insert(this->end(), newsize - len, val);
+ iplist(iplist &&X) : iplist_impl_type(std::move(X)) {}
+ iplist &operator=(iplist &&X) {
+ *static_cast<iplist_impl_type *>(this) = std::move(X);
+ return *this;
}
- void resize(size_type newsize) { resize(newsize, NodeTy()); }
};
-} // End llvm namespace
+template <class T, class... Options> using ilist = iplist<T, Options...>;
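+
+// Example (editor's illustrative sketch, not part of this change): an owning
+// intrusive list of a hypothetical node type.
+//
+//   struct MyItem : ilist_node<MyItem> { int V; MyItem(int V) : V(V) {} };
+//   ilist<MyItem> L;
+//   L.push_back(new MyItem(1)); // the list takes ownership
+//   L.push_back(new MyItem(2));
+//   L.clear();                  // calls ilist_alloc_traits<MyItem>::deleteNode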
+
+} // end namespace llvm
namespace std {
+
// Ensure that swap uses the fast list swap...
template<class Ty>
void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
Left.swap(Right);
}
-} // End 'std' extensions...
+
+} // end namespace std
#endif // LLVM_ADT_ILIST_H
diff --git a/include/llvm/ADT/ilist_base.h b/include/llvm/ADT/ilist_base.h
new file mode 100644
index 000000000000..1ffc864bea2f
--- /dev/null
+++ b/include/llvm/ADT/ilist_base.h
@@ -0,0 +1,95 @@
+//===- llvm/ADT/ilist_base.h - Intrusive List Base ---------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_BASE_H
+#define LLVM_ADT_ILIST_BASE_H
+
+#include "llvm/ADT/ilist_node_base.h"
+#include <cassert>
+#include <cstddef>
+#include <type_traits>
+
+namespace llvm {
+
+/// Implementations of list algorithms using ilist_node_base.
+template <bool EnableSentinelTracking> class ilist_base {
+public:
+ typedef ilist_node_base<EnableSentinelTracking> node_base_type;
+
+ static void insertBeforeImpl(node_base_type &Next, node_base_type &N) {
+ node_base_type &Prev = *Next.getPrev();
+ N.setNext(&Next);
+ N.setPrev(&Prev);
+ Prev.setNext(&N);
+ Next.setPrev(&N);
+ }
+
+ static void removeImpl(node_base_type &N) {
+ node_base_type *Prev = N.getPrev();
+ node_base_type *Next = N.getNext();
+ Next->setPrev(Prev);
+ Prev->setNext(Next);
+
+ // Not strictly necessary, but helps catch a class of bugs.
+ N.setPrev(nullptr);
+ N.setNext(nullptr);
+ }
+
+ static void removeRangeImpl(node_base_type &First, node_base_type &Last) {
+ node_base_type *Prev = First.getPrev();
+ node_base_type *Final = Last.getPrev();
+ Last.setPrev(Prev);
+ Prev->setNext(&Last);
+
+ // Not strictly necessary, but helps catch a class of bugs.
+ First.setPrev(nullptr);
+ Final->setNext(nullptr);
+ }
+
+ static void transferBeforeImpl(node_base_type &Next, node_base_type &First,
+ node_base_type &Last) {
+ if (&Next == &Last || &First == &Last)
+ return;
+
+ // Position cannot be contained in the range to be transferred.
+ assert(&Next != &First &&
+ // Check for the most common mistake.
+ "Insertion point can't be one of the transferred nodes");
+
+ node_base_type &Final = *Last.getPrev();
+
+ // Detach from old list/position.
+ First.getPrev()->setNext(&Last);
+ Last.setPrev(First.getPrev());
+
+ // Splice [First, Final] into its new list/position.
+ node_base_type &Prev = *Next.getPrev();
+ Final.setNext(&Next);
+ First.setPrev(&Prev);
+ Prev.setNext(&First);
+ Next.setPrev(&Final);
+ }
+
+ template <class T> static void insertBefore(T &Next, T &N) {
+ insertBeforeImpl(Next, N);
+ }
+
+ template <class T> static void remove(T &N) { removeImpl(N); }
+ template <class T> static void removeRange(T &First, T &Last) {
+ removeRangeImpl(First, Last);
+ }
+
+ template <class T> static void transferBefore(T &Next, T &First, T &Last) {
+ transferBeforeImpl(Next, First, Last);
+ }
+};
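+
+// Example (editor's illustrative sketch, not part of this change): threading
+// two nodes onto a self-referential sentinel using the raw algorithms.
+//
+//   ilist_node_base<false> S, A, B;
+//   S.setPrev(&S); S.setNext(&S);           // empty circular list
+//   ilist_base<false>::insertBefore(S, A);  // list: A
+//   ilist_base<false>::insertBefore(S, B);  // list: A, B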
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_BASE_H
diff --git a/include/llvm/ADT/ilist_iterator.h b/include/llvm/ADT/ilist_iterator.h
new file mode 100644
index 000000000000..ef532d2cf172
--- /dev/null
+++ b/include/llvm/ADT/ilist_iterator.h
@@ -0,0 +1,185 @@
+//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator -------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_ITERATOR_H
+#define LLVM_ADT_ILIST_ITERATOR_H
+
+#include "llvm/ADT/ilist_node.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+namespace ilist_detail {
+
+/// Find const-correct node types.
+template <class OptionsT, bool IsConst> struct IteratorTraits;
+template <class OptionsT> struct IteratorTraits<OptionsT, false> {
+ typedef typename OptionsT::value_type value_type;
+ typedef typename OptionsT::pointer pointer;
+ typedef typename OptionsT::reference reference;
+ typedef ilist_node_impl<OptionsT> *node_pointer;
+ typedef ilist_node_impl<OptionsT> &node_reference;
+};
+template <class OptionsT> struct IteratorTraits<OptionsT, true> {
+ typedef const typename OptionsT::value_type value_type;
+ typedef typename OptionsT::const_pointer pointer;
+ typedef typename OptionsT::const_reference reference;
+ typedef const ilist_node_impl<OptionsT> *node_pointer;
+ typedef const ilist_node_impl<OptionsT> &node_reference;
+};
+
+template <bool IsReverse> struct IteratorHelper;
+template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
+ typedef ilist_detail::NodeAccess Access;
+ template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
+ template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
+};
+template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
+ typedef ilist_detail::NodeAccess Access;
+ template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
+ template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
+};
+
+} // end namespace ilist_detail
+
+/// Iterator for intrusive lists based on ilist_node.
+template <class OptionsT, bool IsReverse, bool IsConst>
+class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
+ friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
+ friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
+ friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
+
+ typedef ilist_detail::IteratorTraits<OptionsT, IsConst> Traits;
+ typedef ilist_detail::SpecificNodeAccess<OptionsT> Access;
+
+public:
+ typedef typename Traits::value_type value_type;
+ typedef typename Traits::pointer pointer;
+ typedef typename Traits::reference reference;
+ typedef ptrdiff_t difference_type;
+ typedef std::bidirectional_iterator_tag iterator_category;
+
+ typedef typename OptionsT::const_pointer const_pointer;
+ typedef typename OptionsT::const_reference const_reference;
+
+private:
+ typedef typename Traits::node_pointer node_pointer;
+ typedef typename Traits::node_reference node_reference;
+
+ node_pointer NodePtr;
+
+public:
+ /// Create from an ilist_node.
+ explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
+
+ explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
+ explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
+ ilist_iterator() : NodePtr(nullptr) {}
+
+ // This is templated so that we can allow constructing a const iterator from
+ // a nonconst iterator...
+ template <bool RHSIsConst>
+ ilist_iterator(
+ const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
+ typename std::enable_if<IsConst || !RHSIsConst, void *>::type = nullptr)
+ : NodePtr(RHS.NodePtr) {}
+
+ // This is templated so that we can allow assigning to a const iterator from
+ // a nonconst iterator...
+ template <bool RHSIsConst>
+ typename std::enable_if<IsConst || !RHSIsConst, ilist_iterator &>::type
+ operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
+ NodePtr = RHS.NodePtr;
+ return *this;
+ }
+
+ /// Convert from an iterator to its reverse.
+ ///
+ /// TODO: Roll this into the implicit constructor once we're sure that no one
+ /// is relying on the std::reverse_iterator off-by-one semantics.
+ ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
+ if (NodePtr)
+ return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
+ return ilist_iterator<OptionsT, !IsReverse, IsConst>();
+ }
+
+ /// Const-cast.
+ ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
+ if (NodePtr)
+ return ilist_iterator<OptionsT, IsReverse, false>(
+ const_cast<typename ilist_iterator<OptionsT, IsReverse,
+ false>::node_reference>(*NodePtr));
+ return ilist_iterator<OptionsT, IsReverse, false>();
+ }
+
+ // Accessors...
+ reference operator*() const {
+ assert(!NodePtr->isKnownSentinel());
+ return *Access::getValuePtr(NodePtr);
+ }
+ pointer operator->() const { return &operator*(); }
+
+ // Comparison operators
+ friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+ return LHS.NodePtr == RHS.NodePtr;
+ }
+ friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+ return LHS.NodePtr != RHS.NodePtr;
+ }
+
+ // Increment and decrement operators...
+ ilist_iterator &operator--() {
+ NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
+ return *this;
+ }
+ ilist_iterator &operator++() {
+ NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
+ return *this;
+ }
+ ilist_iterator operator--(int) {
+ ilist_iterator tmp = *this;
+ --*this;
+ return tmp;
+ }
+ ilist_iterator operator++(int) {
+ ilist_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ /// Get the underlying ilist_node.
+ node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
+
+ /// Check for end. Only valid if ilist_sentinel_tracking<true>.
+ bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
+};
+
+template <typename From> struct simplify_type;
+
+/// Allow ilist_iterators to convert into pointers to a node automatically when
+/// used by the dyn_cast, cast, isa mechanisms...
+///
+/// FIXME: remove this, since there is no implicit conversion to NodeTy.
+template <class OptionsT, bool IsConst>
+struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
+ typedef ilist_iterator<OptionsT, false, IsConst> iterator;
+ typedef typename iterator::pointer SimpleType;
+
+ static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
+};
+template <class OptionsT, bool IsConst>
+struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
+ : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_ITERATOR_H
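
A short usage sketch of the iterator above, assuming a tree where the simple_ilist and ilist_node headers from this patch resolve; Item is a hypothetical node type. It shows plain forward iteration and the same-node semantics of getReverse():

#include "llvm/ADT/simple_ilist.h"
#include <cstdio>

struct Item : llvm::ilist_node<Item> {
  int Value;
  explicit Item(int V) : Value(V) {}
};

int main() {
  Item A(1), B(2), C(3);
  llvm::simple_ilist<Item> List;   // links the nodes, never owns them
  List.push_back(A);
  List.push_back(B);
  List.push_back(C);

  // Forward iteration follows the next links.
  for (Item &I : List)
    std::printf("%d ", I.Value);   // prints: 1 2 3

  // getReverse() returns a reverse iterator at the *same* node, without the
  // std::reverse_iterator off-by-one mentioned in the TODO above.
  auto RIt = A.getIterator().getReverse();
  std::printf("\nreverse of A's iterator points at %d\n", RIt->Value);  // 1
}
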
diff --git a/include/llvm/ADT/ilist_node.h b/include/llvm/ADT/ilist_node.h
index 7e5a0e0e5ad8..7244d0f40586 100644
--- a/include/llvm/ADT/ilist_node.h
+++ b/include/llvm/ADT/ilist_node.h
@@ -15,65 +15,232 @@
#ifndef LLVM_ADT_ILIST_NODE_H
#define LLVM_ADT_ILIST_NODE_H
+#include "llvm/ADT/ilist_node_base.h"
+#include "llvm/ADT/ilist_node_options.h"
+
namespace llvm {
+namespace ilist_detail {
+struct NodeAccess;
+} // end namespace ilist_detail
+
template<typename NodeTy>
struct ilist_traits;
-template <typename NodeTy> struct ilist_embedded_sentinel_traits;
-template <typename NodeTy> struct ilist_half_embedded_sentinel_traits;
-/// ilist_half_node - Base class that provides prev services for sentinels.
+template <class OptionsT, bool IsReverse, bool IsConst> class ilist_iterator;
+template <class OptionsT> class ilist_sentinel;
+
+/// Implementation for an ilist node.
///
-template<typename NodeTy>
-class ilist_half_node {
- friend struct ilist_traits<NodeTy>;
- friend struct ilist_half_embedded_sentinel_traits<NodeTy>;
- NodeTy *Prev;
+/// Templated on an appropriate \a ilist_detail::node_options, usually computed
+/// by \a ilist_detail::compute_node_options.
+///
+/// This is a wrapper around \a ilist_node_base whose main purpose is to
+/// provide type safety: you can't insert nodes of \a ilist_node_impl into the
+/// wrong \a simple_ilist or \a iplist.
+template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
+ typedef typename OptionsT::value_type value_type;
+ typedef typename OptionsT::node_base_type node_base_type;
+ typedef typename OptionsT::list_base_type list_base_type;
+
+ friend typename OptionsT::list_base_type;
+ friend struct ilist_detail::NodeAccess;
+ friend class ilist_sentinel<OptionsT>;
+ friend class ilist_iterator<OptionsT, false, false>;
+ friend class ilist_iterator<OptionsT, false, true>;
+ friend class ilist_iterator<OptionsT, true, false>;
+ friend class ilist_iterator<OptionsT, true, true>;
+
protected:
- NodeTy *getPrev() { return Prev; }
- const NodeTy *getPrev() const { return Prev; }
- void setPrev(NodeTy *P) { Prev = P; }
- ilist_half_node() : Prev(nullptr) {}
-};
+ ilist_node_impl() = default;
-template<typename NodeTy>
-struct ilist_nextprev_traits;
+ typedef ilist_iterator<OptionsT, false, false> self_iterator;
+ typedef ilist_iterator<OptionsT, false, true> const_self_iterator;
+ typedef ilist_iterator<OptionsT, true, false> reverse_self_iterator;
+ typedef ilist_iterator<OptionsT, true, true> const_reverse_self_iterator;
-template <typename NodeTy> class ilist_iterator;
+private:
+ ilist_node_impl *getPrev() {
+ return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+ }
+ ilist_node_impl *getNext() {
+ return static_cast<ilist_node_impl *>(node_base_type::getNext());
+ }
+
+ const ilist_node_impl *getPrev() const {
+ return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+ }
+ const ilist_node_impl *getNext() const {
+ return static_cast<ilist_node_impl *>(node_base_type::getNext());
+ }
-/// ilist_node - Base class that provides next/prev services for nodes
-/// that use ilist_nextprev_traits or ilist_default_traits.
+ void setPrev(ilist_node_impl *N) { node_base_type::setPrev(N); }
+ void setNext(ilist_node_impl *N) { node_base_type::setNext(N); }
+
+public:
+ self_iterator getIterator() { return self_iterator(*this); }
+ const_self_iterator getIterator() const { return const_self_iterator(*this); }
+ reverse_self_iterator getReverseIterator() {
+ return reverse_self_iterator(*this);
+ }
+ const_reverse_self_iterator getReverseIterator() const {
+ return const_reverse_self_iterator(*this);
+ }
+
+ // Under-approximation, but always available for assertions.
+ using node_base_type::isKnownSentinel;
+
+ /// Check whether this is the sentinel node.
+ ///
+ /// This requires sentinel tracking to be explicitly enabled. Use the
+ /// ilist_sentinel_tracking<true> option to get this API.
+ bool isSentinel() const {
+ static_assert(OptionsT::is_sentinel_tracking_explicit,
+ "Use ilist_sentinel_tracking<true> to enable isSentinel()");
+ return node_base_type::isSentinel();
+ }
+};
+
+/// An intrusive list node.
///
-template<typename NodeTy>
-class ilist_node : private ilist_half_node<NodeTy> {
- friend struct ilist_nextprev_traits<NodeTy>;
- friend struct ilist_traits<NodeTy>;
- friend struct ilist_half_embedded_sentinel_traits<NodeTy>;
- friend struct ilist_embedded_sentinel_traits<NodeTy>;
- NodeTy *Next;
- NodeTy *getNext() { return Next; }
- const NodeTy *getNext() const { return Next; }
- void setNext(NodeTy *N) { Next = N; }
+/// A base class to enable membership in intrusive lists, including \a
+/// simple_ilist, \a iplist, and \a ilist. The first template parameter is the
+/// \a value_type for the list.
+///
+/// An ilist node can be configured with compile-time options to change
+/// behaviour and/or add API.
+///
+/// By default, an \a ilist_node knows whether it is the list sentinel (an
+/// instance of \a ilist_sentinel) if and only if
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS. The function \a isKnownSentinel() always
+/// returns \c false when tracking is off. Sentinel tracking steals a bit from
+/// "prev" link, which adds a mask operation when decrementing an iterator, but
+/// enables bug-finding assertions in \a ilist_iterator.
+///
+/// To turn sentinel tracking on all the time, pass in the
+/// ilist_sentinel_tracking<true> template parameter. This also enables the \a
+/// isSentinel() function. The same option must be passed to the intrusive
+/// list. (ilist_sentinel_tracking<false> turns sentinel tracking off all the
+/// time.)
+///
+/// A type can inherit from ilist_node multiple times by passing in different
+/// \a ilist_tag options. This allows a single instance to be inserted into
+/// multiple lists simultaneously, with each list selecting a node base by tag.
+///
+/// \example
+/// struct A {};
+/// struct B {};
+/// struct N : ilist_node<N, ilist_tag<A>>, ilist_node<N, ilist_tag<B>> {};
+///
+/// void foo() {
+/// simple_ilist<N, ilist_tag<A>> ListA;
+/// simple_ilist<N, ilist_tag<B>> ListB;
+/// N N1;
+/// ListA.push_back(N1);
+/// ListB.push_back(N1);
+/// }
+/// \endexample
+///
+/// See \a is_valid_option for steps on adding a new option.
+template <class T, class... Options>
+class ilist_node
+ : public ilist_node_impl<
+ typename ilist_detail::compute_node_options<T, Options...>::type> {
+ static_assert(ilist_detail::check_options<Options...>::value,
+ "Unrecognized node option!");
+};
+
+namespace ilist_detail {
+/// An access class for ilist_node private API.
+///
+/// This gives access to the private parts of ilist nodes. Nodes for an ilist
+/// should friend this class if they inherit privately from ilist_node.
+///
+/// Using this class outside of the ilist implementation is unsupported.
+struct NodeAccess {
+protected:
+ template <class OptionsT>
+ static ilist_node_impl<OptionsT> *getNodePtr(typename OptionsT::pointer N) {
+ return N;
+ }
+ template <class OptionsT>
+ static const ilist_node_impl<OptionsT> *
+ getNodePtr(typename OptionsT::const_pointer N) {
+ return N;
+ }
+ template <class OptionsT>
+ static typename OptionsT::pointer getValuePtr(ilist_node_impl<OptionsT> *N) {
+ return static_cast<typename OptionsT::pointer>(N);
+ }
+ template <class OptionsT>
+ static typename OptionsT::const_pointer
+ getValuePtr(const ilist_node_impl<OptionsT> *N) {
+ return static_cast<typename OptionsT::const_pointer>(N);
+ }
+
+ template <class OptionsT>
+ static ilist_node_impl<OptionsT> *getPrev(ilist_node_impl<OptionsT> &N) {
+ return N.getPrev();
+ }
+ template <class OptionsT>
+ static ilist_node_impl<OptionsT> *getNext(ilist_node_impl<OptionsT> &N) {
+ return N.getNext();
+ }
+ template <class OptionsT>
+ static const ilist_node_impl<OptionsT> *
+ getPrev(const ilist_node_impl<OptionsT> &N) {
+ return N.getPrev();
+ }
+ template <class OptionsT>
+ static const ilist_node_impl<OptionsT> *
+ getNext(const ilist_node_impl<OptionsT> &N) {
+ return N.getNext();
+ }
+};
+
+template <class OptionsT> struct SpecificNodeAccess : NodeAccess {
protected:
- ilist_node() : Next(nullptr) {}
+ typedef typename OptionsT::pointer pointer;
+ typedef typename OptionsT::const_pointer const_pointer;
+ typedef ilist_node_impl<OptionsT> node_type;
+
+ static node_type *getNodePtr(pointer N) {
+ return NodeAccess::getNodePtr<OptionsT>(N);
+ }
+ static const node_type *getNodePtr(const_pointer N) {
+ return NodeAccess::getNodePtr<OptionsT>(N);
+ }
+ static pointer getValuePtr(node_type *N) {
+ return NodeAccess::getValuePtr<OptionsT>(N);
+ }
+ static const_pointer getValuePtr(const node_type *N) {
+ return NodeAccess::getValuePtr<OptionsT>(N);
+ }
+};
+} // end namespace ilist_detail
+template <class OptionsT>
+class ilist_sentinel : public ilist_node_impl<OptionsT> {
public:
- ilist_iterator<NodeTy> getIterator() {
- // FIXME: Stop downcasting to create the iterator (potential UB).
- return ilist_iterator<NodeTy>(static_cast<NodeTy *>(this));
+ ilist_sentinel() {
+ this->initializeSentinel();
+ reset();
}
- ilist_iterator<const NodeTy> getIterator() const {
- // FIXME: Stop downcasting to create the iterator (potential UB).
- return ilist_iterator<const NodeTy>(static_cast<const NodeTy *>(this));
+
+ void reset() {
+ this->setPrev(this);
+ this->setNext(this);
}
+
+ bool empty() const { return this == this->getPrev(); }
};
/// An ilist node that can access its parent list.
///
/// Requires \c NodeTy to have \a getParent() to find the parent node, and the
/// \c ParentTy to have \a getSublistAccess() to get a reference to the list.
-template <typename NodeTy, typename ParentTy>
-class ilist_node_with_parent : public ilist_node<NodeTy> {
+template <typename NodeTy, typename ParentTy, class... Options>
+class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
protected:
ilist_node_with_parent() = default;
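
The ilist_tag example in the ilist_node comment above expands into a runnable sketch as follows (assuming the headers from this patch are on the include path):

#include "llvm/ADT/simple_ilist.h"
#include <cassert>

struct A {};  // tag types only; never instantiated
struct B {};

struct N : llvm::ilist_node<N, llvm::ilist_tag<A>>,
           llvm::ilist_node<N, llvm::ilist_tag<B>> {
  int Value = 0;
};

int main() {
  llvm::simple_ilist<N, llvm::ilist_tag<A>> ListA;
  llvm::simple_ilist<N, llvm::ilist_tag<B>> ListB;
  N N1;
  N1.Value = 42;
  ListA.push_back(N1);   // uses the ilist_node<N, ilist_tag<A>> base
  ListB.push_back(N1);   // uses the ilist_node<N, ilist_tag<B>> base
  assert(&ListA.front() == &N1 && &ListB.front() == &N1);
  ListA.remove(N1);      // N1 remains a member of ListB
  assert(ListA.empty() && !ListB.empty());
}
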
diff --git a/include/llvm/ADT/ilist_node_base.h b/include/llvm/ADT/ilist_node_base.h
new file mode 100644
index 000000000000..e5062ac4eaad
--- /dev/null
+++ b/include/llvm/ADT/ilist_node_base.h
@@ -0,0 +1,53 @@
+//===- llvm/ADT/ilist_node_base.h - Intrusive List Node Base -----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_BASE_H
+#define LLVM_ADT_ILIST_NODE_BASE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace llvm {
+
+/// Base class for ilist nodes.
+///
+/// Optionally tracks whether this node is the sentinel.
+template <bool EnableSentinelTracking> class ilist_node_base;
+
+template <> class ilist_node_base<false> {
+ ilist_node_base *Prev = nullptr;
+ ilist_node_base *Next = nullptr;
+
+public:
+ void setPrev(ilist_node_base *Prev) { this->Prev = Prev; }
+ void setNext(ilist_node_base *Next) { this->Next = Next; }
+ ilist_node_base *getPrev() const { return Prev; }
+ ilist_node_base *getNext() const { return Next; }
+
+ bool isKnownSentinel() const { return false; }
+ void initializeSentinel() {}
+};
+
+template <> class ilist_node_base<true> {
+ PointerIntPair<ilist_node_base *, 1> PrevAndSentinel;
+ ilist_node_base *Next = nullptr;
+
+public:
+ void setPrev(ilist_node_base *Prev) { PrevAndSentinel.setPointer(Prev); }
+ void setNext(ilist_node_base *Next) { this->Next = Next; }
+ ilist_node_base *getPrev() const { return PrevAndSentinel.getPointer(); }
+ ilist_node_base *getNext() const { return Next; }
+
+ bool isSentinel() const { return PrevAndSentinel.getInt(); }
+ bool isKnownSentinel() const { return isSentinel(); }
+ void initializeSentinel() { PrevAndSentinel.setInt(true); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_BASE_H
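
A small sketch of the two node bases above, assuming a tree where PointerIntPair resolves: with tracking disabled the sentinel question is always answered in the negative, while the tracked variant carries the flag in the bit stolen from the prev link:

#include "llvm/ADT/ilist_node_base.h"
#include <cassert>

int main() {
  llvm::ilist_node_base<false> Untracked;
  Untracked.initializeSentinel();         // a no-op in this variant
  assert(!Untracked.isKnownSentinel());   // under-approximation: always false

  llvm::ilist_node_base<true> Tracked;
  assert(!Tracked.isSentinel());
  Tracked.initializeSentinel();           // sets the bit stolen from Prev
  assert(Tracked.isSentinel() && Tracked.isKnownSentinel());

  // The links themselves are unaffected by the flag.
  Tracked.setPrev(&Tracked);
  assert(Tracked.getPrev() == &Tracked && Tracked.isSentinel());
}
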
diff --git a/include/llvm/ADT/ilist_node_options.h b/include/llvm/ADT/ilist_node_options.h
new file mode 100644
index 000000000000..c33df1eeb819
--- /dev/null
+++ b/include/llvm/ADT/ilist_node_options.h
@@ -0,0 +1,133 @@
+//===- llvm/ADT/ilist_node_options.h - ilist_node Options -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_OPTIONS_H
+#define LLVM_ADT_ILIST_NODE_OPTIONS_H
+
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Config/llvm-config.h"
+
+#include <type_traits>
+
+namespace llvm {
+
+template <bool EnableSentinelTracking> class ilist_node_base;
+template <bool EnableSentinelTracking> class ilist_base;
+
+/// Option to choose whether to track sentinels.
+///
+/// This option affects the ABI for the nodes. When not specified explicitly,
+/// the ABI depends on LLVM_ENABLE_ABI_BREAKING_CHECKS. Specify explicitly to
+/// enable \a ilist_node::isSentinel().
+template <bool EnableSentinelTracking> struct ilist_sentinel_tracking {};
+
+/// Option to specify a tag for the node type.
+///
+/// This option allows a single value type to be inserted in multiple lists
+/// simultaneously. See \a ilist_node for usage examples.
+template <class Tag> struct ilist_tag {};
+
+namespace ilist_detail {
+
+/// Helper trait for recording whether an option is specified explicitly.
+template <bool IsExplicit> struct explicitness {
+ static const bool is_explicit = IsExplicit;
+};
+typedef explicitness<true> is_explicit;
+typedef explicitness<false> is_implicit;
+
+/// Check whether an option is valid.
+///
+/// The steps for adding and enabling a new ilist option include:
+/// \li define the option, ilist_foo<Bar>, above;
+/// \li add new parameters for Bar to \a ilist_detail::node_options;
+/// \li add an extraction meta-function, ilist_detail::extract_foo;
+/// \li call extract_foo from \a ilist_detail::compute_node_options and pass it
+/// into \a ilist_detail::node_options; and
+/// \li specialize \c is_valid_option<ilist_foo<Bar>> to inherit from \c
+/// std::true_type to get static assertions passing in \a simple_ilist and \a
+/// ilist_node.
+template <class Option> struct is_valid_option : std::false_type {};
+
+/// Extract sentinel tracking option.
+///
+/// Look through \p Options for the \a ilist_sentinel_tracking option, with the
+/// default depending on LLVM_ENABLE_ABI_BREAKING_CHECKS.
+template <class... Options> struct extract_sentinel_tracking;
+template <bool EnableSentinelTracking, class... Options>
+struct extract_sentinel_tracking<
+ ilist_sentinel_tracking<EnableSentinelTracking>, Options...>
+ : std::integral_constant<bool, EnableSentinelTracking>, is_explicit {};
+template <class Option1, class... Options>
+struct extract_sentinel_tracking<Option1, Options...>
+ : extract_sentinel_tracking<Options...> {};
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+template <> struct extract_sentinel_tracking<> : std::true_type, is_implicit {};
+#else
+template <>
+struct extract_sentinel_tracking<> : std::false_type, is_implicit {};
+#endif
+template <bool EnableSentinelTracking>
+struct is_valid_option<ilist_sentinel_tracking<EnableSentinelTracking>>
+ : std::true_type {};
+
+/// Extract custom tag option.
+///
+/// Look through \p Options for the \a ilist_tag option, pulling out the
+/// custom tag type, using void as a default.
+template <class... Options> struct extract_tag;
+template <class Tag, class... Options>
+struct extract_tag<ilist_tag<Tag>, Options...> {
+ typedef Tag type;
+};
+template <class Option1, class... Options>
+struct extract_tag<Option1, Options...> : extract_tag<Options...> {};
+template <> struct extract_tag<> { typedef void type; };
+template <class Tag> struct is_valid_option<ilist_tag<Tag>> : std::true_type {};
+
+/// Check whether options are valid.
+///
+/// The conjunction of \a is_valid_option on each individual option.
+template <class... Options> struct check_options;
+template <> struct check_options<> : std::true_type {};
+template <class Option1, class... Options>
+struct check_options<Option1, Options...>
+ : std::integral_constant<bool, is_valid_option<Option1>::value &&
+ check_options<Options...>::value> {};
+
+/// Traits for options for \a ilist_node.
+///
+/// This is usually computed via \a compute_node_options.
+template <class T, bool EnableSentinelTracking, bool IsSentinelTrackingExplicit,
+ class TagT>
+struct node_options {
+ typedef T value_type;
+ typedef T *pointer;
+ typedef T &reference;
+ typedef const T *const_pointer;
+ typedef const T &const_reference;
+
+ static const bool enable_sentinel_tracking = EnableSentinelTracking;
+ static const bool is_sentinel_tracking_explicit = IsSentinelTrackingExplicit;
+ typedef TagT tag;
+ typedef ilist_node_base<enable_sentinel_tracking> node_base_type;
+ typedef ilist_base<enable_sentinel_tracking> list_base_type;
+};
+
+template <class T, class... Options> struct compute_node_options {
+ typedef node_options<T, extract_sentinel_tracking<Options...>::value,
+ extract_sentinel_tracking<Options...>::is_explicit,
+ typename extract_tag<Options...>::type>
+ type;
+};
+
+} // end namespace ilist_detail
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_OPTIONS_H
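
The option machinery above is entirely compile-time, so its behaviour can be pinned down with static_asserts. A sketch, assuming an LLVM build tree so the generated config headers resolve; MyTag is a hypothetical tag type:

#include "llvm/ADT/ilist_node_options.h"
#include <type_traits>

using namespace llvm;
using namespace llvm::ilist_detail;

struct MyTag {};  // hypothetical tag type

// Both options are recognized, in either order.
static_assert(check_options<ilist_tag<MyTag>,
                            ilist_sentinel_tracking<true>>::value,
              "both options are valid");

// An explicitly requested setting is extracted as such.
static_assert(extract_sentinel_tracking<ilist_sentinel_tracking<true>,
                                        ilist_tag<MyTag>>::value,
              "explicit tracking request is extracted");
static_assert(
    extract_sentinel_tracking<ilist_sentinel_tracking<true>>::is_explicit,
    "and is marked explicit");

// The tag defaults to void when ilist_tag is not given, and is found
// anywhere in the option pack otherwise.
static_assert(std::is_same<extract_tag<>::type, void>::value,
              "default tag is void");
static_assert(
    std::is_same<extract_tag<ilist_sentinel_tracking<false>,
                             ilist_tag<MyTag>>::type,
                 MyTag>::value,
    "tag is found anywhere in the pack");

int main() {}
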
diff --git a/include/llvm/ADT/iterator.h b/include/llvm/ADT/iterator.h
index 0bd28d5c6cd0..9ccacc10db0d 100644
--- a/include/llvm/ADT/iterator.h
+++ b/include/llvm/ADT/iterator.h
@@ -12,6 +12,7 @@
#include <cstddef>
#include <iterator>
+#include <type_traits>
namespace llvm {
@@ -256,6 +257,23 @@ struct pointee_iterator
T &operator*() const { return **this->I; }
};
-}
+template <typename WrappedIteratorT,
+ typename T = decltype(&*std::declval<WrappedIteratorT>())>
+class pointer_iterator
+ : public iterator_adaptor_base<pointer_iterator<WrappedIteratorT>,
+ WrappedIteratorT, T> {
+ mutable T Ptr;
+
+public:
+ pointer_iterator() = default;
+
+ explicit pointer_iterator(WrappedIteratorT u)
+ : pointer_iterator::iterator_adaptor_base(std::move(u)) {}
+
+ T &operator*() { return Ptr = &*this->I; }
+ const T &operator*() const { return Ptr = &*this->I; }
+};
+
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_ITERATOR_H
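
A brief usage sketch for the pointer_iterator adaptor added above: wrapping an iterator over T yields an iterator over T*. This assumes a tree where the header resolves:

#include "llvm/ADT/iterator.h"
#include <cassert>
#include <vector>

int main() {
  std::vector<int> V = {1, 2, 3};
  llvm::pointer_iterator<std::vector<int>::iterator> Begin(V.begin()),
      End(V.end());

  // Dereferencing yields a pointer to the underlying element.
  assert(*Begin == &V[0]);

  int Sum = 0;
  for (auto I = Begin; I != End; ++I)
    Sum += **I;       // *I is an int*, **I is the int itself
  assert(Sum == 6);
}
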
diff --git a/include/llvm/ADT/simple_ilist.h b/include/llvm/ADT/simple_ilist.h
new file mode 100644
index 000000000000..a1ab59170840
--- /dev/null
+++ b/include/llvm/ADT/simple_ilist.h
@@ -0,0 +1,310 @@
+//===- llvm/ADT/simple_ilist.h - Simple Intrusive List ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SIMPLE_ILIST_H
+#define LLVM_ADT_SIMPLE_ILIST_H
+
+#include "llvm/ADT/ilist_base.h"
+#include "llvm/ADT/ilist_iterator.h"
+#include "llvm/ADT/ilist_node.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+
+namespace llvm {
+
+/// A simple intrusive list implementation.
+///
+/// This is a simple intrusive list for a \c T that inherits from \c
+/// ilist_node<T>. The list never takes ownership of anything inserted in it.
+///
+/// Unlike \a iplist<T> and \a ilist<T>, \a simple_ilist<T> never allocates or
+/// deletes values, and has no callback traits.
+///
+/// The API for adding nodes includes \a push_front(), \a push_back(), and \a
+/// insert(). These all take values by reference (not by pointer), except for
+/// the range version of \a insert().
+///
+/// There are three sets of API for discarding nodes from the list: \a
+/// remove(), which takes a reference to the node to remove, \a erase(), which
+/// takes an iterator or iterator range and returns the next one, and \a
+/// clear(), which empties out the container. All three are constant time
+/// operations. None of these deletes any nodes; in particular, if there is a
+/// single node in the list, then these have identical semantics:
+/// \li \c L.remove(L.front());
+/// \li \c L.erase(L.begin());
+/// \li \c L.clear();
+///
+/// As a convenience for callers, there are parallel APIs that take a \c
+/// Disposer (such as \c std::default_delete<T>): \a removeAndDispose(), \a
+/// eraseAndDispose(), and \a clearAndDispose(). These have different names
+/// because the extra semantic is otherwise non-obvious. They are equivalent
+/// to calling \a std::for_each() on the range to be discarded.
+///
+/// The currently available \p Options customize the nodes in the list. The
+/// same options must be specified in the \a ilist_node instantiation for
+/// compatibility (although the order is irrelevant).
+/// \li Use \a ilist_tag to designate which ilist_node for a given \p T this
+/// list should use. This is useful if a type \p T is part of multiple,
+/// independent lists simultaneously.
+/// \li Use \a ilist_sentinel_tracking to always (or never) track whether a
+/// node is a sentinel. Specifying \c true enables the \a
+/// ilist_node::isSentinel() API. Unlike \a ilist_node::isKnownSentinel(),
+/// which is only appropriate for assertions, \a ilist_node::isSentinel() is
+/// appropriate for real logic.
+///
+/// Here are examples of \p Options usage:
+/// \li \c simple_ilist<T> gives the defaults.
+/// \li \c simple_ilist<T,ilist_sentinel_tracking<true>> enables the \a
+/// ilist_node::isSentinel() API.
+/// \li \c simple_ilist<T,ilist_tag<A>,ilist_sentinel_tracking<false>>
+/// specifies a tag of A and that tracking should be off (even when
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS is enabled).
+/// \li \c simple_ilist<T,ilist_sentinel_tracking<false>,ilist_tag<A>> is
+/// equivalent to the last.
+///
+/// See \a is_valid_option for steps on adding a new option.
+template <typename T, class... Options>
+class simple_ilist
+ : ilist_detail::compute_node_options<T, Options...>::type::list_base_type,
+ ilist_detail::SpecificNodeAccess<
+ typename ilist_detail::compute_node_options<T, Options...>::type> {
+ static_assert(ilist_detail::check_options<Options...>::value,
+ "Unrecognized node option!");
+ typedef
+ typename ilist_detail::compute_node_options<T, Options...>::type OptionsT;
+ typedef typename OptionsT::list_base_type list_base_type;
+ ilist_sentinel<OptionsT> Sentinel;
+
+public:
+ typedef typename OptionsT::value_type value_type;
+ typedef typename OptionsT::pointer pointer;
+ typedef typename OptionsT::reference reference;
+ typedef typename OptionsT::const_pointer const_pointer;
+ typedef typename OptionsT::const_reference const_reference;
+ typedef ilist_iterator<OptionsT, false, false> iterator;
+ typedef ilist_iterator<OptionsT, false, true> const_iterator;
+ typedef ilist_iterator<OptionsT, true, false> reverse_iterator;
+ typedef ilist_iterator<OptionsT, true, true> const_reverse_iterator;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ simple_ilist() = default;
+ ~simple_ilist() = default;
+
+ // No copy constructors.
+ simple_ilist(const simple_ilist &) = delete;
+ simple_ilist &operator=(const simple_ilist &) = delete;
+
+ // Move constructors.
+ simple_ilist(simple_ilist &&X) { splice(end(), X); }
+ simple_ilist &operator=(simple_ilist &&X) {
+ clear();
+ splice(end(), X);
+ return *this;
+ }
+
+ iterator begin() { return ++iterator(Sentinel); }
+ const_iterator begin() const { return ++const_iterator(Sentinel); }
+ iterator end() { return iterator(Sentinel); }
+ const_iterator end() const { return const_iterator(Sentinel); }
+ reverse_iterator rbegin() { return ++reverse_iterator(Sentinel); }
+ const_reverse_iterator rbegin() const {
+ return ++const_reverse_iterator(Sentinel);
+ }
+ reverse_iterator rend() { return reverse_iterator(Sentinel); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(Sentinel);
+ }
+
+ /// Check if the list is empty in constant time.
+ LLVM_NODISCARD bool empty() const { return Sentinel.empty(); }
+
+ /// Calculate the size of the list in linear time.
+ LLVM_NODISCARD size_type size() const {
+ return std::distance(begin(), end());
+ }
+
+ reference front() { return *begin(); }
+ const_reference front() const { return *begin(); }
+ reference back() { return *rbegin(); }
+ const_reference back() const { return *rbegin(); }
+
+ /// Insert a node at the front; never copies.
+ void push_front(reference Node) { insert(begin(), Node); }
+
+ /// Insert a node at the back; never copies.
+ void push_back(reference Node) { insert(end(), Node); }
+
+ /// Remove the node at the front; never deletes.
+ void pop_front() { erase(begin()); }
+
+ /// Remove the node at the back; never deletes.
+ void pop_back() { erase(--end()); }
+
+ /// Swap with another list in place using std::swap.
+ void swap(simple_ilist &X) { std::swap(*this, X); }
+
+ /// Insert a node by reference; never copies.
+ iterator insert(iterator I, reference Node) {
+ list_base_type::insertBefore(*I.getNodePtr(), *this->getNodePtr(&Node));
+ return iterator(&Node);
+ }
+
+ /// Insert a range of nodes; never copies.
+ template <class Iterator>
+ void insert(iterator I, Iterator First, Iterator Last) {
+ for (; First != Last; ++First)
+ insert(I, *First);
+ }
+
+ /// Clone another list.
+ template <class Cloner, class Disposer>
+ void cloneFrom(const simple_ilist &L2, Cloner clone, Disposer dispose) {
+ clearAndDispose(dispose);
+ for (const_reference V : L2)
+ push_back(*clone(V));
+ }
+
+ /// Remove a node by reference; never deletes.
+ ///
+ /// \see \a erase() for removing by iterator.
+ /// \see \a removeAndDispose() if the node should be deleted.
+ void remove(reference N) { list_base_type::remove(*this->getNodePtr(&N)); }
+
+ /// Remove a node by reference and dispose of it.
+ template <class Disposer>
+ void removeAndDispose(reference N, Disposer dispose) {
+ remove(N);
+ dispose(&N);
+ }
+
+ /// Remove a node by iterator; never deletes.
+ ///
+ /// \see \a remove() for removing by reference.
+ /// \see \a eraseAndDispose() if the node should be deleted.
+ iterator erase(iterator I) {
+ assert(I != end() && "Cannot remove end of list!");
+ remove(*I++);
+ return I;
+ }
+
+ /// Remove a range of nodes; never deletes.
+ ///
+ /// \see \a eraseAndDispose() if the nodes should be deleted.
+ iterator erase(iterator First, iterator Last) {
+ list_base_type::removeRange(*First.getNodePtr(), *Last.getNodePtr());
+ return Last;
+ }
+
+ /// Remove a node by iterator and dispose of it.
+ template <class Disposer>
+ iterator eraseAndDispose(iterator I, Disposer dispose) {
+ auto Next = std::next(I);
+ erase(I);
+ dispose(&*I);
+ return Next;
+ }
+
+ /// Remove a range of nodes and dispose of them.
+ template <class Disposer>
+ iterator eraseAndDispose(iterator First, iterator Last, Disposer dispose) {
+ while (First != Last)
+ First = eraseAndDispose(First, dispose);
+ return Last;
+ }
+
+ /// Clear the list; never deletes.
+ ///
+ /// \see \a clearAndDispose() if the nodes should be deleted.
+ void clear() { Sentinel.reset(); }
+
+ /// Clear the list and dispose of the nodes.
+ template <class Disposer> void clearAndDispose(Disposer dispose) {
+ eraseAndDispose(begin(), end(), dispose);
+ }
+
+ /// Splice in another list.
+ void splice(iterator I, simple_ilist &L2) {
+ splice(I, L2, L2.begin(), L2.end());
+ }
+
+ /// Splice in a node from another list.
+ void splice(iterator I, simple_ilist &L2, iterator Node) {
+ splice(I, L2, Node, std::next(Node));
+ }
+
+ /// Splice in a range of nodes from another list.
+ void splice(iterator I, simple_ilist &, iterator First, iterator Last) {
+ list_base_type::transferBefore(*I.getNodePtr(), *First.getNodePtr(),
+ *Last.getNodePtr());
+ }
+
+ /// Merge in another list.
+ ///
+ /// \pre \c this and \p RHS are sorted.
+ ///@{
+ void merge(simple_ilist &RHS) { merge(RHS, std::less<T>()); }
+ template <class Compare> void merge(simple_ilist &RHS, Compare comp);
+ ///@}
+
+ /// Sort the list.
+ ///@{
+ void sort() { sort(std::less<T>()); }
+ template <class Compare> void sort(Compare comp);
+ ///@}
+};
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::merge(simple_ilist &RHS, Compare comp) {
+ if (this == &RHS || RHS.empty())
+ return;
+ iterator LI = begin(), LE = end();
+ iterator RI = RHS.begin(), RE = RHS.end();
+ while (LI != LE) {
+ if (comp(*RI, *LI)) {
+ // Transfer a run of at least size 1 from RHS to LHS.
+ iterator RunStart = RI++;
+ RI = std::find_if(RI, RE, [&](reference RV) { return !comp(RV, *LI); });
+ splice(LI, RHS, RunStart, RI);
+ if (RI == RE)
+ return;
+ }
+ ++LI;
+ }
+ // Transfer the remaining RHS nodes once LHS is finished.
+ splice(LE, RHS, RI, RE);
+}
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::sort(Compare comp) {
+ // Vacuously sorted.
+ if (empty() || std::next(begin()) == end())
+ return;
+
+ // Split the list in the middle.
+ iterator Center = begin(), End = begin();
+ while (End != end() && ++End != end()) {
+ ++Center;
+ ++End;
+ }
+ simple_ilist RHS;
+ RHS.splice(RHS.end(), *this, Center, end());
+
+ // Sort the sublists and merge back together.
+ sort(comp);
+ RHS.sort(comp);
+ merge(RHS, comp);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SIMPLE_ILIST_H
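
A usage sketch for simple_ilist, assuming the headers from this patch are available; Item is a hypothetical node type. Because the list never owns its nodes, heap-allocated elements are released explicitly via clearAndDispose:

#include "llvm/ADT/simple_ilist.h"
#include <cassert>
#include <memory>

struct Item : llvm::ilist_node<Item> {
  int Value;
  explicit Item(int V) : Value(V) {}
};

int main() {
  llvm::simple_ilist<Item> L, R;
  for (int V : {3, 1, 2})
    L.push_back(*new Item(V));        // the list takes no ownership
  for (int V : {5, 4})
    R.push_back(*new Item(V));

  auto Less = [](const Item &A, const Item &B) { return A.Value < B.Value; };
  L.sort(Less);
  R.sort(Less);
  L.merge(R, Less);                   // splices every node out of R
  assert(R.empty() && L.size() == 5);

  int Prev = 0;
  for (Item &I : L) {
    assert(I.Value > Prev);           // 1 2 3 4 5
    Prev = I.Value;
  }

  // Unlink every node and delete it in one pass.
  L.clearAndDispose(std::default_delete<Item>());
  assert(L.empty());
}
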
diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h
index d6308b7073a0..d8e50438e722 100644
--- a/include/llvm/Analysis/AliasAnalysis.h
+++ b/include/llvm/Analysis/AliasAnalysis.h
@@ -112,8 +112,10 @@ enum FunctionModRefLocation {
FMRL_Nowhere = 0,
/// Access to memory via argument pointers.
FMRL_ArgumentPointees = 4,
+ /// Memory that is inaccessible via LLVM IR.
+ FMRL_InaccessibleMem = 8,
/// Access to any memory.
- FMRL_Anywhere = 8 | FMRL_ArgumentPointees
+ FMRL_Anywhere = 16 | FMRL_InaccessibleMem | FMRL_ArgumentPointees
};
/// Summary of how a function affects memory in the program.
@@ -143,6 +145,22 @@ enum FunctionModRefBehavior {
/// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
FMRB_OnlyAccessesArgumentPointees = FMRL_ArgumentPointees | MRI_ModRef,
+ /// The only memory references in this function (if it has any) are
+ /// references of memory that is otherwise inaccessible via LLVM IR.
+ ///
+ /// This property corresponds to the LLVM IR inaccessiblememonly attribute.
+ FMRB_OnlyAccessesInaccessibleMem = FMRL_InaccessibleMem | MRI_ModRef,
+
+ /// The function may perform non-volatile loads and stores of objects
+ /// pointed to by its pointer-typed arguments, with arbitrary offsets, and
+ /// it may also perform loads and stores of memory that is otherwise
+ /// inaccessible via LLVM IR.
+ ///
+ /// This property corresponds to the LLVM IR
+ /// inaccessiblemem_or_argmemonly attribute.
+ FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem |
+ FMRL_ArgumentPointees | MRI_ModRef,
+
/// This function does not perform any non-local stores or volatile loads,
/// but may read from any memory location.
///
@@ -179,6 +197,20 @@ public:
AAs.emplace_back(new Model<AAResultT>(AAResult, *this));
}
+ /// Register a function analysis ID that the results aggregation depends on.
+ ///
+ /// This is used in the new pass manager to implement the invalidation logic
+ /// where we must invalidate the results aggregation if any of our component
+ /// analyses become invalid.
+ void addAADependencyID(AnalysisKey *ID) { AADeps.push_back(ID); }
+
+ /// Handle invalidation events in the new pass manager.
+ ///
+ /// The aggregation is invalidated if any of the underlying analyses is
+ /// invalidated.
+ bool invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &Inv);
+
//===--------------------------------------------------------------------===//
/// \name Alias Queries
/// @{
@@ -339,6 +371,26 @@ public:
return (MRB & MRI_ModRef) && (MRB & FMRL_ArgumentPointees);
}
+ /// Checks if functions with the specified behavior are known to read and
+ /// write at most from memory that is inaccessible from LLVM IR.
+ static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB) {
+ return !(MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem);
+ }
+
+ /// Checks if functions with the specified behavior are known to potentially
+ /// read or write from memory that is inaccessible from LLVM IR.
+ static bool doesAccessInaccessibleMem(FunctionModRefBehavior MRB) {
+ return (MRB & MRI_ModRef) && (MRB & FMRL_InaccessibleMem);
+ }
+
+ /// Checks if functions with the specified behavior are known to read and
+ /// write at most from memory that is inaccessible from LLVM IR or objects
+ /// pointed to by their pointer-typed arguments (with arbitrary offsets).
+ static bool onlyAccessesInaccessibleOrArgMem(FunctionModRefBehavior MRB) {
+ return !(MRB & FMRL_Anywhere &
+ ~(FMRL_InaccessibleMem | FMRL_ArgumentPointees));
+ }
+
/// getModRefInfo (for call sites) - Return information about whether
/// a particular call site modifies or reads the specified memory location.
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
@@ -571,6 +623,8 @@ private:
const TargetLibraryInfo &TLI;
std::vector<std::unique_ptr<Concept>> AAs;
+
+ std::vector<AnalysisKey *> AADeps;
};
/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
@@ -854,20 +908,6 @@ class AAManager : public AnalysisInfoMixin<AAManager> {
public:
typedef AAResults Result;
- // This type hase value semantics. We have to spell these out because MSVC
- // won't synthesize them.
- AAManager() {}
- AAManager(AAManager &&Arg) : ResultGetters(std::move(Arg.ResultGetters)) {}
- AAManager(const AAManager &Arg) : ResultGetters(Arg.ResultGetters) {}
- AAManager &operator=(AAManager &&RHS) {
- ResultGetters = std::move(RHS.ResultGetters);
- return *this;
- }
- AAManager &operator=(const AAManager &RHS) {
- ResultGetters = RHS.ResultGetters;
- return *this;
- }
-
/// Register a specific AA result.
template <typename AnalysisT> void registerFunctionAnalysis() {
ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>);
@@ -878,7 +918,7 @@ public:
ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
}
- Result run(Function &F, AnalysisManager<Function> &AM) {
+ Result run(Function &F, FunctionAnalysisManager &AM) {
Result R(AM.getResult<TargetLibraryAnalysis>(F));
for (auto &Getter : ResultGetters)
(*Getter)(F, AM, R);
@@ -887,26 +927,30 @@ public:
private:
friend AnalysisInfoMixin<AAManager>;
- static char PassID;
+ static AnalysisKey Key;
- SmallVector<void (*)(Function &F, AnalysisManager<Function> &AM,
+ SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM,
AAResults &AAResults),
4> ResultGetters;
template <typename AnalysisT>
static void getFunctionAAResultImpl(Function &F,
- AnalysisManager<Function> &AM,
+ FunctionAnalysisManager &AM,
AAResults &AAResults) {
AAResults.addAAResult(AM.template getResult<AnalysisT>(F));
+ AAResults.addAADependencyID(AnalysisT::ID());
}
template <typename AnalysisT>
- static void getModuleAAResultImpl(Function &F, AnalysisManager<Function> &AM,
+ static void getModuleAAResultImpl(Function &F, FunctionAnalysisManager &AM,
AAResults &AAResults) {
- auto &MAM =
- AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
- if (auto *R = MAM.template getCachedResult<AnalysisT>(*F.getParent()))
+ auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
+ auto &MAM = MAMProxy.getManager();
+ if (auto *R = MAM.template getCachedResult<AnalysisT>(*F.getParent())) {
AAResults.addAAResult(*R);
+ MAMProxy
+ .template registerOuterAnalysisInvalidation<AnalysisT, AAManager>();
+ }
}
};
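
The new location-mask predicates above reduce to simple bit arithmetic. Below is a standalone sketch that mirrors the FMRL_* values from this hunk; the MRI_ModRef = 3 encoding is an assumption, since ModRefInfo is not part of this diff:

#include <cassert>

enum FunctionModRefLocation {
  FMRL_Nowhere = 0,
  FMRL_ArgumentPointees = 4,
  FMRL_InaccessibleMem = 8,
  FMRL_Anywhere = 16 | FMRL_InaccessibleMem | FMRL_ArgumentPointees
};
enum { MRI_ModRef = 3 };  // assumed encoding; not shown in this diff

static bool onlyAccessesInaccessibleMem(unsigned MRB) {
  // "No location bit outside FMRL_InaccessibleMem is set."
  return !(MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem);
}
static bool onlyAccessesInaccessibleOrArgMem(unsigned MRB) {
  return !(MRB & FMRL_Anywhere &
           ~(FMRL_InaccessibleMem | FMRL_ArgumentPointees));
}

int main() {
  unsigned InaccessibleOnly = FMRL_InaccessibleMem | MRI_ModRef;
  unsigned ArgMemOnly = FMRL_ArgumentPointees | MRI_ModRef;
  unsigned InaccessibleOrArg =
      FMRL_InaccessibleMem | FMRL_ArgumentPointees | MRI_ModRef;

  assert(onlyAccessesInaccessibleMem(InaccessibleOnly));
  assert(!onlyAccessesInaccessibleMem(ArgMemOnly));        // touches arguments
  assert(!onlyAccessesInaccessibleMem(InaccessibleOrArg));
  assert(onlyAccessesInaccessibleOrArgMem(InaccessibleOrArg));
}
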
diff --git a/include/llvm/Analysis/AliasAnalysisEvaluator.h b/include/llvm/Analysis/AliasAnalysisEvaluator.h
index 505ed0d9723a..214574852655 100644
--- a/include/llvm/Analysis/AliasAnalysisEvaluator.h
+++ b/include/llvm/Analysis/AliasAnalysisEvaluator.h
@@ -53,7 +53,7 @@ public:
~AAEvaluator();
/// \brief Run the pass over the function.
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
private:
// Allow the legacy pass to run this using an internal API.
diff --git a/include/llvm/Analysis/AliasSetTracker.h b/include/llvm/Analysis/AliasSetTracker.h
index cec56889c0ae..5d11b22c6eed 100644
--- a/include/llvm/Analysis/AliasSetTracker.h
+++ b/include/llvm/Analysis/AliasSetTracker.h
@@ -115,15 +115,21 @@ class AliasSet : public ilist_node<AliasSet> {
}
};
- PointerRec *PtrList, **PtrListEnd; // Doubly linked list of nodes.
- AliasSet *Forward; // Forwarding pointer.
+ // Doubly linked list of nodes.
+ PointerRec *PtrList, **PtrListEnd;
+ // Forwarding pointer.
+ AliasSet *Forward;
/// All instructions without a specific address in this alias set.
std::vector<AssertingVH<Instruction> > UnknownInsts;
/// Number of nodes pointing to this AliasSet plus the number of AliasSets
/// forwarding to it.
- unsigned RefCount : 28;
+ unsigned RefCount : 27;
+
+ // Signifies that this set should be considered to alias any pointer.
+ // Use when the tracker holding this set is saturated.
+ unsigned AliasAny : 1;
/// The kinds of access this alias set models.
///
@@ -153,7 +159,10 @@ class AliasSet : public ilist_node<AliasSet> {
/// True if this alias set contains volatile loads or stores.
unsigned Volatile : 1;
+ unsigned SetSize;
+
void addRef() { ++RefCount; }
+
void dropRef(AliasSetTracker &AST) {
assert(RefCount >= 1 && "Invalid reference count detected!");
if (--RefCount == 0)
@@ -189,6 +198,10 @@ public:
iterator end() const { return iterator(); }
bool empty() const { return PtrList == nullptr; }
+ // Unfortunately, ilist::size() is linear, so we have to add code to keep
+ // track of the list's exact size.
+ unsigned size() { return SetSize; }
+
void print(raw_ostream &OS) const;
void dump() const;
@@ -226,13 +239,11 @@ public:
};
private:
- // Can only be created by AliasSetTracker. Also, ilist creates one
- // to serve as a sentinel.
- friend struct ilist_sentinel_traits<AliasSet>;
+ // Can only be created by AliasSetTracker.
AliasSet()
- : PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
- Access(NoAccess), Alias(SetMustAlias), Volatile(false) {
- }
+ : PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
+ AliasAny(false), Access(NoAccess), Alias(SetMustAlias),
+ Volatile(false), SetSize(0) {}
AliasSet(const AliasSet &AS) = delete;
void operator=(const AliasSet &AS) = delete;
@@ -317,7 +328,8 @@ class AliasSetTracker {
public:
/// Create an empty collection of AliasSets, and use the specified alias
/// analysis object to disambiguate load and store addresses.
- explicit AliasSetTracker(AliasAnalysis &aa) : AA(aa) {}
+ explicit AliasSetTracker(AliasAnalysis &aa)
+ : AA(aa), TotalMayAliasSetSize(0), AliasAnyAS(nullptr) {}
~AliasSetTracker() { clear(); }
/// These methods are used to add different types of instructions to the alias
@@ -332,27 +344,16 @@ public:
/// These methods return true if inserting the instruction resulted in the
/// addition of a new alias set (i.e., the pointer did not alias anything).
///
- bool add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
- bool add(LoadInst *LI);
- bool add(StoreInst *SI);
- bool add(VAArgInst *VAAI);
- bool add(MemSetInst *MSI);
- bool add(Instruction *I); // Dispatch to one of the other add methods...
+ void add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
+ void add(LoadInst *LI);
+ void add(StoreInst *SI);
+ void add(VAArgInst *VAAI);
+ void add(MemSetInst *MSI);
+ void add(MemTransferInst *MTI);
+ void add(Instruction *I); // Dispatch to one of the other add methods...
void add(BasicBlock &BB); // Add all instructions in basic block
void add(const AliasSetTracker &AST); // Add alias relations from another AST
- bool addUnknown(Instruction *I);
-
- /// These methods are used to remove all entries that might be aliased by the
- /// specified instruction. These methods return true if any alias sets were
- /// eliminated.
- bool remove(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo);
- bool remove(LoadInst *LI);
- bool remove(StoreInst *SI);
- bool remove(VAArgInst *VAAI);
- bool remove(MemSetInst *MSI);
- bool remove(Instruction *I);
- void remove(AliasSet &AS);
- bool removeUnknown(Instruction *I);
+ void addUnknown(Instruction *I);
void clear();
@@ -364,8 +365,7 @@ public:
/// set is created to contain the pointer (because the pointer didn't alias
/// anything).
AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
- const AAMDNodes &AAInfo,
- bool *New = nullptr);
+ const AAMDNodes &AAInfo);
/// Return the alias set containing the location specified if one exists,
/// otherwise return null.
@@ -374,11 +374,6 @@ public:
return mergeAliasSetsForPointer(P, Size, AAInfo);
}
- /// Return true if the specified location is represented by this alias set,
- /// false otherwise. This does not modify the AST object or alias sets.
- bool containsPointer(const Value *P, uint64_t Size,
- const AAMDNodes &AAInfo) const;
-
/// Return true if the specified instruction "may" (or must) alias one of the
/// members in any of the sets.
bool containsUnknown(const Instruction *I) const;
@@ -412,6 +407,14 @@ public:
private:
friend class AliasSet;
+
+ // The total number of pointers contained in all "may" alias sets.
+ unsigned TotalMayAliasSetSize;
+
+ // A non-null value signifies this AST is saturated. A saturated AST lumps
+ // all pointers into a single "May" set.
+ AliasSet *AliasAnyAS;
+
void removeAliasSet(AliasSet *AS);
/// Just like operator[] on the map, except that it creates an entry for the
@@ -424,16 +427,14 @@ private:
}
AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
- AliasSet::AccessLattice E,
- bool &NewSet) {
- NewSet = false;
- AliasSet &AS = getAliasSetForPointer(P, Size, AAInfo, &NewSet);
- AS.Access |= E;
- return AS;
- }
+ AliasSet::AccessLattice E);
AliasSet *mergeAliasSetsForPointer(const Value *Ptr, uint64_t Size,
const AAMDNodes &AAInfo);
+ /// Merge all alias sets into a single set that is considered to alias any
+ /// pointer.
+ AliasSet &mergeAllAliasSets();
+
AliasSet *findAliasSetForUnknownInst(Instruction *Inst);
};
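
The comments above note two bookkeeping choices: a running counter so size() stays constant time, and saturation into a single catch-all set once the tracker gives up on precision. A simplified illustration of that design follows; it is not the AliasSetTracker implementation, and Threshold and the container layout are hypothetical:

#include <cassert>
#include <cstddef>
#include <iterator>
#include <list>
#include <vector>

class Tracker {
  std::list<std::vector<int>> Sets;   // one vector of "pointers" per set
  std::size_t TotalSize = 0;          // running count; avoids linear scans
  bool Saturated = false;
  static const std::size_t Threshold = 4;

public:
  void add(int Ptr) {
    if (!Saturated && TotalSize >= Threshold) {
      // Merge every existing set into the first one and stop being precise.
      auto First = Sets.begin();
      for (auto It = std::next(First); It != Sets.end(); It = Sets.erase(It))
        First->insert(First->end(), It->begin(), It->end());
      Saturated = true;
    }
    if (Saturated)
      Sets.front().push_back(Ptr);
    else
      Sets.push_back({Ptr});          // pretend each pointer gets its own set
    ++TotalSize;
  }
  std::size_t size() const { return TotalSize; }   // constant time
  bool isSaturated() const { return Saturated; }
};

int main() {
  Tracker T;
  for (int I = 0; I < 6; ++I)
    T.add(I);
  assert(T.size() == 6 && T.isSaturated());
}
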
diff --git a/include/llvm/Analysis/AssumptionCache.h b/include/llvm/Analysis/AssumptionCache.h
index 06f2a117ac21..406a1fe9f560 100644
--- a/include/llvm/Analysis/AssumptionCache.h
+++ b/include/llvm/Analysis/AssumptionCache.h
@@ -95,17 +95,11 @@ public:
/// assumption caches for a given function.
class AssumptionAnalysis : public AnalysisInfoMixin<AssumptionAnalysis> {
friend AnalysisInfoMixin<AssumptionAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef AssumptionCache Result;
- AssumptionAnalysis() {}
- AssumptionAnalysis(const AssumptionAnalysis &Arg) {}
- AssumptionAnalysis(AssumptionAnalysis &&Arg) {}
- AssumptionAnalysis &operator=(const AssumptionAnalysis &RHS) { return *this; }
- AssumptionAnalysis &operator=(AssumptionAnalysis &&RHS) { return *this; }
-
AssumptionCache run(Function &F, FunctionAnalysisManager &) {
return AssumptionCache(F);
}
@@ -117,7 +111,7 @@ class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
public:
explicit AssumptionPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief An immutable pass that tracks lazily created \c AssumptionCache
diff --git a/include/llvm/Analysis/BasicAliasAnalysis.h b/include/llvm/Analysis/BasicAliasAnalysis.h
index a3195d17b029..addfffa01061 100644
--- a/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -33,9 +33,10 @@ class LoopInfo;
/// This is the AA result object for the basic, local, and stateless alias
/// analysis. It implements the AA query interface in an entirely stateless
-/// manner. As one consequence, it is never invalidated. While it does retain
-/// some storage, that is used as an optimization and not to preserve
-/// information from query to query.
+/// manner. As one consequence, it is never invalidated due to IR changes.
+/// While it does retain some storage, that is used as an optimization and not
+/// to preserve information from query to query. However it does retain handles
+/// to various other analyses and must be recomputed when those analyses are.
class BasicAAResult : public AAResultBase<BasicAAResult> {
friend AAResultBase<BasicAAResult>;
@@ -58,10 +59,9 @@ public:
: AAResultBase(std::move(Arg)), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC),
DT(Arg.DT), LI(Arg.LI) {}
- /// Handle invalidation events from the new pass manager.
- ///
- /// By definition, this result is stateless and so remains valid.
- bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+ /// Handle invalidation events in the new pass manager.
+ bool invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &Inv);
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
@@ -185,25 +185,28 @@ private:
AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo);
+ uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ const Value *UnderV2);
AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
const AAMDNodes &SIAAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo);
+ uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ const Value *UnderV2);
AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
- const Value *V2, uint64_t V2Size, AAMDNodes V2AATag);
+ const Value *V2, uint64_t V2Size, AAMDNodes V2AATag,
+ const Value *O1 = nullptr, const Value *O2 = nullptr);
};
/// Analysis pass providing a never-invalidated alias analysis result.
class BasicAA : public AnalysisInfoMixin<BasicAA> {
friend AnalysisInfoMixin<BasicAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef BasicAAResult Result;
- BasicAAResult run(Function &F, AnalysisManager<Function> &AM);
+ BasicAAResult run(Function &F, FunctionAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the BasicAAResult object.
diff --git a/include/llvm/Analysis/BlockFrequencyInfo.h b/include/llvm/Analysis/BlockFrequencyInfo.h
index 7d48dfc9121e..562041d11fa1 100644
--- a/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -61,6 +61,11 @@ public:
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getBlockProfileCount(const BasicBlock *BB) const;
+ /// \brief Returns the estimated profile count of \p Freq.
+ /// This uses the frequency \p Freq and multiplies it by
+ /// the enclosing function's count (if available) and returns the value.
+ Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
+
// Set the frequency of the given basic block.
void setBlockFreq(const BasicBlock *BB, uint64_t Freq);
@@ -85,14 +90,14 @@ public:
class BlockFrequencyAnalysis
: public AnalysisInfoMixin<BlockFrequencyAnalysis> {
friend AnalysisInfoMixin<BlockFrequencyAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
typedef BlockFrequencyInfo Result;
/// \brief Run the analysis pass over a function and produce BFI.
- Result run(Function &F, AnalysisManager<Function> &AM);
+ Result run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c BlockFrequencyInfo results.
@@ -102,7 +107,7 @@ class BlockFrequencyPrinterPass
public:
explicit BlockFrequencyPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c BlockFrequencyInfo.
diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index 7ed06b1bb68f..3f4428d18740 100644
--- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -482,6 +482,8 @@ public:
BlockFrequency getBlockFreq(const BlockNode &Node) const;
Optional<uint64_t> getBlockProfileCount(const Function &F,
const BlockNode &Node) const;
+ Optional<uint64_t> getProfileCountFromFreq(const Function &F,
+ uint64_t Freq) const;
void setBlockFreq(const BlockNode &Node, uint64_t Freq);
@@ -925,6 +927,10 @@ public:
const BlockT *BB) const {
return BlockFrequencyInfoImplBase::getBlockProfileCount(F, getNode(BB));
}
+ Optional<uint64_t> getProfileCountFromFreq(const Function &F,
+ uint64_t Freq) const {
+ return BlockFrequencyInfoImplBase::getProfileCountFromFreq(F, Freq);
+ }
void setBlockFreq(const BlockT *BB, uint64_t Freq);
Scaled64 getFloatingBlockFreq(const BlockT *BB) const {
return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
@@ -1245,7 +1251,7 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
: DefaultDOTGraphTraits(isSimple) {}
typedef GraphTraits<BlockFrequencyInfoT *> GTraits;
- typedef typename GTraits::NodeType NodeType;
+ typedef typename GTraits::NodeRef NodeRef;
typedef typename GTraits::ChildIteratorType EdgeIter;
typedef typename GTraits::nodes_iterator NodeIter;
@@ -1254,8 +1260,7 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
return G->getFunction()->getName();
}
- std::string getNodeAttributes(const NodeType *Node,
- const BlockFrequencyInfoT *Graph,
+ std::string getNodeAttributes(NodeRef Node, const BlockFrequencyInfoT *Graph,
unsigned HotPercentThreshold = 0) {
std::string Result;
if (!HotPercentThreshold)
@@ -1266,9 +1271,9 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
for (NodeIter I = GTraits::nodes_begin(Graph),
E = GTraits::nodes_end(Graph);
I != E; ++I) {
- NodeType &N = *I;
+ NodeRef N = *I;
MaxFrequency =
- std::max(MaxFrequency, Graph->getBlockFreq(&N).getFrequency());
+ std::max(MaxFrequency, Graph->getBlockFreq(N).getFrequency());
}
}
BlockFrequency Freq = Graph->getBlockFreq(Node);
@@ -1285,8 +1290,8 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
return Result;
}
- std::string getNodeLabel(const NodeType *Node,
- const BlockFrequencyInfoT *Graph, GVDAGType GType) {
+ std::string getNodeLabel(NodeRef Node, const BlockFrequencyInfoT *Graph,
+ GVDAGType GType) {
std::string Result;
raw_string_ostream OS(Result);
@@ -1313,7 +1318,7 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
return Result;
}
- std::string getEdgeAttributes(const NodeType *Node, EdgeIter EI,
+ std::string getEdgeAttributes(NodeRef Node, EdgeIter EI,
const BlockFrequencyInfoT *BFI,
const BranchProbabilityInfoT *BPI,
unsigned HotPercentThreshold = 0) {
diff --git a/include/llvm/Analysis/BranchProbabilityInfo.h b/include/llvm/Analysis/BranchProbabilityInfo.h
index 6434ba962ebc..14b7a7f529f7 100644
--- a/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -178,14 +178,14 @@ private:
class BranchProbabilityAnalysis
: public AnalysisInfoMixin<BranchProbabilityAnalysis> {
friend AnalysisInfoMixin<BranchProbabilityAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
typedef BranchProbabilityInfo Result;
/// \brief Run the analysis pass over a function and produce BPI.
- BranchProbabilityInfo run(Function &F, AnalysisManager<Function> &AM);
+ BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c BranchProbabilityAnalysis results.
@@ -195,7 +195,7 @@ class BranchProbabilityPrinterPass
public:
explicit BranchProbabilityPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c BranchProbabilityInfo.
diff --git a/include/llvm/Analysis/CFGPrinter.h b/include/llvm/Analysis/CFGPrinter.h
index 035764837e6f..efaa9d6df8ea 100644
--- a/include/llvm/Analysis/CFGPrinter.h
+++ b/include/llvm/Analysis/CFGPrinter.h
@@ -7,6 +7,10 @@
//
//===----------------------------------------------------------------------===//
//
+// This file defines a 'dot-cfg' analysis pass, which emits the
+// cfg.<fnname>.dot file for each function in the program, with a graph of the
+// CFG for that function.
+//
// This file defines external functions that can be called to explicitly
// instantiate the CFG printer.
//
@@ -19,9 +23,34 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/Support/GraphWriter.h"
namespace llvm {
+class CFGViewerPass
+ : public PassInfoMixin<CFGViewerPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class CFGOnlyViewerPass
+ : public PassInfoMixin<CFGOnlyViewerPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class CFGPrinterPass
+ : public PassInfoMixin<CFGPrinterPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class CFGOnlyPrinterPass
+ : public PassInfoMixin<CFGOnlyPrinterPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
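The four new-PM passes above are ordinary function passes. A hedged sketch of adding the printing variants to a FunctionPassManager; the wrapper function is illustrative and not part of this change.

// Usage sketch for the new-PM CFG printing passes declared above.
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/IR/PassManager.h"

static llvm::PreservedAnalyses
emitDotFiles(llvm::Function &F, llvm::FunctionAnalysisManager &FAM) {
  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::CFGPrinterPass());     // emits cfg.<fnname>.dot
  FPM.addPass(llvm::CFGOnlyPrinterPass()); // same graph, without instruction text
  return FPM.run(F, FAM);
}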
template<>
struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
@@ -118,13 +147,42 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
}
return "";
}
+
+ /// Display the raw branch weights from PGO.
+ std::string getEdgeAttributes(const BasicBlock *Node, succ_const_iterator I,
+ const Function *F) {
+ const TerminatorInst *TI = Node->getTerminator();
+ if (TI->getNumSuccessors() == 1)
+ return "";
+
+ MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
+ if (!WeightsNode)
+ return "";
+
+ MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
+ if (MDName->getString() != "branch_weights")
+ return "";
+
+ unsigned OpNo = I.getSuccessorIndex() + 1;
+ if (OpNo >= WeightsNode->getNumOperands())
+ return "";
+ ConstantInt *Weight =
+ mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(OpNo));
+ if (!Weight)
+ return "";
+
+ // Prepend a 'W' to indicate that this is a weight rather than the actual
+ // profile count (due to scaling).
+ Twine Attrs = "label=\"W:" + Twine(Weight->getZExtValue()) + "\"";
+ return Attrs.str();
+ }
};
} // End llvm namespace
namespace llvm {
class FunctionPass;
- FunctionPass *createCFGPrinterPass ();
- FunctionPass *createCFGOnlyPrinterPass ();
+ FunctionPass *createCFGPrinterLegacyPassPass ();
+ FunctionPass *createCFGOnlyPrinterLegacyPassPass ();
} // End llvm namespace
#endif
diff --git a/include/llvm/Analysis/CFLAndersAliasAnalysis.h b/include/llvm/Analysis/CFLAndersAliasAnalysis.h
index 48eca888419a..f3520aa3fe82 100644
--- a/include/llvm/Analysis/CFLAndersAliasAnalysis.h
+++ b/include/llvm/Analysis/CFLAndersAliasAnalysis.h
@@ -42,7 +42,10 @@ public:
/// Handle invalidation events from the new pass manager.
/// By definition, this result is stateless and so remains valid.
- bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &) {
+ return false;
+ }
/// Evict the given function from cache
void evict(const Function &Fn);
@@ -103,12 +106,12 @@ private:
/// in particular to leverage invalidation to trigger re-computation.
class CFLAndersAA : public AnalysisInfoMixin<CFLAndersAA> {
friend AnalysisInfoMixin<CFLAndersAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef CFLAndersAAResult Result;
- CFLAndersAAResult run(Function &F, AnalysisManager<Function> &AM);
+ CFLAndersAAResult run(Function &F, FunctionAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the CFLAndersAAResult object.
diff --git a/include/llvm/Analysis/CFLSteensAliasAnalysis.h b/include/llvm/Analysis/CFLSteensAliasAnalysis.h
index 80a00d02b811..3aae9a1e9b2e 100644
--- a/include/llvm/Analysis/CFLSteensAliasAnalysis.h
+++ b/include/llvm/Analysis/CFLSteensAliasAnalysis.h
@@ -45,7 +45,10 @@ public:
/// Handle invalidation events from the new pass manager.
///
/// By definition, this result is stateless and so remains valid.
- bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &) {
+ return false;
+ }
/// \brief Inserts the given Function into the cache.
void scan(Function *Fn);
@@ -81,16 +84,6 @@ public:
return QueryResult;
}
- /// Get the location associated with a pointer argument of a callsite.
- ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
-
- /// Returns the behavior when calling the given call site.
- FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
-
- /// Returns the behavior when calling the given function. For use when the
- /// call site is not known.
- FunctionModRefBehavior getModRefBehavior(const Function *F);
-
private:
struct FunctionHandle final : public CallbackVH {
FunctionHandle(Function *Fn, CFLSteensAAResult *Result)
@@ -132,12 +125,12 @@ private:
/// in particular to leverage invalidation to trigger re-computation of sets.
class CFLSteensAA : public AnalysisInfoMixin<CFLSteensAA> {
friend AnalysisInfoMixin<CFLSteensAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef CFLSteensAAResult Result;
- CFLSteensAAResult run(Function &F, AnalysisManager<Function> &AM);
+ CFLSteensAAResult run(Function &F, FunctionAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the CFLSteensAAResult object.
diff --git a/include/llvm/Analysis/CGSCCPassManager.h b/include/llvm/Analysis/CGSCCPassManager.h
index 3263ecec4e26..54ef1a688d37 100644
--- a/include/llvm/Analysis/CGSCCPassManager.h
+++ b/include/llvm/Analysis/CGSCCPassManager.h
@@ -11,49 +11,272 @@
/// This header provides classes for managing passes over SCCs of the call
/// graph. These passes form an important component of LLVM's interprocedural
/// optimizations. Because they operate on the SCCs of the call graph, and they
-/// traverse the graph in post order, they can effectively do pair-wise
-/// interprocedural optimizations for all call edges in the program. At each
-/// call site edge, the callee has already been optimized as much as is
-/// possible. This in turn allows very accurate analysis of it for IPO.
+/// traverse the graph in post-order, they can effectively do pair-wise
+/// interprocedural optimizations for all call edges in the program while
+/// incrementally refining it and improving the context of these pair-wise
+/// optimizations. At each call site edge, the callee has already been
+/// optimized as much as is possible. This in turn allows very accurate
+/// analysis of it for IPO.
+///
+/// A secondary, more general goal is to be able to isolate optimization on
+/// unrelated parts of the IR module. This is useful to ensure our
+/// optimizations are principled and don't miss opportunities where refinement
+/// of one part of the module influences transformations in another part of the
+/// module. But this is also useful if we want to parallelize the optimizations
+/// across common large module graph shapes which tend to be very wide and have
+/// large regions of unrelated cliques.
+///
+/// To satisfy these goals, we use the LazyCallGraph which provides two graphs
+/// nested inside each other (and built lazily from the bottom-up): the call
+/// graph proper, and a reference graph. The reference graph is super set of
+/// the call graph and is a conservative approximation of what could through
+/// scalar or CGSCC transforms *become* the call graph. Using this allows us to
+/// ensure we optimize functions prior to them being introduced into the call
+/// graph by devirtualization or other techniques, and thus ensures that
+/// subsequent pair-wise interprocedural optimizations observe the optimized
+/// form of these functions. The (potentially transitive) reference
+/// reachability used by the reference graph is a conservative approximation
+/// that still allows us to have independent regions of the graph.
+///
+/// FIXME: There is one major drawback of the reference graph: in its naive
+/// form it is quadratic because it contains a distinct edge for each
+/// (potentially indirect) reference, even if they are all through some common
+/// global table of function pointers. This can be fixed in a number of ways
+/// that essentially preserve enough of the normalization. While it isn't
+/// expected to completely preclude the usability of this, it will need to be
+/// addressed.
+///
+///
+/// All of these issues are made substantially more complex in the face of
+/// mutations to the call graph while optimization passes are being run. When
+/// mutations to the call graph occur we want to achieve two different things:
+///
+/// - We need to update the call graph in-flight and invalidate analyses
+/// cached on entities in the graph. Because of the cache-based analysis
+/// design of the pass manager, it is essential to have stable identities for
+/// the elements of the IR that passes traverse, and to invalidate any
+/// analyses cached on these elements as the mutations take place.
+///
+/// - We want to preserve the incremental and post-order traversal of the
+/// graph even as it is refined and mutated. This means we want optimization
+/// to observe the most refined form of the call graph and to do so in
+/// post-order.
+///
+/// To address this, the CGSCC manager uses both worklists that can be expanded
+/// by passes which transform the IR, and provides invalidation tests to skip
+/// entries that become dead. This extra data is provided to every SCC pass so
+/// that it can carefully update the manager's traversal as the call graph
+/// mutates.
+///
+/// We also provide support for running function passes within the CGSCC walk,
+/// and there we provide automatic update of the call graph, including updating
+/// the pass manager's traversal, to reflect call graph changes that fall out
+/// naturally as part of scalar transformations.
+///
+/// The patterns used to ensure the goals of post-order visitation of the fully
+/// refined graph:
+///
+/// 1) Sink toward the "bottom" as the graph is refined. This means that any
+/// iteration continues in some valid post-order sequence after the mutation
+/// has altered the structure.
+///
+/// 2) Enqueue in post-order, including the current entity. If the current
+/// entity's shape changes, it and everything after it in post-order needs
+/// to be visited to observe that shape.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CGSCCPASSMANAGER_H
#define LLVM_ANALYSIS_CGSCCPASSMANAGER_H
+#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
namespace llvm {
-extern template class PassManager<LazyCallGraph::SCC>;
-/// \brief The CGSCC pass manager.
-///
-/// See the documentation for the PassManager template for details. It runs
-/// a sequency of SCC passes over each SCC that the manager is run over. This
-/// typedef serves as a convenient way to refer to this construct.
-typedef PassManager<LazyCallGraph::SCC> CGSCCPassManager;
+struct CGSCCUpdateResult;
-extern template class AnalysisManager<LazyCallGraph::SCC>;
+/// Extern template declaration for the analysis set for this IR unit.
+extern template class AllAnalysesOn<LazyCallGraph::SCC>;
+
+extern template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
/// \brief The CGSCC analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This typedef serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
-typedef AnalysisManager<LazyCallGraph::SCC> CGSCCAnalysisManager;
+typedef AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>
+ CGSCCAnalysisManager;
+
+// Explicit specialization and instantiation declarations for the pass manager.
+// See the comments on the definition of the specialization for details on how
+// it differs from the primary template.
+template <>
+PreservedAnalyses
+PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
+ CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
+ CGSCCAnalysisManager &AM,
+ LazyCallGraph &G, CGSCCUpdateResult &UR);
+extern template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
+ LazyCallGraph &, CGSCCUpdateResult &>;
+
+/// \brief The CGSCC pass manager.
+///
+/// See the documentation for the PassManager template for details. It runs
+/// a sequence of SCC passes over each SCC that the manager is run over. This
+/// typedef serves as a convenient way to refer to this construct.
+typedef PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
+ CGSCCUpdateResult &>
+ CGSCCPassManager;
+
+/// An explicit specialization of the require analysis template pass.
+template <typename AnalysisT>
+struct RequireAnalysisPass<AnalysisT, LazyCallGraph::SCC, CGSCCAnalysisManager,
+ LazyCallGraph &, CGSCCUpdateResult &>
+ : PassInfoMixin<RequireAnalysisPass<AnalysisT, LazyCallGraph::SCC,
+ CGSCCAnalysisManager, LazyCallGraph &,
+ CGSCCUpdateResult &>> {
+ PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &) {
+ (void)AM.template getResult<AnalysisT>(C, CG);
+ return PreservedAnalyses::all();
+ }
+};
-extern template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
/// A proxy from a \c CGSCCAnalysisManager to a \c Module.
typedef InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>
CGSCCAnalysisManagerModuleProxy;
-extern template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
- LazyCallGraph::SCC>;
+/// We need a specialized result for the \c CGSCCAnalysisManagerModuleProxy so
+/// it can have access to the call graph in order to walk all the SCCs when
+/// invalidating things.
+template <> class CGSCCAnalysisManagerModuleProxy::Result {
+public:
+ explicit Result(CGSCCAnalysisManager &InnerAM, LazyCallGraph &G)
+ : InnerAM(&InnerAM), G(&G) {}
+
+ /// \brief Accessor for the analysis manager.
+ CGSCCAnalysisManager &getManager() { return *InnerAM; }
+
+ /// \brief Handler for invalidation of the Module.
+ ///
+ /// If the proxy analysis itself is preserved, then we assume that the set of
+ /// SCCs in the Module hasn't changed. Thus any pointers to SCCs in the
+ /// CGSCCAnalysisManager are still valid, and we don't need to call \c clear
+ /// on the CGSCCAnalysisManager.
+ ///
+ /// Regardless of whether this analysis is marked as preserved, all of the
+ /// analyses in the \c CGSCCAnalysisManager are potentially invalidated based
+ /// on the set of preserved analyses.
+ bool invalidate(Module &M, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &Inv);
+
+private:
+ CGSCCAnalysisManager *InnerAM;
+ LazyCallGraph *G;
+};
+
+/// Provide a specialized run method for the \c CGSCCAnalysisManagerModuleProxy
+/// so it can pass the lazy call graph to the result.
+template <>
+CGSCCAnalysisManagerModuleProxy::Result
+CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM);
+
+// Ensure the \c CGSCCAnalysisManagerModuleProxy is provided as an extern
+// template.
+extern template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
+
+extern template class OuterAnalysisManagerProxy<
+ ModuleAnalysisManager, LazyCallGraph::SCC, LazyCallGraph &>;
/// A proxy from a \c ModuleAnalysisManager to an \c SCC.
-typedef OuterAnalysisManagerProxy<ModuleAnalysisManager, LazyCallGraph::SCC>
+typedef OuterAnalysisManagerProxy<ModuleAnalysisManager, LazyCallGraph::SCC,
+ LazyCallGraph &>
ModuleAnalysisManagerCGSCCProxy;
+/// Support structure for SCC passes to communicate updates to the call graph
+/// back to the CGSCC pass manager infrastructure.
+///
+/// The CGSCC pass manager runs SCC passes which are allowed to update the call
+/// graph and SCC structures. This means the structure the pass manager works
+/// on is mutating underneath it. In order to support that, there needs to be
+/// careful communication about the precise nature and ramifications of these
+/// updates to the pass management infrastructure.
+///
+/// All SCC passes will have to accept a reference to the management layer's
+/// update result struct and use it to reflect the results of any CG updates
+/// performed.
+///
+/// Passes which do not change the call graph structure in any way can just
+/// ignore this argument to their run method.
+struct CGSCCUpdateResult {
+ /// Worklist of the RefSCCs queued for processing.
+ ///
+ /// When a pass refines the graph and creates new RefSCCs or causes them to
+ /// have a different shape or set of component SCCs it should add the RefSCCs
+ /// to this worklist so that we visit them in the refined form.
+ ///
+ /// This worklist is in reverse post-order, as we pop off the back in order
+ /// to observe RefSCCs in post-order. When adding RefSCCs, clients should add
+ /// them in reverse post-order.
+ SmallPriorityWorklist<LazyCallGraph::RefSCC *, 1> &RCWorklist;
+
+ /// Worklist of the SCCs queued for processing.
+ ///
+ /// When a pass refines the graph and creates new SCCs or causes them to have
+ /// a different shape or set of component functions it should add the SCCs to
+ /// this worklist so that we visit them in the refined form.
+ ///
+ /// Note that if the SCCs are part of a RefSCC that is added to the \c
+ /// RCWorklist, they don't need to be added here as visiting the RefSCC will
+ /// be sufficient to re-visit the SCCs within it.
+ ///
+ /// This worklist is in reverse post-order, as we pop off the back in order
+ /// to observe SCCs in post-order. When adding SCCs, clients should add them
+ /// in reverse post-order.
+ SmallPriorityWorklist<LazyCallGraph::SCC *, 1> &CWorklist;
+
+ /// The set of invalidated RefSCCs which should be skipped if they are found
+ /// in \c RCWorklist.
+ ///
+ /// This is used to quickly prune out RefSCCs when they get deleted and
+ /// happen to already be on the worklist. We use this primarily to avoid
+ /// scanning the list and removing entries from it.
+ SmallPtrSetImpl<LazyCallGraph::RefSCC *> &InvalidatedRefSCCs;
+
+ /// The set of invalidated SCCs which should be skipped if they are found
+ /// in \c CWorklist.
+ ///
+ /// This is used to quickly prune out SCCs when they get deleted and happen
+ /// to already be on the worklist. We use this primarily to avoid scanning
+ /// the list and removing entries from it.
+ SmallPtrSetImpl<LazyCallGraph::SCC *> &InvalidatedSCCs;
+
+ /// If non-null, the updated current \c RefSCC being processed.
+ ///
+ /// This is set when a graph refinement takes place and the "current" point in
+ /// the graph moves "down" or earlier in the post-order walk. This will often
+ /// cause the "current" RefSCC to be a newly created RefSCC object and the
+ /// old one to be added to the above worklist. When that happens, this
+ /// pointer is non-null and can be used to continue processing the "top" of
+ /// the post-order walk.
+ LazyCallGraph::RefSCC *UpdatedRC;
+
+ /// If non-null, the updated current \c SCC being processed.
+ ///
+ /// This is set when a graph refinement takes place and the "current" point in
+ /// the graph moves "down" or earlier in the post-order walk. This will often
+ /// cause the "current" SCC to be a newly created SCC object and the old one
+ /// to be added to the above worklist. When that happens, this pointer is
+ /// non-null and can be used to continue processing the "top" of the
+ /// post-order walk.
+ LazyCallGraph::SCC *UpdatedC;
+};
+
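A minimal sketch of an SCC pass under this contract, assuming a hypothetical pass name; the run() signature mirrors the RequireAnalysisPass specialization earlier in this header.

// Sketch only: a no-op CGSCC pass that accepts (and ignores) the update
// result. 'NoopCGSCCPass' is an illustrative name, not part of this change.
#include "llvm/Analysis/CGSCCPassManager.h"

namespace llvm {

struct NoopCGSCCPass : PassInfoMixin<NoopCGSCCPass> {
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
    // A pass that never mutates the call graph can ignore UR entirely.
    return PreservedAnalyses::all();
  }
};

} // namespace llvm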
/// \brief The core module pass which does a post-order walk of the SCCs and
/// runs a CGSCC pass over each one.
///
@@ -97,35 +320,129 @@ public:
// Get the call graph for this module.
LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
+ // We keep worklists to allow us to push more work onto the pass manager as
+ // the passes are run.
+ SmallPriorityWorklist<LazyCallGraph::RefSCC *, 1> RCWorklist;
+ SmallPriorityWorklist<LazyCallGraph::SCC *, 1> CWorklist;
+
+ // Keep sets for invalidated SCCs and RefSCCs that should be skipped when
+ // iterating off the worklists.
+ SmallPtrSet<LazyCallGraph::RefSCC *, 4> InvalidRefSCCSet;
+ SmallPtrSet<LazyCallGraph::SCC *, 4> InvalidSCCSet;
+
+ CGSCCUpdateResult UR = {RCWorklist, CWorklist, InvalidRefSCCSet,
+ InvalidSCCSet, nullptr, nullptr};
+
PreservedAnalyses PA = PreservedAnalyses::all();
- for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs()) {
- if (DebugLogging)
- dbgs() << "Running an SCC pass across the RefSCC: " << RC << "\n";
-
- for (LazyCallGraph::SCC &C : RC) {
- PreservedAnalyses PassPA = Pass.run(C, CGAM);
-
- // We know that the CGSCC pass couldn't have invalidated any other
- // SCC's analyses (that's the contract of a CGSCC pass), so
- // directly handle the CGSCC analysis manager's invalidation here. We
- // also update the preserved set of analyses to reflect that invalidated
- // analyses are now safe to preserve.
- // FIXME: This isn't quite correct. We need to handle the case where the
- // pass updated the CG, particularly some child of the current SCC, and
- // invalidate its analyses.
- PassPA = CGAM.invalidate(C, std::move(PassPA));
-
- // Then intersect the preserved set so that invalidation of module
- // analyses will eventually occur when the module pass completes.
- PA.intersect(std::move(PassPA));
- }
+ for (auto RCI = CG.postorder_ref_scc_begin(),
+ RCE = CG.postorder_ref_scc_end();
+ RCI != RCE;) {
+ assert(RCWorklist.empty() &&
+ "Should always start with an empty RefSCC worklist");
+ // The postorder_ref_sccs range we are walking is lazily constructed, so
+ // we only push the first one onto the worklist. The worklist allows us
+ // to capture *new* RefSCCs created during transformations.
+ //
+ // We really want to form RefSCCs lazily because that makes them cheaper
+ // to update as the program is simplified and allows us to have greater
+ // cache locality as forming a RefSCC touches all the parts of all the
+ // functions within that RefSCC.
+ //
+ // We also eagerly increment the iterator to the next position because
+ // the CGSCC passes below may delete the current RefSCC.
+ RCWorklist.insert(&*RCI++);
+
+ do {
+ LazyCallGraph::RefSCC *RC = RCWorklist.pop_back_val();
+ if (InvalidRefSCCSet.count(RC)) {
+ if (DebugLogging)
+ dbgs() << "Skipping an invalid RefSCC...\n";
+ continue;
+ }
+
+ assert(CWorklist.empty() &&
+ "Should always start with an empty SCC worklist");
+
+ if (DebugLogging)
+ dbgs() << "Running an SCC pass across the RefSCC: " << *RC << "\n";
+
+ // Push the initial SCCs in reverse post-order as we'll pop off the
+ // back and so see this in post-order.
+ for (LazyCallGraph::SCC &C : reverse(*RC))
+ CWorklist.insert(&C);
+
+ do {
+ LazyCallGraph::SCC *C = CWorklist.pop_back_val();
+ // Due to call graph mutations, we may have invalid SCCs or SCCs from
+ // other RefSCCs in the worklist. The invalid ones are dead and the
+ // other RefSCCs should be queued above, so we just need to skip both
+ // scenarios here.
+ if (InvalidSCCSet.count(C)) {
+ if (DebugLogging)
+ dbgs() << "Skipping an invalid SCC...\n";
+ continue;
+ }
+ if (&C->getOuterRefSCC() != RC) {
+ if (DebugLogging)
+ dbgs() << "Skipping an SCC that is now part of some other "
+ "RefSCC...\n";
+ continue;
+ }
+
+ do {
+ // Check that we didn't miss any update scenario.
+ assert(!InvalidSCCSet.count(C) && "Processing an invalid SCC!");
+ assert(C->begin() != C->end() && "Cannot have an empty SCC!");
+ assert(&C->getOuterRefSCC() == RC &&
+ "Processing an SCC in a different RefSCC!");
+
+ UR.UpdatedRC = nullptr;
+ UR.UpdatedC = nullptr;
+ PreservedAnalyses PassPA = Pass.run(*C, CGAM, CG, UR);
+
+ // We handle invalidating the CGSCC analysis manager's information
+ // for the (potentially updated) SCC here. Note that any other SCCs
+ // whose structure has changed should have been invalidated by
+ // whatever was updating the call graph. This SCC gets invalidated
+ // late as it contains the nodes that were actively being
+ // processed.
+ CGAM.invalidate(*(UR.UpdatedC ? UR.UpdatedC : C), PassPA);
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+
+ // The pass may have restructured the call graph and refined the
+ // current SCC and/or RefSCC. We need to update our current SCC and
+ // RefSCC pointers to follow these. Also, when the current SCC is
+ // refined, re-run the SCC pass over the newly refined SCC in order
+ // to observe the most precise SCC model available. This inherently
+ // cannot cycle excessively as it only happens when we split SCCs
+ // apart, at most converging on a DAG of single nodes.
+ // FIXME: If we ever start having RefSCC passes, we'll want to
+ // iterate there too.
+ RC = UR.UpdatedRC ? UR.UpdatedRC : RC;
+ C = UR.UpdatedC ? UR.UpdatedC : C;
+ if (DebugLogging && UR.UpdatedC)
+ dbgs() << "Re-running SCC passes after a refinement of the "
+ "current SCC: "
+ << *UR.UpdatedC << "\n";
+
+ // Note that both `C` and `RC` may at this point refer to a deleted,
+ // invalid SCC and RefSCC, respectively. But we will short-circuit
+ // the processing when we check them in the loop above.
+ } while (UR.UpdatedC);
+
+ } while (!CWorklist.empty());
+ } while (!RCWorklist.empty());
}
- // By definition we preserve the proxy. This precludes *any* invalidation
- // of CGSCC analyses by the proxy, but that's OK because we've taken
- // care to invalidate analyses in the CGSCC analysis manager
- // incrementally above.
+ // By definition we preserve the call graph, all SCC analyses, and the
+ // analysis proxies by handling them above and in any nested pass managers.
+ PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();
+ PA.preserve<LazyCallGraphAnalysis>();
PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+ PA.preserve<FunctionAnalysisManagerModuleProxy>();
return PA;
}
@@ -142,17 +459,54 @@ createModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass, bool DebugLogging = fal
return ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>(std::move(Pass), DebugLogging);
}
-extern template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
- LazyCallGraph::SCC>;
/// A proxy from a \c FunctionAnalysisManager to an \c SCC.
-typedef InnerAnalysisManagerProxy<FunctionAnalysisManager, LazyCallGraph::SCC>
- FunctionAnalysisManagerCGSCCProxy;
+///
+/// When a module pass runs and triggers invalidation, both the CGSCC and
+/// Function analysis manager proxies on the module get an invalidation event.
+/// We don't want to fully duplicate responsibility for most of the
+/// invalidation logic. Instead, this layer is only responsible for SCC-local
+/// invalidation events. We work with the module's FunctionAnalysisManager to
+/// invalidate function analyses.
+class FunctionAnalysisManagerCGSCCProxy
+ : public AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy> {
+public:
+ class Result {
+ public:
+ explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+
+ /// \brief Accessor for the analysis manager.
+ FunctionAnalysisManager &getManager() { return *FAM; }
+
+ bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
+ CGSCCAnalysisManager::Invalidator &Inv);
+
+ private:
+ FunctionAnalysisManager *FAM;
+ };
+
+ /// Computes the \c FunctionAnalysisManager and stores it in the result proxy.
+ Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &);
+
+private:
+ friend AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy>;
+ static AnalysisKey Key;
+};
extern template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
/// A proxy from a \c CGSCCAnalysisManager to a \c Function.
typedef OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>
CGSCCAnalysisManagerFunctionProxy;
+/// Helper to update the call graph after running a function pass.
+///
+/// Function passes can only mutate the call graph in specific ways. This
+/// routine provides a helper that updates the call graph in those ways,
+/// returning the SCC that now contains the function and populating a CG
+/// update result struct for the overall CGSCC walk.
+LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
+ LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
+ CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, bool DebugLogging = false);
+
/// \brief Adaptor that maps from a SCC to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
@@ -185,37 +539,61 @@ public:
}
/// \brief Runs the function pass across every function in the module.
- PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM) {
+ PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &UR) {
// Setup the function analysis manager from its proxy.
FunctionAnalysisManager &FAM =
- AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();
+ AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
+
+ SmallVector<LazyCallGraph::Node *, 4> Nodes;
+ for (LazyCallGraph::Node &N : C)
+ Nodes.push_back(&N);
+
+ // The SCC may get split while we are optimizing functions due to deleting
+ // edges. If this happens, the current SCC can shift, so keep track of
+ // a pointer we can overwrite.
+ LazyCallGraph::SCC *CurrentC = &C;
if (DebugLogging)
dbgs() << "Running function passes across an SCC: " << C << "\n";
PreservedAnalyses PA = PreservedAnalyses::all();
- for (LazyCallGraph::Node &N : C) {
- PreservedAnalyses PassPA = Pass.run(N.getFunction(), FAM);
+ for (LazyCallGraph::Node *N : Nodes) {
+ // Skip nodes from other SCCs. These may have been split out during
+ // processing. We'll eventually visit those SCCs and pick up the nodes
+ // there.
+ if (CG.lookupSCC(*N) != CurrentC)
+ continue;
+
+ PreservedAnalyses PassPA = Pass.run(N->getFunction(), FAM);
// We know that the function pass couldn't have invalidated any other
// function's analyses (that's the contract of a function pass), so
// directly handle the function analysis manager's invalidation here.
- // Also, update the preserved analyses to reflect that once invalidated
- // these can again be preserved.
- PassPA = FAM.invalidate(N.getFunction(), std::move(PassPA));
+ FAM.invalidate(N->getFunction(), PassPA);
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
+
+ // Update the call graph based on this function pass. This may also
+ // update the current SCC to point to a smaller, more refined SCC.
+ CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
+ CG, *CurrentC, *N, AM, UR, DebugLogging);
+ assert(CG.lookupSCC(*N) == CurrentC &&
+ "Current SCC not updated to the SCC containing the current node!");
}
- // By definition we preserve the proxy. This precludes *any* invalidation
- // of function analyses by the proxy, but that's OK because we've taken
- // care to invalidate analyses in the function analysis manager
- // incrementally above.
- // FIXME: We need to update the call graph here to account for any deleted
- // edges!
+ // By definition we preserve the proxy. And we preserve all analyses on
+ // Functions. This precludes *any* invalidation of function analyses by the
+ // proxy, but that's OK because we've taken care to invalidate analyses in
+ // the function analysis manager incrementally above.
+ PA.preserveSet<AllAnalysesOn<Function>>();
PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+
+ // We've also ensured that we updated the call graph along the way.
+ PA.preserve<LazyCallGraphAnalysis>();
+
return PA;
}
@@ -232,6 +610,185 @@ createCGSCCToFunctionPassAdaptor(FunctionPassT Pass, bool DebugLogging = false)
return CGSCCToFunctionPassAdaptor<FunctionPassT>(std::move(Pass),
DebugLogging);
}
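Taken together, the two adaptor helpers let a function pass run inside the CGSCC walk of a module pipeline. A minimal composition sketch, with MyFunctionPass as a hypothetical stand-in for any new-PM function pass.

// Composition sketch: function pass -> CGSCC walk -> module pipeline.
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

struct MyFunctionPass : PassInfoMixin<MyFunctionPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    return PreservedAnalyses::all();
  }
};

inline void buildPipeline(ModulePassManager &MPM) {
  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
      createCGSCCToFunctionPassAdaptor(MyFunctionPass())));
}

} // namespace llvm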
+
+/// A helper that repeats an SCC pass each time an indirect call is refined to
+/// a direct call by that pass.
+///
+/// While the CGSCC pass manager works to re-visit SCCs and RefSCCs as they
+/// change shape, we may also want to repeat an SCC pass if it simply refines
+/// an indirect call to a direct call, even if doing so does not alter the
+/// shape of the graph. Note that this only pertains to direct calls to
+/// functions where IPO across the SCC may be able to compute more precise
+/// results. For intrinsics, we assume scalar optimizations already can fully
+/// reason about them.
+///
+/// This repetition has the potential to be very large, however, as each one
+/// might refine a single call site. As a consequence, in practice we use an
+/// upper bound on the number of repetitions to limit things.
+template <typename PassT>
+class DevirtSCCRepeatedPass
+ : public PassInfoMixin<DevirtSCCRepeatedPass<PassT>> {
+public:
+ explicit DevirtSCCRepeatedPass(PassT Pass, int MaxIterations,
+ bool DebugLogging = false)
+ : Pass(std::move(Pass)), MaxIterations(MaxIterations),
+ DebugLogging(DebugLogging) {}
+
+ /// Runs the wrapped pass up to \c MaxIterations on the SCC, iterating
+ /// whenever an indirect call is refined.
+ PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+ PreservedAnalyses PA = PreservedAnalyses::all();
+
+ // The SCC may be refined while we are running passes over it, so set up
+ // a pointer that we can update.
+ LazyCallGraph::SCC *C = &InitialC;
+
+ // Collect value handles for all of the indirect call sites.
+ SmallVector<WeakVH, 8> CallHandles;
+
+ // Struct to track the counts of direct and indirect calls in each function
+ // of the SCC.
+ struct CallCount {
+ int Direct;
+ int Indirect;
+ };
+
+ // Put value handles on all of the indirect calls and return the number of
+ // direct calls for each function in the SCC.
+ auto ScanSCC = [](LazyCallGraph::SCC &C,
+ SmallVectorImpl<WeakVH> &CallHandles) {
+ assert(CallHandles.empty() && "Must start with a clear set of handles.");
+
+ SmallVector<CallCount, 4> CallCounts;
+ for (LazyCallGraph::Node &N : C) {
+ CallCounts.push_back({0, 0});
+ CallCount &Count = CallCounts.back();
+ for (Instruction &I : instructions(N.getFunction()))
+ if (auto CS = CallSite(&I)) {
+ if (CS.getCalledFunction()) {
+ ++Count.Direct;
+ } else {
+ ++Count.Indirect;
+ CallHandles.push_back(WeakVH(&I));
+ }
+ }
+ }
+
+ return CallCounts;
+ };
+
+ // Populate the initial call handles and get the initial call counts.
+ auto CallCounts = ScanSCC(*C, CallHandles);
+
+ for (int Iteration = 0;; ++Iteration) {
+ PreservedAnalyses PassPA = Pass.run(*C, AM, CG, UR);
+
+ // If the SCC structure has changed, bail immediately and let the outer
+ // CGSCC layer handle any iteration to reflect the refined structure.
+ if (UR.UpdatedC && UR.UpdatedC != C) {
+ PA.intersect(std::move(PassPA));
+ break;
+ }
+
+ // Check that we didn't miss any update scenario.
+ assert(!UR.InvalidatedSCCs.count(C) && "Processing an invalid SCC!");
+ assert(C->begin() != C->end() && "Cannot have an empty SCC!");
+ assert((int)CallCounts.size() == C->size() &&
+ "Cannot have changed the size of the SCC!");
+
+ // Check whether any of the handles were devirtualized.
+ auto IsDevirtualizedHandle = [&](WeakVH &CallH) {
+ if (!CallH)
+ return false;
+ auto CS = CallSite(CallH);
+ if (!CS)
+ return false;
+
+ // If the call is still indirect, leave it alone.
+ Function *F = CS.getCalledFunction();
+ if (!F)
+ return false;
+
+ if (DebugLogging)
+ dbgs() << "Found devirutalized call from "
+ << CS.getParent()->getParent()->getName() << " to "
+ << F->getName() << "\n";
+
+ // We now have a direct call where previously we had an indirect call,
+ // so iterate to process this devirtualization site.
+ return true;
+ };
+ bool Devirt = any_of(CallHandles, IsDevirtualizedHandle);
+
+ // Rescan to build up a new set of handles and count how many direct
+ // calls remain. If we decide to iterate, this also sets up the input to
+ // the next iteration.
+ CallHandles.clear();
+ auto NewCallCounts = ScanSCC(*C, CallHandles);
+
+ // If we haven't found an explicit devirtualization already, see if we
+ // have decreased the number of indirect calls and increased the number
+ // of direct calls for any function in the SCC. This can be fooled by all
+ // manner of transformations such as DCE and other things, but seems to
+ // work well in practice.
+ if (!Devirt)
+ for (int i = 0, Size = C->size(); i < Size; ++i)
+ if (CallCounts[i].Indirect > NewCallCounts[i].Indirect &&
+ CallCounts[i].Direct < NewCallCounts[i].Direct) {
+ Devirt = true;
+ break;
+ }
+
+ if (!Devirt) {
+ PA.intersect(std::move(PassPA));
+ break;
+ }
+
+ // Otherwise, if we've already hit our max, we're done.
+ if (Iteration >= MaxIterations) {
+ if (DebugLogging)
+ dbgs() << "Found another devirtualization after hitting the max "
+ "number of repetitions ("
+ << MaxIterations << ") on SCC: " << *C << "\n";
+ PA.intersect(std::move(PassPA));
+ break;
+ }
+
+ if (DebugLogging)
+ dbgs() << "Repeating an SCC pass after finding a devirtualization in: "
+ << *C << "\n";
+
+ // Move over the new call counts in preparation for iterating.
+ CallCounts = std::move(NewCallCounts);
+
+ // Update the analysis manager with each run and intersect the total set
+ // of preserved analyses so we're ready to iterate.
+ AM.invalidate(*C, PassPA);
+ PA.intersect(std::move(PassPA));
+ }
+
+ // Note that we don't add any preserved entries here unlike a more normal
+ // "pass manager" because we only handle invalidation *between* iterations,
+ // not after the last iteration.
+ return PA;
+ }
+
+private:
+ PassT Pass;
+ int MaxIterations;
+ bool DebugLogging;
+};
+
+/// \brief A function to deduce the pass type and wrap it in the templated
+/// adaptor.
+template <typename PassT>
+DevirtSCCRepeatedPass<PassT>
+createDevirtSCCRepeatedPass(PassT Pass, int MaxIterations,
+ bool DebugLogging = false) {
+ return DevirtSCCRepeatedPass<PassT>(std::move(Pass), MaxIterations,
+ DebugLogging);
+}
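A hedged sketch of wiring the devirtualization wrapper above into a module pipeline; NoOpDevirtClient and the iteration cap of 4 are illustrative choices, not values from this change.

// Usage sketch for DevirtSCCRepeatedPass via its helper function.
#include "llvm/Analysis/CGSCCPassManager.h"

namespace llvm {

// Trivial placeholder CGSCC pass, hypothetical and for illustration only.
struct NoOpDevirtClient : PassInfoMixin<NoOpDevirtClient> {
  PreservedAnalyses run(LazyCallGraph::SCC &, CGSCCAnalysisManager &,
                        LazyCallGraph &, CGSCCUpdateResult &) {
    return PreservedAnalyses::all();
  }
};

inline void buildDevirtPipeline(ModulePassManager &MPM) {
  // Re-run the wrapped pass up to four times whenever an indirect call is
  // refined to a direct call; the cap is an arbitrary choice for this sketch.
  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
      createDevirtSCCRepeatedPass(NoOpDevirtClient(), /*MaxIterations=*/4)));
}

} // namespace llvm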
}
#endif
diff --git a/include/llvm/Analysis/CallGraph.h b/include/llvm/Analysis/CallGraph.h
index f37e843fed5e..4ecbaa75ac75 100644
--- a/include/llvm/Analysis/CallGraph.h
+++ b/include/llvm/Analysis/CallGraph.h
@@ -297,7 +297,7 @@ private:
/// resulting data.
class CallGraphAnalysis : public AnalysisInfoMixin<CallGraphAnalysis> {
friend AnalysisInfoMixin<CallGraphAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief A formulaic typedef to inform clients of the result type.
@@ -315,7 +315,7 @@ class CallGraphPrinterPass : public PassInfoMixin<CallGraphPrinterPass> {
public:
explicit CallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// \brief The \c ModulePass which wraps up a \c CallGraph and the logic to
@@ -409,94 +409,87 @@ public:
// Provide graph traits for tranversing call graphs using standard graph
// traversals.
template <> struct GraphTraits<CallGraphNode *> {
- typedef CallGraphNode NodeType;
typedef CallGraphNode *NodeRef;
typedef CallGraphNode::CallRecord CGNPairTy;
- typedef std::pointer_to_unary_function<CGNPairTy, CallGraphNode *>
- CGNDerefFun;
- static NodeType *getEntryNode(CallGraphNode *CGN) { return CGN; }
+ static NodeRef getEntryNode(CallGraphNode *CGN) { return CGN; }
- typedef mapped_iterator<NodeType::iterator, CGNDerefFun> ChildIteratorType;
+ static CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return map_iterator(N->begin(), CGNDerefFun(CGNDeref));
+ typedef mapped_iterator<CallGraphNode::iterator, decltype(&CGNGetValue)>
+ ChildIteratorType;
+
+ static ChildIteratorType child_begin(NodeRef N) {
+ return ChildIteratorType(N->begin(), &CGNGetValue);
}
- static inline ChildIteratorType child_end(NodeType *N) {
- return map_iterator(N->end(), CGNDerefFun(CGNDeref));
+ static ChildIteratorType child_end(NodeRef N) {
+ return ChildIteratorType(N->end(), &CGNGetValue);
}
-
- static CallGraphNode *CGNDeref(CGNPairTy P) { return P.second; }
};
template <> struct GraphTraits<const CallGraphNode *> {
- typedef const CallGraphNode NodeType;
typedef const CallGraphNode *NodeRef;
typedef CallGraphNode::CallRecord CGNPairTy;
- typedef std::pointer_to_unary_function<CGNPairTy, const CallGraphNode *>
- CGNDerefFun;
- static NodeType *getEntryNode(const CallGraphNode *CGN) { return CGN; }
+ static NodeRef getEntryNode(const CallGraphNode *CGN) { return CGN; }
- typedef mapped_iterator<NodeType::const_iterator, CGNDerefFun>
+ static const CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }
+
+ typedef mapped_iterator<CallGraphNode::const_iterator, decltype(&CGNGetValue)>
ChildIteratorType;
- static inline ChildIteratorType child_begin(NodeType *N) {
- return map_iterator(N->begin(), CGNDerefFun(CGNDeref));
+ static ChildIteratorType child_begin(NodeRef N) {
+ return ChildIteratorType(N->begin(), &CGNGetValue);
}
- static inline ChildIteratorType child_end(NodeType *N) {
- return map_iterator(N->end(), CGNDerefFun(CGNDeref));
+ static ChildIteratorType child_end(NodeRef N) {
+ return ChildIteratorType(N->end(), &CGNGetValue);
}
-
- static const CallGraphNode *CGNDeref(CGNPairTy P) { return P.second; }
};
template <>
struct GraphTraits<CallGraph *> : public GraphTraits<CallGraphNode *> {
- static NodeType *getEntryNode(CallGraph *CGN) {
+ static NodeRef getEntryNode(CallGraph *CGN) {
return CGN->getExternalCallingNode(); // Start at the external node!
}
typedef std::pair<const Function *const, std::unique_ptr<CallGraphNode>>
PairTy;
- typedef std::pointer_to_unary_function<const PairTy &, CallGraphNode &>
- DerefFun;
+ static CallGraphNode *CGGetValuePtr(const PairTy &P) {
+ return P.second.get();
+ }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef mapped_iterator<CallGraph::iterator, DerefFun> nodes_iterator;
+ typedef mapped_iterator<CallGraph::iterator, decltype(&CGGetValuePtr)>
+ nodes_iterator;
static nodes_iterator nodes_begin(CallGraph *CG) {
- return map_iterator(CG->begin(), DerefFun(CGdereference));
+ return nodes_iterator(CG->begin(), &CGGetValuePtr);
}
static nodes_iterator nodes_end(CallGraph *CG) {
- return map_iterator(CG->end(), DerefFun(CGdereference));
+ return nodes_iterator(CG->end(), &CGGetValuePtr);
}
-
- static CallGraphNode &CGdereference(const PairTy &P) { return *P.second; }
};
template <>
struct GraphTraits<const CallGraph *> : public GraphTraits<
const CallGraphNode *> {
- static NodeType *getEntryNode(const CallGraph *CGN) {
+ static NodeRef getEntryNode(const CallGraph *CGN) {
return CGN->getExternalCallingNode(); // Start at the external node!
}
typedef std::pair<const Function *const, std::unique_ptr<CallGraphNode>>
PairTy;
- typedef std::pointer_to_unary_function<const PairTy &, const CallGraphNode &>
- DerefFun;
+ static const CallGraphNode *CGGetValuePtr(const PairTy &P) {
+ return P.second.get();
+ }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef mapped_iterator<CallGraph::const_iterator, DerefFun> nodes_iterator;
+ typedef mapped_iterator<CallGraph::const_iterator, decltype(&CGGetValuePtr)>
+ nodes_iterator;
static nodes_iterator nodes_begin(const CallGraph *CG) {
- return map_iterator(CG->begin(), DerefFun(CGdereference));
+ return nodes_iterator(CG->begin(), &CGGetValuePtr);
}
static nodes_iterator nodes_end(const CallGraph *CG) {
- return map_iterator(CG->end(), DerefFun(CGdereference));
- }
-
- static const CallGraphNode &CGdereference(const PairTy &P) {
- return *P.second;
+ return nodes_iterator(CG->end(), &CGGetValuePtr);
}
};
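A hedged sketch of walking a call graph through the updated traits, using only the iterators defined above; printCallees is an illustrative helper, not part of this change.

// Sketch: enumerate every callee edge via GraphTraits<CallGraph *>.
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

inline void printCallees(CallGraph &CG) {
  typedef GraphTraits<CallGraph *> GT;
  for (GT::nodes_iterator NI = GT::nodes_begin(&CG), NE = GT::nodes_end(&CG);
       NI != NE; ++NI) {
    CallGraphNode *Caller = *NI;
    for (GT::ChildIteratorType CI = GT::child_begin(Caller),
                               CE = GT::child_end(Caller);
         CI != CE; ++CI)
      if (Function *F = (*CI)->getFunction())
        errs() << "  callee: " << F->getName() << "\n";
  }
}

} // namespace llvm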
diff --git a/include/llvm/Analysis/CallGraphSCCPass.h b/include/llvm/Analysis/CallGraphSCCPass.h
index cb35b3292be7..f86f64bbb67d 100644
--- a/include/llvm/Analysis/CallGraphSCCPass.h
+++ b/include/llvm/Analysis/CallGraphSCCPass.h
@@ -94,8 +94,8 @@ class CallGraphSCC {
public:
CallGraphSCC(CallGraph &cg, void *context) : CG(cg), Context(context) {}
- void initialize(CallGraphNode *const *I, CallGraphNode *const *E) {
- Nodes.assign(I, E);
+ void initialize(ArrayRef<CallGraphNode *> NewNodes) {
+ Nodes.assign(NewNodes.begin(), NewNodes.end());
}
bool isSingular() const { return Nodes.size() == 1; }
diff --git a/include/llvm/Analysis/CodeMetrics.h b/include/llvm/Analysis/CodeMetrics.h
index f512aca57865..9e861ac18825 100644
--- a/include/llvm/Analysis/CodeMetrics.h
+++ b/include/llvm/Analysis/CodeMetrics.h
@@ -87,7 +87,7 @@ struct CodeMetrics {
/// \brief Add information about a block to the current state.
void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
- SmallPtrSetImpl<const Value*> &EphValues);
+ const SmallPtrSetImpl<const Value*> &EphValues);
/// \brief Collect a loop's ephemeral values (those used only by an assume
/// or similar intrinsics in the loop).
diff --git a/include/llvm/Analysis/ConstantFolding.h b/include/llvm/Analysis/ConstantFolding.h
index b1504004d83c..517842c8b0dc 100644
--- a/include/llvm/Analysis/ConstantFolding.h
+++ b/include/llvm/Analysis/ConstantFolding.h
@@ -23,8 +23,10 @@
namespace llvm {
class APInt;
template <typename T> class ArrayRef;
+class CallSite;
class Constant;
class ConstantExpr;
+class ConstantVector;
class DataLayout;
class Function;
class GlobalValue;
@@ -45,11 +47,10 @@ bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
Constant *ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
-/// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified DataLayout. If successful, the constant result is
-/// result is returned, if not, null is returned.
-Constant *
-ConstantFoldConstantExpression(const ConstantExpr *CE, const DataLayout &DL,
+/// ConstantFoldConstant - Attempt to fold the constant using the
+/// specified DataLayout.
+/// If successful, the constant result is returned, if not, null is returned.
+Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -62,19 +63,6 @@ Constant *ConstantFoldInstOperands(Instruction *I, ArrayRef<Constant *> Ops,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
-/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
-/// specified operands. If successful, the constant result is returned, if not,
-/// null is returned. Note that this function can fail when attempting to
-/// fold instructions like loads and stores, which have no constant expression
-/// form.
-///
-/// This function doesn't work for compares (use ConstantFoldCompareInstOperands
-/// for this) and GEPs.
-Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
- ArrayRef<Constant *> Ops,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr);
-
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
@@ -137,6 +125,10 @@ bool canConstantFoldCallTo(const Function *F);
/// with the specified arguments, returning null if unsuccessful.
Constant *ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI = nullptr);
+
+/// \brief Check whether the given call has no side-effects.
+/// Specifically checks for math routines which sometimes set errno.
+bool isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI);
}
#endif
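A hedged sketch of the renamed ConstantFoldConstant entry point declared above; foldOrKeep is an illustrative helper, not part of this change.

// Sketch: try to fold an arbitrary constant, fall back to the original.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"

namespace llvm {

inline Constant *foldOrKeep(Constant *C, const DataLayout &DL,
                            const TargetLibraryInfo *TLI = nullptr) {
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    return Folded;
  return C; // Not foldable; the declaration above documents a null return.
}

} // namespace llvm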
diff --git a/include/llvm/Analysis/DemandedBits.h b/include/llvm/Analysis/DemandedBits.h
index fafd5d00b481..c603274a7161 100644
--- a/include/llvm/Analysis/DemandedBits.h
+++ b/include/llvm/Analysis/DemandedBits.h
@@ -89,7 +89,7 @@ public:
/// An analysis that produces \c DemandedBits for a function.
class DemandedBitsAnalysis : public AnalysisInfoMixin<DemandedBitsAnalysis> {
friend AnalysisInfoMixin<DemandedBitsAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
@@ -97,7 +97,7 @@ public:
/// \brief Run the analysis pass over a function and produce demanded bits
/// information.
- DemandedBits run(Function &F, AnalysisManager<Function> &AM);
+ DemandedBits run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for DemandedBits
@@ -106,7 +106,7 @@ class DemandedBitsPrinterPass : public PassInfoMixin<DemandedBitsPrinterPass> {
public:
explicit DemandedBitsPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// Create a demanded bits analysis pass.
diff --git a/include/llvm/Analysis/DependenceAnalysis.h b/include/llvm/Analysis/DependenceAnalysis.h
index 32dd367a9c0a..90f33b8c42e5 100644
--- a/include/llvm/Analysis/DependenceAnalysis.h
+++ b/include/llvm/Analysis/DependenceAnalysis.h
@@ -70,13 +70,8 @@ template <typename T> class ArrayRef;
/// itself.
class Dependence {
protected:
- Dependence(const Dependence &) = default;
-
- // FIXME: When we move to MSVC 2015 as the base compiler for Visual Studio
- // support, uncomment this line to allow a defaulted move constructor for
- // Dependence. Currently, FullDependence relies on the copy constructor, but
- // that is acceptable given the triviality of the class.
- // Dependence(Dependence &&) = default;
+ Dependence(Dependence &&) = default;
+ Dependence &operator=(Dependence &&) = default;
public:
Dependence(Instruction *Source,
@@ -222,11 +217,6 @@ template <typename T> class ArrayRef;
FullDependence(Instruction *Src, Instruction *Dst, bool LoopIndependent,
unsigned Levels);
- FullDependence(FullDependence &&RHS)
- : Dependence(std::move(RHS)), Levels(RHS.Levels),
- LoopIndependent(RHS.LoopIndependent), Consistent(RHS.Consistent),
- DV(std::move(RHS.DV)) {}
-
/// isLoopIndependent - Returns true if this is a loop-independent
/// dependence.
bool isLoopIndependent() const override { return LoopIndependent; }
@@ -931,7 +921,7 @@ template <typename T> class ArrayRef;
Result run(Function &F, FunctionAnalysisManager &FAM);
private:
- static char PassID;
+ static AnalysisKey Key;
friend struct AnalysisInfoMixin<DependenceAnalysis>;
}; // class DependenceAnalysis
diff --git a/include/llvm/Analysis/DominanceFrontier.h b/include/llvm/Analysis/DominanceFrontier.h
index 79672e4e4225..b9667f801ed3 100644
--- a/include/llvm/Analysis/DominanceFrontier.h
+++ b/include/llvm/Analysis/DominanceFrontier.h
@@ -171,14 +171,14 @@ extern template class ForwardDominanceFrontierBase<BasicBlock>;
class DominanceFrontierAnalysis
: public AnalysisInfoMixin<DominanceFrontierAnalysis> {
friend AnalysisInfoMixin<DominanceFrontierAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
typedef DominanceFrontier Result;
/// \brief Run the analysis pass over a function and produce a dominator tree.
- DominanceFrontier run(Function &F, AnalysisManager<Function> &AM);
+ DominanceFrontier run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c DominanceFrontier.
@@ -188,7 +188,7 @@ class DominanceFrontierPrinterPass
public:
explicit DominanceFrontierPrinterPass(raw_ostream &OS);
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // End llvm namespace
diff --git a/include/llvm/Analysis/EHPersonalities.h b/include/llvm/Analysis/EHPersonalities.h
index a26c575cfe10..2c45ab4693e6 100644
--- a/include/llvm/Analysis/EHPersonalities.h
+++ b/include/llvm/Analysis/EHPersonalities.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@@ -39,6 +40,10 @@ enum class EHPersonality {
/// Unknown.
EHPersonality classifyEHPersonality(const Value *Pers);
+StringRef getEHPersonalityName(EHPersonality Pers);
+
+EHPersonality getDefaultEHPersonality(const Triple &T);
+
/// \brief Returns true if this personality function catches asynchronous
/// exceptions.
inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
diff --git a/include/llvm/Analysis/GlobalsModRef.h b/include/llvm/Analysis/GlobalsModRef.h
index 4c0a98949778..09cef68ce70f 100644
--- a/include/llvm/Analysis/GlobalsModRef.h
+++ b/include/llvm/Analysis/GlobalsModRef.h
@@ -120,12 +120,12 @@ private:
/// Analysis pass providing a never-invalidated alias analysis result.
class GlobalsAA : public AnalysisInfoMixin<GlobalsAA> {
friend AnalysisInfoMixin<GlobalsAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef GlobalsAAResult Result;
- GlobalsAAResult run(Module &M, AnalysisManager<Module> &AM);
+ GlobalsAAResult run(Module &M, ModuleAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the GlobalsAAResult object.
diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h
index e68a77526b96..e1a5467d8b63 100644
--- a/include/llvm/Analysis/IVUsers.h
+++ b/include/llvm/Analysis/IVUsers.h
@@ -16,6 +16,7 @@
#define LLVM_ANALYSIS_IVUSERS_H
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/IR/ValueHandle.h"
@@ -90,33 +91,6 @@ private:
void deleted() override;
};
-template<> struct ilist_traits<IVStrideUse>
- : public ilist_default_traits<IVStrideUse> {
- // createSentinel is used to get hold of a node that marks the end of
- // the list...
- // The sentinel is relative to this instance, so we use a non-static
- // method.
- IVStrideUse *createSentinel() const {
- // since i(p)lists always publicly derive from the corresponding
- // traits, placing a data member in this class will augment i(p)list.
- // But since the NodeTy is expected to publicly derive from
- // ilist_node<NodeTy>, there is a legal viable downcast from it
- // to NodeTy. We use this trick to superpose i(p)list with a "ghostly"
- // NodeTy, which becomes the sentinel. Dereferencing the sentinel is
- // forbidden (save the ilist_node<NodeTy>) so no one will ever notice
- // the superposition.
- return static_cast<IVStrideUse*>(&Sentinel);
- }
- static void destroySentinel(IVStrideUse*) {}
-
- IVStrideUse *provideInitialHead() const { return createSentinel(); }
- IVStrideUse *ensureHead(IVStrideUse*) const { return createSentinel(); }
- static void noteHead(IVStrideUse*, IVStrideUse*) {}
-
-private:
- mutable ilist_node<IVStrideUse> Sentinel;
-};
-
class IVUsers {
friend class IVStrideUse;
Loop *L;
@@ -137,6 +111,17 @@ public:
IVUsers(Loop *L, AssumptionCache *AC, LoopInfo *LI, DominatorTree *DT,
ScalarEvolution *SE);
+ IVUsers(IVUsers &&X)
+ : L(std::move(X.L)), AC(std::move(X.AC)), DT(std::move(X.DT)),
+ SE(std::move(X.SE)), Processed(std::move(X.Processed)),
+ IVUses(std::move(X.IVUses)), EphValues(std::move(X.EphValues)) {
+ for (IVStrideUse &U : IVUses)
+ U.Parent = this;
+ }
+ IVUsers(const IVUsers &) = delete;
+ IVUsers &operator=(IVUsers &&) = delete;
+ IVUsers &operator=(const IVUsers &) = delete;
+
Loop *getLoop() const { return L; }
/// AddUsersIfInteresting - Inspect the specified Instruction. If it is a
@@ -203,12 +188,12 @@ public:
/// Analysis pass that exposes the \c IVUsers for a loop.
class IVUsersAnalysis : public AnalysisInfoMixin<IVUsersAnalysis> {
friend AnalysisInfoMixin<IVUsersAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef IVUsers Result;
- IVUsers run(Loop &L, AnalysisManager<Loop> &AM);
+ IVUsers run(Loop &L, LoopAnalysisManager &AM);
};
/// Printer pass for the \c IVUsers for a loop.
@@ -217,7 +202,7 @@ class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
public:
explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
}
diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h
index 2928d2be30e5..5e7b00261f63 100644
--- a/include/llvm/Analysis/InlineCost.h
+++ b/include/llvm/Analysis/InlineCost.h
@@ -15,6 +15,7 @@
#define LLVM_ANALYSIS_INLINECOST_H
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include <cassert>
#include <climits>
@@ -27,16 +28,26 @@ class ProfileSummaryInfo;
class TargetTransformInfo;
namespace InlineConstants {
- // Various magic constants used to adjust heuristics.
- const int InstrCost = 5;
- const int IndirectCallThreshold = 100;
- const int CallPenalty = 25;
- const int LastCallToStaticBonus = -15000;
- const int ColdccPenalty = 2000;
- const int NoreturnPenalty = 10000;
- /// Do not inline functions which allocate this many bytes on the stack
- /// when the caller is recursive.
- const unsigned TotalAllocaSizeRecursiveCaller = 1024;
+// Various thresholds used by inline cost analysis.
+/// Use when optsize (-Os) is specified.
+const int OptSizeThreshold = 50;
+
+/// Use when minsize (-Oz) is specified.
+const int OptMinSizeThreshold = 5;
+
+/// Use when -O3 is specified.
+const int OptAggressiveThreshold = 250;
+
+// Various magic constants used to adjust heuristics.
+const int InstrCost = 5;
+const int IndirectCallThreshold = 100;
+const int CallPenalty = 25;
+const int LastCallToStaticBonus = 15000;
+const int ColdccPenalty = 2000;
+const int NoreturnPenalty = 10000;
+/// Do not inline functions which allocate this many bytes on the stack
+/// when the caller is recursive.
+const unsigned TotalAllocaSizeRecursiveCaller = 1024;
}
/// \brief Represents the cost of inlining a function.
@@ -99,6 +110,52 @@ public:
int getCostDelta() const { return Threshold - getCost(); }
};
+/// Thresholds to tune inline cost analysis. The inline cost analysis decides
+/// which threshold's condition holds for a call site and applies that
+/// threshold; if none applies, DefaultThreshold is used. If a threshold is
+/// Optional, it is applied only when it has a valid value. Typically, users
+/// of inline cost analysis obtain an InlineParams object through one of the
+/// \c getInlineParams methods and pass it to \c getInlineCost. Some
+/// specialized versions of the inliner (such as the pre-inliner) might have
+/// custom logic to compute an \c InlineParams object.
+
+struct InlineParams {
+ /// The default threshold to start with for a callee.
+ int DefaultThreshold;
+
+ /// Threshold to use for callees with inline hint.
+ Optional<int> HintThreshold;
+
+ /// Threshold to use for cold callees.
+ Optional<int> ColdThreshold;
+
+ /// Threshold to use when the caller is optimized for size.
+ Optional<int> OptSizeThreshold;
+
+ /// Threshold to use when the caller is optimized for minsize.
+ Optional<int> OptMinSizeThreshold;
+
+ /// Threshold to use when the callsite is considered hot.
+ Optional<int> HotCallSiteThreshold;
+};
+
+/// Generate the parameters to tune the inline cost analysis based only on the
+/// commandline options.
+InlineParams getInlineParams();
+
+/// Generate the parameters to tune the inline cost analysis based on command
+/// line options. If -inline-threshold option is not explicitly passed,
+/// \p Threshold is used as the default threshold.
+InlineParams getInlineParams(int Threshold);
+
+/// Generate the parameters to tune the inline cost analysis based on command
+/// line options. If -inline-threshold option is not explicitly passed,
+/// the default threshold is computed from \p OptLevel and \p SizeOptLevel.
+/// An \p OptLevel value above 3 is considered an aggressive optimization mode.
+/// \p SizeOptLevel of 1 corresponds to the -Os flag and 2 corresponds to
+/// the -Oz flag.
+InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
+
/// \brief Get an InlineCost object representing the cost of inlining this
/// callsite.
///
@@ -110,23 +167,22 @@ public:
///
/// Also note that calling this function *dynamically* computes the cost of
/// inlining the callsite. It is an expensive, heavyweight call.
-InlineCost getInlineCost(CallSite CS, int DefaultThreshold,
- TargetTransformInfo &CalleeTTI,
- AssumptionCacheTracker *ACT, ProfileSummaryInfo *PSI);
+InlineCost
+getInlineCost(CallSite CS, const InlineParams &Params,
+ TargetTransformInfo &CalleeTTI,
+ std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
+ ProfileSummaryInfo *PSI);
/// \brief Get an InlineCost with the callee explicitly specified.
/// This allows you to calculate the cost of inlining a function via a
/// pointer. This behaves exactly as the version with no explicit callee
/// parameter in all other respects.
//
-InlineCost getInlineCost(CallSite CS, Function *Callee, int DefaultThreshold,
- TargetTransformInfo &CalleeTTI,
- AssumptionCacheTracker *ACT, ProfileSummaryInfo *PSI);
-
-int computeThresholdFromOptLevels(unsigned OptLevel, unsigned SizeOptLevel);
-
-/// \brief Return the default value of -inline-threshold.
-int getDefaultInlineThreshold();
+InlineCost
+getInlineCost(CallSite CS, Function *Callee, const InlineParams &Params,
+ TargetTransformInfo &CalleeTTI,
+ std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
+ ProfileSummaryInfo *PSI);
/// \brief Minimal filter to detect invalid constructs for inlining.
bool isInlineViable(Function &Callee);
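A hedged sketch of a caller migrating to the InlineParams-based overload declared above; the helper name, the opt levels, and the use of AssumptionCacheTracker are assumptions for illustration, not part of the patch.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include <functional>
using namespace llvm;

static bool shouldInline(CallSite CS, TargetTransformInfo &CalleeTTI,
                         AssumptionCacheTracker &ACT, ProfileSummaryInfo *PSI) {
  // Thresholds are now derived from optimization levels rather than passed
  // as a single DefaultThreshold integer.
  InlineParams Params = getInlineParams(/*OptLevel=*/2, /*SizeOptLevel=*/0);
  // The analysis takes a callback so it can lazily fetch per-function
  // assumption caches instead of a whole AssumptionCacheTracker.
  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
      [&ACT](Function &F) -> AssumptionCache & {
    return ACT.getAssumptionCache(F);
  };
  InlineCost IC = getInlineCost(CS, Params, CalleeTTI, GetAssumptionCache, PSI);
  return static_cast<bool>(IC); // True when the cost is under the threshold.
}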
diff --git a/include/llvm/Analysis/InstructionSimplify.h b/include/llvm/Analysis/InstructionSimplify.h
index 410fa4165a91..47d6118313cb 100644
--- a/include/llvm/Analysis/InstructionSimplify.h
+++ b/include/llvm/Analysis/InstructionSimplify.h
@@ -238,12 +238,13 @@ namespace llvm {
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
- /// Given operands for an TruncInst, fold the result or return null.
- Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
+ /// Given operands for a CastInst, fold the result or return null.
+ Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
//=== Helper functions for higher up the class hierarchy.
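For illustration only, a hedged sketch of how an existing SimplifyTruncInst call site maps onto the generalized entry point: the cast opcode is now passed explicitly.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static Value *simplifyTruncLike(Value *Op, Type *Ty, const DataLayout &DL,
                                const TargetLibraryInfo *TLI,
                                const DominatorTree *DT, AssumptionCache *AC,
                                const Instruction *CxtI) {
  // Previously: SimplifyTruncInst(Op, Ty, DL, TLI, DT, AC, CxtI);
  // Now the single entry point covers every CastInst opcode.
  return SimplifyCastInst(Instruction::Trunc, Op, Ty, DL, TLI, DT, AC, CxtI);
}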
diff --git a/include/llvm/Analysis/Interval.h b/include/llvm/Analysis/Interval.h
index a904753adaab..a63a004043cc 100644
--- a/include/llvm/Analysis/Interval.h
+++ b/include/llvm/Analysis/Interval.h
@@ -121,30 +121,22 @@ inline Interval::pred_iterator pred_end(Interval *I) {
}
template <> struct GraphTraits<Interval*> {
- typedef Interval NodeType;
+ typedef Interval *NodeRef;
typedef Interval::succ_iterator ChildIteratorType;
- static NodeType *getEntryNode(Interval *I) { return I; }
+ static NodeRef getEntryNode(Interval *I) { return I; }
/// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- static inline ChildIteratorType child_begin(NodeType *N) {
- return succ_begin(N);
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return succ_end(N);
- }
+ static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
+ static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};
template <> struct GraphTraits<Inverse<Interval*> > {
- typedef Interval NodeType;
+ typedef Interval *NodeRef;
typedef Interval::pred_iterator ChildIteratorType;
- static NodeType *getEntryNode(Inverse<Interval *> G) { return G.Graph; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return pred_begin(N);
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return pred_end(N);
- }
+ static NodeRef getEntryNode(Inverse<Interval *> G) { return G.Graph; }
+ static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
+ static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
} // End llvm namespace
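A small hedged sketch of generic code written against the updated traits: with NodeRef, child iteration reads the same for the forward and inverse interval graphs; countChildren is a made-up helper.

#include "llvm/ADT/GraphTraits.h"
#include "llvm/Analysis/Interval.h"
using namespace llvm;

// Works for GraphT = Interval * as well as GraphT = Inverse<Interval *>,
// since both specializations now expose NodeRef instead of NodeType.
template <typename GraphT>
static unsigned countChildren(typename GraphTraits<GraphT>::NodeRef N) {
  typedef GraphTraits<GraphT> GT;
  unsigned Count = 0;
  for (typename GT::ChildIteratorType I = GT::child_begin(N),
                                      E = GT::child_end(N);
       I != E; ++I)
    ++Count;
  return Count;
}
// countChildren<Interval *>(I) counts successors;
// countChildren<Inverse<Interval *>>(I) counts predecessors.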
diff --git a/include/llvm/Analysis/IteratedDominanceFrontier.h b/include/llvm/Analysis/IteratedDominanceFrontier.h
index 37da5617b913..af788c818f80 100644
--- a/include/llvm/Analysis/IteratedDominanceFrontier.h
+++ b/include/llvm/Analysis/IteratedDominanceFrontier.h
@@ -89,7 +89,6 @@ private:
DenseMap<DomTreeNode *, unsigned> DomLevels;
const SmallPtrSetImpl<BasicBlock *> *LiveInBlocks;
const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
- SmallVector<BasicBlock *, 32> PHIBlocks;
};
typedef IDFCalculator<BasicBlock *> ForwardIDFCalculator;
typedef IDFCalculator<Inverse<BasicBlock *>> ReverseIDFCalculator;
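With the PHIBlocks member removed, the result vector is owned by the caller and handed to calculate(); a hedged usage sketch with assumed names follows.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

static void placePhis(DominatorTree &DT,
                      const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                      SmallVectorImpl<BasicBlock *> &PHIBlocks) {
  ForwardIDFCalculator IDF(DT);
  IDF.setDefiningBlocks(DefBlocks);
  IDF.calculate(PHIBlocks); // The IDF is written into the caller's vector.
}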
diff --git a/include/llvm/Analysis/LazyBlockFrequencyInfo.h b/include/llvm/Analysis/LazyBlockFrequencyInfo.h
index a2d24bb9eb88..5a02b9dce463 100644
--- a/include/llvm/Analysis/LazyBlockFrequencyInfo.h
+++ b/include/llvm/Analysis/LazyBlockFrequencyInfo.h
@@ -18,6 +18,7 @@
#define LLVM_ANALYSIS_LAZYBLOCKFREQUENCYINFO_H
#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/LazyBranchProbabilityInfo.h"
#include "llvm/Pass.h"
namespace llvm {
@@ -57,21 +58,21 @@ class LazyBlockFrequencyInfoPass : public FunctionPass {
class LazyBlockFrequencyInfo {
public:
LazyBlockFrequencyInfo()
- : Calculated(false), F(nullptr), BPI(nullptr), LI(nullptr) {}
+ : Calculated(false), F(nullptr), BPIPass(nullptr), LI(nullptr) {}
/// Set up the per-function input.
- void setAnalysis(const Function *F, const BranchProbabilityInfo *BPI,
+ void setAnalysis(const Function *F, LazyBranchProbabilityInfoPass *BPIPass,
const LoopInfo *LI) {
this->F = F;
- this->BPI = BPI;
+ this->BPIPass = BPIPass;
this->LI = LI;
}
/// Retrieve the BFI with the block frequencies computed.
BlockFrequencyInfo &getCalculated() {
if (!Calculated) {
- assert(F && BPI && LI && "call setAnalysis");
- BFI.calculate(*F, *BPI, *LI);
+ assert(F && BPIPass && LI && "call setAnalysis");
+ BFI.calculate(*F, BPIPass->getBPI(), *LI);
Calculated = true;
}
return BFI;
@@ -91,7 +92,7 @@ class LazyBlockFrequencyInfoPass : public FunctionPass {
BlockFrequencyInfo BFI;
bool Calculated;
const Function *F;
- const BranchProbabilityInfo *BPI;
+ LazyBranchProbabilityInfoPass *BPIPass;
const LoopInfo *LI;
};
diff --git a/include/llvm/Analysis/LazyBranchProbabilityInfo.h b/include/llvm/Analysis/LazyBranchProbabilityInfo.h
new file mode 100644
index 000000000000..c76fa1e819ae
--- /dev/null
+++ b/include/llvm/Analysis/LazyBranchProbabilityInfo.h
@@ -0,0 +1,109 @@
+//===- LazyBranchProbabilityInfo.h - Lazy Branch Probability ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is an alternative analysis pass to BranchProbabilityInfoWrapperPass.
+// The difference is that with this pass the branch probabilities are not
+// computed when the analysis pass is executed but rather when the BPI result
+// is explicitly requested by the analysis client.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H
+#define LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H
+
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class AnalysisUsage;
+class Function;
+class LoopInfo;
+
+/// \brief This is an alternative analysis pass to
+/// BranchProbabilityInfoWrapperPass. The difference is that with this pass the
+/// branch probabilities are not computed when the analysis pass is executed but
+/// rather when the BPI result is explicitly requested by the analysis client.
+///
+/// There are some additional requirements for any client pass that wants to use
+/// the analysis:
+///
+/// 1. The pass needs to initialize dependent passes with:
+///
+/// INITIALIZE_PASS_DEPENDENCY(LazyBPIPass)
+///
+/// 2. Similarly, getAnalysisUsage should call:
+///
+/// LazyBranchProbabilityInfoPass::getLazyBPIAnalysisUsage(AU)
+///
+/// 3. The computed BPI should be requested with
+/// getAnalysis<LazyBranchProbabilityInfoPass>().getBPI() before LoopInfo
+/// is invalidated, for example by changing the CFG.
+///
+/// Note that it is expected that we wouldn't need this functionality for the
+/// new PM since with the new PM, analyses are executed on demand.
+class LazyBranchProbabilityInfoPass : public FunctionPass {
+
+ /// Wraps a BPI to allow lazy computation of the branch probabilities.
+ ///
+ /// A pass that only conditionally uses BPI can unconditionally require the
+ /// analysis without paying for the overhead if BPI doesn't end up being used.
+ class LazyBranchProbabilityInfo {
+ public:
+ LazyBranchProbabilityInfo(const Function *F, const LoopInfo *LI)
+ : Calculated(false), F(F), LI(LI) {}
+
+ /// Retrieve the BPI with the branch probabilities computed.
+ BranchProbabilityInfo &getCalculated() {
+ if (!Calculated) {
+ assert(F && LI && "call setAnalysis");
+ BPI.calculate(*F, *LI);
+ Calculated = true;
+ }
+ return BPI;
+ }
+
+ const BranchProbabilityInfo &getCalculated() const {
+ return const_cast<LazyBranchProbabilityInfo *>(this)->getCalculated();
+ }
+
+ private:
+ BranchProbabilityInfo BPI;
+ bool Calculated;
+ const Function *F;
+ const LoopInfo *LI;
+ };
+
+ std::unique_ptr<LazyBranchProbabilityInfo> LBPI;
+
+public:
+ static char ID;
+
+ LazyBranchProbabilityInfoPass();
+
+ /// \brief Compute and return the branch probabilities.
+ BranchProbabilityInfo &getBPI() { return LBPI->getCalculated(); }
+
+ /// \brief Compute and return the branch probabilities.
+ const BranchProbabilityInfo &getBPI() const { return LBPI->getCalculated(); }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// Helper for client passes to set up the analysis usage on behalf of this
+ /// pass.
+ static void getLazyBPIAnalysisUsage(AnalysisUsage &AU);
+
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void print(raw_ostream &OS, const Module *M) const override;
+};
+
+/// \brief Helper for client passes to initialize dependent passes for LBPI.
+void initializeLazyBPIPassPass(PassRegistry &Registry);
+}
+#endif
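A hedged end-to-end sketch of the client contract documented in the header above; ExamplePass and its registration are hypothetical.

#include "llvm/Analysis/LazyBranchProbabilityInfo.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct ExamplePass : public FunctionPass {
  static char ID;
  ExamplePass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Requirement 2: declare the lazy BPI dependency.
    LazyBranchProbabilityInfoPass::getLazyBPIAnalysisUsage(AU);
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    // Requirement 3: this call is what actually computes the probabilities,
    // so make it while LoopInfo is still valid.
    BranchProbabilityInfo &BPI =
        getAnalysis<LazyBranchProbabilityInfoPass>().getBPI();
    (void)BPI;
    return false;
  }
};
char ExamplePass::ID = 0;
} // end anonymous namespace
// Requirement 1 is satisfied in the pass registration code via
// INITIALIZE_PASS_DEPENDENCY(LazyBPIPass).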
diff --git a/include/llvm/Analysis/LazyCallGraph.h b/include/llvm/Analysis/LazyCallGraph.h
index 9f62eaa2e9f8..566e526f89b3 100644
--- a/include/llvm/Analysis/LazyCallGraph.h
+++ b/include/llvm/Analysis/LazyCallGraph.h
@@ -44,6 +44,7 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
@@ -149,6 +150,9 @@ public:
/// around but clear them.
operator bool() const;
+ /// Returns the \c Kind of the edge.
+ Kind getKind() const;
+
/// Test whether the edge represents a direct call to a function.
///
/// This requires that the edge is not null.
@@ -177,6 +181,7 @@ public:
private:
friend class LazyCallGraph::Node;
+ friend class LazyCallGraph::RefSCC;
PointerIntPair<PointerUnion<Function *, Node *>, 1, Kind> Value;
@@ -194,6 +199,7 @@ public:
class Node {
friend class LazyCallGraph;
friend class LazyCallGraph::SCC;
+ friend class LazyCallGraph::RefSCC;
LazyCallGraph *G;
Function &F;
@@ -223,6 +229,11 @@ public:
/// Internal helper to remove the edge to the given function.
void removeEdgeInternal(Function &ChildF);
+ void clear() {
+ Edges.clear();
+ EdgeIndexMap.clear();
+ }
+
/// Print the name of this node's function.
friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
return OS << N.F.getName();
@@ -248,6 +259,11 @@ public:
}
const Edge &operator[](Node &N) const { return (*this)[N.getFunction()]; }
+ const Edge *lookup(Function &F) const {
+ auto EI = EdgeIndexMap.find(&F);
+ return EI != EdgeIndexMap.end() ? &Edges[EI->second] : nullptr;
+ }
+
call_edge_iterator call_begin() const {
return call_edge_iterator(Edges.begin(), Edges.end());
}
@@ -410,6 +426,32 @@ public:
RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }
+ /// Test if this SCC is a parent of \a C.
+ ///
+ /// Note that this is linear in the number of edges departing the current
+ /// SCC.
+ bool isParentOf(const SCC &C) const;
+
+ /// Test if this SCC is an ancestor of \a C.
+ ///
+ /// Note that in the worst case this is linear in the number of edges
+ /// departing the current SCC and every SCC in the entire graph reachable
+ /// from this SCC. Thus this very well may walk every edge in the entire
+ /// call graph! Do not call this in a tight loop!
+ bool isAncestorOf(const SCC &C) const;
+
+ /// Test if this SCC is a child of \a C.
+ ///
+ /// See the comments for \c isParentOf for detailed notes about the
+ /// complexity of this routine.
+ bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }
+
+ /// Test if this SCC is a descendant of \a C.
+ ///
+ /// See the comments for \c isParentOf for detailed notes about the
+ /// complexity of this routine.
+ bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }
+
/// Provide a short name by printing this SCC to a std::string.
///
/// This copes with the fact that we don't have a name per-se for an SCC
@@ -453,6 +495,12 @@ public:
/// formRefSCCFast on the graph itself.
RefSCC(LazyCallGraph &G);
+ void clear() {
+ Parents.clear();
+ SCCs.clear();
+ SCCIndices.clear();
+ }
+
/// Print a short description useful for debugging or logging.
///
/// We print the SCCs wrapped in '[]'s and skipping the middle SCCs if
@@ -494,6 +542,10 @@ public:
void verify();
#endif
+ /// Handle any necessary parent set updates after inserting a trivial ref
+ /// or call edge.
+ void handleTrivialEdgeInsertion(Node &SourceN, Node &TargetN);
+
public:
typedef pointee_iterator<SmallVectorImpl<SCC *>::const_iterator> iterator;
typedef iterator_range<iterator> range;
@@ -518,7 +570,7 @@ public:
return make_range(parent_begin(), parent_end());
}
- /// Test if this SCC is a parent of \a C.
+ /// Test if this RefSCC is a parent of \a C.
bool isParentOf(const RefSCC &C) const { return C.isChildOf(*this); }
/// Test if this RefSCC is an ancestor of \a C.
@@ -532,9 +584,9 @@ public:
/// Test if this RefSCC is a descendant of \a C.
bool isDescendantOf(const RefSCC &C) const;
- /// Provide a short name by printing this SCC to a std::string.
+ /// Provide a short name by printing this RefSCC to a std::string.
///
- /// This copes with the fact that we don't have a name per-se for an SCC
+ /// This copes with the fact that we don't have a name per-se for a RefSCC
/// while still making the use of this in debugging and logging useful.
std::string getName() const {
std::string Name;
@@ -548,7 +600,7 @@ public:
/// \name Mutation API
///
/// These methods provide the core API for updating the call graph in the
- /// presence of a (potentially still in-flight) DFS-found SCCs.
+ /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
///
/// Note that these methods sometimes have complex runtimes, so be careful
/// how you call them.
@@ -568,18 +620,34 @@ public:
SmallVector<SCC *, 1> switchInternalEdgeToCall(Node &SourceN,
Node &TargetN);
- /// Make an existing internal call edge into a ref edge.
+ /// Make an existing internal call edge between separate SCCs into a ref
+ /// edge.
+ ///
+ /// If SourceN and TargetN are in separate SCCs within this RefSCC, changing
+ /// the call edge between them to a ref edge is a trivial operation that
+ /// does not require any structural changes to the call graph.
+ void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);
+
+ /// Make an existing internal call edge within a single SCC into a ref
+ /// edge.
+ ///
+ /// Since SourceN and TargetN are part of a single SCC, this SCC may be
+ /// split up due to breaking a cycle in the call edges that formed it. If
+ /// that happens, then this routine will insert new SCCs into the postorder
+ /// list *before* the SCC of TargetN (previously the SCC of both). This
+ /// preserves postorder as the TargetN can reach all of the other nodes by
+ /// definition of previously being in a single SCC formed by the cycle from
+ /// SourceN to TargetN.
+ ///
+ /// The newly added SCCs are inserted *immediately* and contiguously
+ /// prior to the TargetN SCC, and the returned range covers the new SCCs in
+ /// the RefSCC's postorder sequence. You can directly iterate the returned
+ /// range to observe all of the new SCCs in postorder.
///
- /// If SourceN and TargetN are part of a single SCC, it may be split up due
- /// to breaking a cycle in the call edges that formed it. If that happens,
- /// then this routine will insert new SCCs into the postorder list *before*
- /// the SCC of TargetN (previously the SCC of both). This preserves
- /// postorder as the TargetN can reach all of the other nodes by definition
- /// of previously being in a single SCC formed by the cycle from SourceN to
- /// TargetN. The newly added nodes are added *immediately* and contiguously
- /// prior to the TargetN SCC and so they may be iterated starting from
- /// there.
- void switchInternalEdgeToRef(Node &SourceN, Node &TargetN);
+ /// Note that if SourceN and TargetN are in separate SCCs, the simpler
+ /// routine `switchTrivialInternalEdgeToRef` should be used instead.
+ iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
+ Node &TargetN);
/// Make an existing outgoing ref edge into a call edge.
///
@@ -699,15 +767,41 @@ public:
SmallVector<RefSCC *, 1> removeInternalRefEdge(Node &SourceN,
Node &TargetN);
+ /// A convenience wrapper around the above to handle trivial cases of
+ /// inserting a new call edge.
+ ///
+ /// This is trivial whenever the target is in the same SCC as the source or
+ /// the edge is an outgoing edge to some descendant SCC. In these cases
+ /// there is no change to the cyclic structure of SCCs or RefSCCs.
+ ///
+ /// To further make calling this convenient, it also handles inserting
+ /// already existing edges.
+ void insertTrivialCallEdge(Node &SourceN, Node &TargetN);
+
+ /// A convenience wrapper around the above to handle trivial cases of
+ /// inserting a new ref edge.
+ ///
+ /// This is trivial whenever the target is in the same RefSCC as the source
+ /// or the edge is an outgoing edge to some descendant RefSCC. In these
+ /// cases there is no change to the cyclic structure of the RefSCCs.
+ ///
+ /// To further make calling this convenient, it also handles inserting
+ /// already existing edges.
+ void insertTrivialRefEdge(Node &SourceN, Node &TargetN);
+
///@}
};
- /// A post-order depth-first SCC iterator over the call graph.
+ /// A post-order depth-first RefSCC iterator over the call graph.
///
- /// This iterator triggers the Tarjan DFS-based formation of the SCC DAG for
- /// the call graph, walking it lazily in depth-first post-order. That is, it
- /// always visits SCCs for a callee prior to visiting the SCC for a caller
- /// (when they are in different SCCs).
+ /// This iterator triggers the Tarjan DFS-based formation of the RefSCC (and
+ /// SCC) DAG for the call graph, walking it lazily in depth-first post-order.
+ /// That is, it always visits RefSCCs for the target of a reference edge
+ /// prior to visiting the RefSCC for a source of the edge (when they are in
+ /// different RefSCCs).
+ ///
+ /// When forming each RefSCC, the call edges within it are used to form SCCs
+ /// within it, so iterating this also controls the lazy formation of SCCs.
class postorder_ref_scc_iterator
: public iterator_facade_base<postorder_ref_scc_iterator,
std::forward_iterator_tag, RefSCC> {
@@ -718,27 +812,39 @@ public:
struct IsAtEndT {};
LazyCallGraph *G;
- RefSCC *C;
+ RefSCC *RC;
- // Build the begin iterator for a node.
- postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G) {
- C = G.getNextRefSCCInPostOrder();
- }
+ /// Build the begin iterator for a node.
+ postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {}
- // Build the end iterator for a node. This is selected purely by overload.
+ /// Build the end iterator for a node. This is selected purely by overload.
postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/)
- : G(&G), C(nullptr) {}
+ : G(&G), RC(nullptr) {}
+
+ /// Get the post-order RefSCC at the given index of the postorder walk,
+ /// populating it if necessary.
+ static RefSCC *getRC(LazyCallGraph &G, int Index) {
+ if (Index == (int)G.PostOrderRefSCCs.size())
+ if (!G.buildNextRefSCCInPostOrder())
+ // We're at the end.
+ return nullptr;
+
+ assert(Index < (int)G.PostOrderRefSCCs.size() &&
+ "Built the next post-order RefSCC without growing list!");
+ return G.PostOrderRefSCCs[Index];
+ }
public:
bool operator==(const postorder_ref_scc_iterator &Arg) const {
- return G == Arg.G && C == Arg.C;
+ return G == Arg.G && RC == Arg.RC;
}
- reference operator*() const { return *C; }
+ reference operator*() const { return *RC; }
using iterator_facade_base::operator++;
postorder_ref_scc_iterator &operator++() {
- C = G->getNextRefSCCInPostOrder();
+ assert(RC && "Cannot increment the end iterator!");
+ RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
return *this;
}
};
@@ -777,7 +883,7 @@ public:
/// Lookup a function's SCC in the graph.
///
- /// \returns null if the function hasn't been assigned an SCC via the SCC
+ /// \returns null if the function hasn't been assigned an SCC via the RefSCC
/// iterator walk.
SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
@@ -809,8 +915,9 @@ public:
/// call graph. They can be used to update the core node-graph during
/// a node-based inorder traversal that precedes any SCC-based traversal.
///
- /// Once you begin manipulating a call graph's SCCs, you must perform all
- /// mutation of the graph via the SCC methods.
+ /// Once you begin manipulating a call graph's SCCs, most mutation of the
+ /// graph must be performed via a RefSCC method. There are some exceptions
+ /// below.
/// Update the call graph after inserting a new edge.
void insertEdge(Node &Caller, Function &Callee, Edge::Kind EK);
@@ -830,6 +937,72 @@ public:
///@}
+ ///@{
+ /// \name General Mutation API
+ ///
+ /// There is a very limited set of mutations allowed on the graph as a whole
+ /// once SCCs have started to be formed. These routines have strict contracts
+ /// but may be called at any point.
+
+ /// Remove a dead function from the call graph (typically to delete it).
+ ///
+ /// Note that the function must have an empty use list, and the call graph
+ /// must be up-to-date prior to calling this. That means it is by itself in
+ /// a maximal SCC which is by itself in a maximal RefSCC, etc. No structural
+ /// changes result from calling this routine other than potentially removing
+ /// entry points into the call graph.
+ ///
+ /// If SCC formation has begun, this function must not be part of the current
+ /// DFS in order to call this safely. Typically, the function will have been
+ /// fully visited by the DFS prior to calling this routine.
+ void removeDeadFunction(Function &F);
+
+ ///@}
+
+ ///@{
+ /// \name Static helpers for code doing updates to the call graph.
+ ///
+ /// These helpers are used to implement parts of the call graph but are also
+ /// useful to code doing updates or otherwise wanting to walk the IR in the
+ /// same patterns as when we build the call graph.
+
+ /// Recursively visits the defined functions whose address is reachable from
+ /// every constant in the \p Worklist.
+ ///
+ /// Doesn't recurse through any constants already in the \p Visited set, and
+ /// updates that set with every constant visited.
+ ///
+ /// For each defined function, calls \p Callback with that function.
+ template <typename CallbackT>
+ static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
+ SmallPtrSetImpl<Constant *> &Visited,
+ CallbackT Callback) {
+ while (!Worklist.empty()) {
+ Constant *C = Worklist.pop_back_val();
+
+ if (Function *F = dyn_cast<Function>(C)) {
+ if (!F->isDeclaration())
+ Callback(*F);
+ continue;
+ }
+
+ if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
+ // The blockaddress constant expression is a weird special case, we
+ // can't generically walk its operands the way we do for all other
+ // constants.
+ if (Visited.insert(BA->getFunction()).second)
+ Worklist.push_back(BA->getFunction());
+ continue;
+ }
+
+ for (Value *Op : C->operand_values())
+ if (Visited.insert(cast<Constant>(Op)).second)
+ Worklist.push_back(cast<Constant>(Op));
+ }
+ }
+
+ ///@}
+
private:
typedef SmallVectorImpl<Node *>::reverse_iterator node_stack_iterator;
typedef iterator_range<node_stack_iterator> node_stack_range;
@@ -858,6 +1031,15 @@ private:
/// Allocator that holds all the call graph RefSCCs.
SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;
+ /// The post-order sequence of RefSCCs.
+ ///
+ /// This list is lazily formed the first time we walk the graph.
+ SmallVector<RefSCC *, 16> PostOrderRefSCCs;
+
+ /// A map from RefSCC to the index for it in the postorder sequence of
+ /// RefSCCs.
+ DenseMap<RefSCC *, int> RefSCCIndices;
+
/// The leaf RefSCCs of the graph.
///
/// These are all of the RefSCCs which have no children.
@@ -869,7 +1051,7 @@ private:
/// Set of entry nodes not-yet-processed into RefSCCs.
SmallVector<Function *, 4> RefSCCEntryNodes;
- /// Stack of nodes the DFS has walked but not yet put into a SCC.
+ /// Stack of nodes the DFS has walked but not yet put into a RefSCC.
SmallVector<Node *, 4> PendingRefSCCStack;
/// Counter for the next DFS number to assign.
@@ -905,8 +1087,24 @@ private:
/// and updates the root leaf list.
void connectRefSCC(RefSCC &RC);
- /// Retrieve the next node in the post-order RefSCC walk of the call graph.
- RefSCC *getNextRefSCCInPostOrder();
+ /// Get the index of a RefSCC within the postorder traversal.
+ ///
+ /// Requires that this RefSCC is a valid one in the (perhaps partial)
+ /// postorder traversed part of the graph.
+ int getRefSCCIndex(RefSCC &RC) {
+ auto IndexIt = RefSCCIndices.find(&RC);
+ assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
+ assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
+ "Index does not point back at RC!");
+ return IndexIt->second;
+ }
+
+ /// Builds the next node in the post-order RefSCC walk of the call graph and
+ /// appends it to the \c PostOrderRefSCCs vector.
+ ///
+ /// Returns true if a new RefSCC was successfully constructed, and false if
+ /// there are no more RefSCCs to build in the graph.
+ bool buildNextRefSCCInPostOrder();
};
inline LazyCallGraph::Edge::Edge() : Value() {}
@@ -917,9 +1115,14 @@ inline LazyCallGraph::Edge::operator bool() const {
return !Value.getPointer().isNull();
}
+inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
+ assert(*this && "Queried a null edge!");
+ return Value.getInt();
+}
+
inline bool LazyCallGraph::Edge::isCall() const {
assert(*this && "Queried a null edge!");
- return Value.getInt() == Call;
+ return getKind() == Call;
}
inline Function &LazyCallGraph::Edge::getFunction() const {
@@ -953,26 +1156,26 @@ inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode(LazyCallGraph &G) {
// Provide GraphTraits specializations for call graphs.
template <> struct GraphTraits<LazyCallGraph::Node *> {
- typedef LazyCallGraph::Node NodeType;
+ typedef LazyCallGraph::Node *NodeRef;
typedef LazyCallGraph::edge_iterator ChildIteratorType;
- static NodeType *getEntryNode(NodeType *N) { return N; }
- static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
- static ChildIteratorType child_end(NodeType *N) { return N->end(); }
+ static NodeRef getEntryNode(NodeRef N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
template <> struct GraphTraits<LazyCallGraph *> {
- typedef LazyCallGraph::Node NodeType;
+ typedef LazyCallGraph::Node *NodeRef;
typedef LazyCallGraph::edge_iterator ChildIteratorType;
- static NodeType *getEntryNode(NodeType *N) { return N; }
- static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
- static ChildIteratorType child_end(NodeType *N) { return N->end(); }
+ static NodeRef getEntryNode(NodeRef N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
/// An analysis pass which computes the call graph for a module.
class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
friend AnalysisInfoMixin<LazyCallGraphAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// Inform generic clients of the result type.
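Below is a hedged sketch, not taken from the patch, of driving the new static visitReferences helper from a global variable's initializer; the helper name is made up.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/GlobalVariable.h"
using namespace llvm;

static void collectReferencedFunctions(GlobalVariable &GV,
                                       SmallVectorImpl<Function *> &Out) {
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  if (GV.hasInitializer() && Visited.insert(GV.getInitializer()).second)
    Worklist.push_back(GV.getInitializer());

  // The callback fires once per defined function reachable from the worklist.
  LazyCallGraph::visitReferences(Worklist, Visited,
                                 [&Out](Function &F) { Out.push_back(&F); });
}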
diff --git a/include/llvm/Analysis/LazyValueInfo.h b/include/llvm/Analysis/LazyValueInfo.h
index c85cf2c5da56..610791023a7d 100644
--- a/include/llvm/Analysis/LazyValueInfo.h
+++ b/include/llvm/Analysis/LazyValueInfo.h
@@ -109,7 +109,7 @@ public:
Result run(Function &F, FunctionAnalysisManager &FAM);
private:
- static char PassID;
+ static AnalysisKey Key;
friend struct AnalysisInfoMixin<LazyValueAnalysis>;
};
diff --git a/include/llvm/Analysis/Loads.h b/include/llvm/Analysis/Loads.h
index 39f80f489e12..139bf3c2116f 100644
--- a/include/llvm/Analysis/Loads.h
+++ b/include/llvm/Analysis/Loads.h
@@ -71,15 +71,13 @@ extern cl::opt<unsigned> DefMaxInstsToScan;
/// the only relevant load gets deleted.)
///
/// \param Load The load we want to replace.
-/// \param ScanBB The basic block to scan. FIXME: This is redundant.
+/// \param ScanBB The basic block to scan.
/// \param [in,out] ScanFrom The location to start scanning from. When this
/// function returns, it points at the last instruction scanned.
/// \param MaxInstsToScan The maximum number of instructions to scan. If this
/// is zero, the whole block will be scanned.
/// \param AA Optional pointer to alias analysis, to make the scan more
/// precise.
-/// \param [out] AATags The aliasing metadata for the operation which produced
-/// the value. FIXME: This is basically useless.
/// \param [out] IsLoadCSE Whether the returned value is a load from the same
/// location in memory, as opposed to the value operand of a store.
///
@@ -89,7 +87,6 @@ Value *FindAvailableLoadedValue(LoadInst *Load,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan = DefMaxInstsToScan,
AliasAnalysis *AA = nullptr,
- AAMDNodes *AATags = nullptr,
bool *IsLoadCSE = nullptr);
}
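A hedged sketch of a caller after the AATags out-parameter was dropped; the helper and its arguments are assumptions for illustration.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static Value *findCSECandidate(LoadInst *Load, AliasAnalysis *AA) {
  BasicBlock *ScanBB = Load->getParent();
  BasicBlock::iterator ScanFrom = Load->getIterator();
  bool IsLoadCSE = false;
  // The aliasing-metadata out-parameter is gone; the remaining arguments are
  // unchanged.
  return FindAvailableLoadedValue(Load, ScanBB, ScanFrom, DefMaxInstsToScan,
                                  AA, &IsLoadCSE);
}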
diff --git a/include/llvm/Analysis/LoopAccessAnalysis.h b/include/llvm/Analysis/LoopAccessAnalysis.h
index 619fab283102..76066f6003e7 100644
--- a/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -20,7 +20,9 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
@@ -34,6 +36,7 @@ class Loop;
class SCEV;
class SCEVUnionPredicate;
class LoopAccessInfo;
+class OptimizationRemarkEmitter;
/// Optimization analysis message produced during vectorization. Messages inform
/// the user why vectorization did not occur.
@@ -63,10 +66,9 @@ public:
/// \brief Emit an analysis note for \p PassName with the debug location from
/// the instruction in \p Message if available. Otherwise use the location of
/// \p TheLoop.
- static void emitAnalysis(const LoopAccessReport &Message,
- const Function *TheFunction,
- const Loop *TheLoop,
- const char *PassName);
+ static void emitAnalysis(const LoopAccessReport &Message, const Loop *TheLoop,
+ const char *PassName,
+ OptimizationRemarkEmitter &ORE);
};
/// \brief Collection of parameters shared between the Loop Vectorizer and the
@@ -516,38 +518,6 @@ public:
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI,
AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI);
- // FIXME:
- // Hack for MSVC 2013 which sems like it can't synthesize this even
- // with default keyword:
- // LoopAccessInfo(LoopAccessInfo &&LAI) = default;
- LoopAccessInfo(LoopAccessInfo &&LAI)
- : PSE(std::move(LAI.PSE)), PtrRtChecking(std::move(LAI.PtrRtChecking)),
- DepChecker(std::move(LAI.DepChecker)), TheLoop(LAI.TheLoop),
- NumLoads(LAI.NumLoads), NumStores(LAI.NumStores),
- MaxSafeDepDistBytes(LAI.MaxSafeDepDistBytes), CanVecMem(LAI.CanVecMem),
- StoreToLoopInvariantAddress(LAI.StoreToLoopInvariantAddress),
- Report(std::move(LAI.Report)),
- SymbolicStrides(std::move(LAI.SymbolicStrides)),
- StrideSet(std::move(LAI.StrideSet)) {}
- // LoopAccessInfo &operator=(LoopAccessInfo &&LAI) = default;
- LoopAccessInfo &operator=(LoopAccessInfo &&LAI) {
- assert(this != &LAI);
-
- PSE = std::move(LAI.PSE);
- PtrRtChecking = std::move(LAI.PtrRtChecking);
- DepChecker = std::move(LAI.DepChecker);
- TheLoop = LAI.TheLoop;
- NumLoads = LAI.NumLoads;
- NumStores = LAI.NumStores;
- MaxSafeDepDistBytes = LAI.MaxSafeDepDistBytes;
- CanVecMem = LAI.CanVecMem;
- StoreToLoopInvariantAddress = LAI.StoreToLoopInvariantAddress;
- Report = std::move(LAI.Report);
- SymbolicStrides = std::move(LAI.SymbolicStrides);
- StrideSet = std::move(LAI.StrideSet);
- return *this;
- }
-
/// Return true if we can analyze the memory accesses in the loop and there are
/// no memory dependence cycles.
bool canVectorizeMemory() const { return CanVecMem; }
@@ -594,7 +564,7 @@ public:
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
- const Optional<LoopAccessReport> &getReport() const { return Report; }
+ const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }
/// \brief the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
@@ -640,7 +610,13 @@ private:
/// pass.
bool canAnalyzeLoop();
- void emitAnalysis(LoopAccessReport &Message);
+ /// \brief Save the analysis remark.
+ ///
+ /// LAA does not emit the remarks directly. Instead, it stores them so that
+ /// the client can retrieve and present them as its own analysis
+ /// (e.g. -Rpass-analysis=loop-vectorize).
+ OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
+ Instruction *Instr = nullptr);
/// \brief Collect memory access with loop invariant strides.
///
@@ -674,7 +650,7 @@ private:
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
- Optional<LoopAccessReport> Report;
+ std::unique_ptr<OptimizationRemarkAnalysis> Report;
/// \brief If an access has a symbolic strides, this maps the pointer value to
/// the stride symbol.
@@ -712,7 +688,7 @@ const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
/// run-time assumptions.
int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap = ValueToValueMap(),
- bool Assume = false);
+ bool Assume = false, bool ShouldCheckWrap = true);
/// \brief Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
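A hedged illustration of the ShouldCheckWrap parameter added to getPtrStride above; the helper and its arguments are assumed.

#include "llvm/Analysis/LoopAccessAnalysis.h"
using namespace llvm;

static bool hasUnitStrideIgnoringWrap(PredicatedScalarEvolution &PSE,
                                      Value *Ptr, const Loop *TheLoop) {
  // A caller that has already established no-wrap behaviour (or does not
  // need it) can now skip the wrapping check via the new flag.
  int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, ValueToValueMap(),
                                /*Assume=*/false, /*ShouldCheckWrap=*/false);
  return Stride == 1;
}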
@@ -773,11 +749,11 @@ private:
class LoopAccessAnalysis
: public AnalysisInfoMixin<LoopAccessAnalysis> {
friend AnalysisInfoMixin<LoopAccessAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef LoopAccessInfo Result;
- Result run(Loop &, AnalysisManager<Loop> &);
+ Result run(Loop &, LoopAnalysisManager &);
static StringRef name() { return "LoopAccessAnalysis"; }
};
@@ -788,7 +764,7 @@ class LoopAccessInfoPrinterPass
public:
explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
inline Instruction *MemoryDepChecker::Dependence::getSource(
diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h
index 35dc6bcb6864..0c99c6297c1e 100644
--- a/include/llvm/Analysis/LoopInfo.h
+++ b/include/llvm/Analysis/LoopInfo.h
@@ -168,6 +168,19 @@ public:
return false;
}
+ /// Returns true if \p BB is a loop-latch.
+ /// A latch block is a block that contains a branch back to the header.
+ /// This function is useful when there are multiple latches in a loop
+ /// because \c getLoopLatch will return nullptr in that case.
+ bool isLoopLatch(const BlockT *BB) const {
+ assert(contains(BB) && "block does not belong to the loop");
+
+ BlockT *Header = getHeader();
+ auto PredBegin = GraphTraits<Inverse<BlockT*> >::child_begin(Header);
+ auto PredEnd = GraphTraits<Inverse<BlockT*> >::child_end(Header);
+ return std::find(PredBegin, PredEnd, BB) != PredEnd;
+ }
+
/// Calculate the number of back edges to the loop header.
unsigned getNumBackEdges() const {
unsigned NumBackEdges = 0;
@@ -316,7 +329,7 @@ public:
/// Blocks as appropriate. This does not update the mapping in the LoopInfo
/// class.
void removeBlockFromLoop(BlockT *BB) {
- auto I = std::find(Blocks.begin(), Blocks.end(), BB);
+ auto I = find(Blocks, BB);
assert(I != Blocks.end() && "N is not in this list!");
Blocks.erase(I);
@@ -329,7 +342,8 @@ public:
/// Verify loop structure of this loop and all nested loops.
void verifyLoopNest(DenseSet<const LoopT*> *Loops) const;
- void print(raw_ostream &OS, unsigned Depth = 0) const;
+ /// Print the loop's blocks; with Verbose set, also print each block's contents.
+ void print(raw_ostream &OS, unsigned Depth = 0, bool Verbose = false) const;
protected:
friend class LoopInfoBase<BlockT, LoopT>;
@@ -353,6 +367,27 @@ extern template class LoopBase<BasicBlock, Loop>;
/// in the CFG are necessarily loops.
class Loop : public LoopBase<BasicBlock, Loop> {
public:
+ /// \brief A range representing the start and end location of a loop.
+ class LocRange {
+ DebugLoc Start;
+ DebugLoc End;
+
+ public:
+ LocRange() {}
+ LocRange(DebugLoc Start) : Start(std::move(Start)), End(std::move(Start)) {}
+ LocRange(DebugLoc Start, DebugLoc End) : Start(std::move(Start)),
+ End(std::move(End)) {}
+
+ const DebugLoc &getStart() const { return Start; }
+ const DebugLoc &getEnd() const { return End; }
+
+ /// \brief Check for null.
+ ///
+ explicit operator bool() const {
+ return Start && End;
+ }
+ };
+
Loop() {}
/// Return true if the specified value is loop invariant.
@@ -398,7 +433,7 @@ public:
bool isLCSSAForm(DominatorTree &DT) const;
/// Return true if this Loop and all inner subloops are in LCSSA form.
- bool isRecursivelyLCSSAForm(DominatorTree &DT) const;
+ bool isRecursivelyLCSSAForm(DominatorTree &DT, const LoopInfo &LI) const;
/// Return true if the Loop is in the form that the LoopSimplify form
/// transforms loops to, which is sometimes called normal form.
@@ -451,6 +486,7 @@ public:
BasicBlock *getUniqueExitBlock() const;
void dump() const;
+ void dumpVerbose() const;
/// Return the debug location of the start of this loop.
/// This looks for a BB terminating instruction with a known debug
@@ -459,6 +495,9 @@ public:
/// it returns an unknown location.
DebugLoc getStartLoc() const;
+ /// Return the source code span of the loop.
+ LocRange getLocRange() const;
+
StringRef getName() const {
if (BasicBlock *Header = getHeader())
if (Header->hasName())
@@ -579,7 +618,7 @@ public:
/// loop.
void changeTopLevelLoop(LoopT *OldLoop,
LoopT *NewLoop) {
- auto I = std::find(TopLevelLoops.begin(), TopLevelLoops.end(), OldLoop);
+ auto I = find(TopLevelLoops, OldLoop);
assert(I != TopLevelLoops.end() && "Old loop not at top level!");
*I = NewLoop;
assert(!NewLoop->ParentLoop && !OldLoop->ParentLoop &&
@@ -620,7 +659,7 @@ public:
// Debugging
void print(raw_ostream &OS) const;
- void verify() const;
+ void verify(const DominatorTreeBase<BlockT> &DomTree) const;
};
// Implementation in LoopInfoImpl.h
@@ -746,40 +785,32 @@ public:
// Allow clients to walk the list of nested loops...
template <> struct GraphTraits<const Loop*> {
- typedef const Loop NodeType;
+ typedef const Loop *NodeRef;
typedef LoopInfo::iterator ChildIteratorType;
- static NodeType *getEntryNode(const Loop *L) { return L; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->end();
- }
+ static NodeRef getEntryNode(const Loop *L) { return L; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
template <> struct GraphTraits<Loop*> {
- typedef Loop NodeType;
+ typedef Loop *NodeRef;
typedef LoopInfo::iterator ChildIteratorType;
- static NodeType *getEntryNode(Loop *L) { return L; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->end();
- }
+ static NodeRef getEntryNode(Loop *L) { return L; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
/// \brief Analysis pass that exposes the \c LoopInfo for a function.
class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
friend AnalysisInfoMixin<LoopAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef LoopInfo Result;
- LoopInfo run(Function &F, AnalysisManager<Function> &AM);
+ LoopInfo run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c LoopAnalysis results.
@@ -788,7 +819,12 @@ class LoopPrinterPass : public PassInfoMixin<LoopPrinterPass> {
public:
explicit LoopPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Verifier pass for the \c LoopAnalysis results.
+struct LoopVerifierPass : public PassInfoMixin<LoopVerifierPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief The legacy pass manager's analysis pass to compute loop information.
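A hedged sketch of consuming the new Loop::getLocRange() span; the surrounding helper and what is done with the locations are assumptions.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DebugLoc.h"
using namespace llvm;

static void describeLoopSpan(const Loop *L) {
  // LocRange's explicit operator bool is true only when both ends are known.
  if (Loop::LocRange R = L->getLocRange()) {
    const DebugLoc &Start = R.getStart();
    const DebugLoc &End = R.getEnd();
    (void)Start;
    (void)End; // e.g. attach "loop spans Start..End" to a diagnostic.
  }
}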
diff --git a/include/llvm/Analysis/LoopInfoImpl.h b/include/llvm/Analysis/LoopInfoImpl.h
index 816a15452dae..833a2202a568 100644
--- a/include/llvm/Analysis/LoopInfoImpl.h
+++ b/include/llvm/Analysis/LoopInfoImpl.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
@@ -137,7 +138,7 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
- typename InvBlockTraits::NodeType *N = *PI;
+ typename InvBlockTraits::NodeRef N = *PI;
if (!contains(N)) { // If the block is not in the loop...
if (Out && Out != N)
return nullptr; // Multiple predecessors outside the loop
@@ -162,7 +163,7 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
InvBlockTraits::child_end(Header);
BlockT *Latch = nullptr;
for (; PI != PE; ++PI) {
- typename InvBlockTraits::NodeType *N = *PI;
+ typename InvBlockTraits::NodeRef N = *PI;
if (contains(N)) {
if (Latch) return nullptr;
Latch = N;
@@ -185,8 +186,13 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
- assert((Blocks.empty() || LIB[getHeader()] == this) &&
- "Incorrect LI specified for this loop!");
+#ifndef NDEBUG
+ if (!Blocks.empty()) {
+ auto SameHeader = LIB[getHeader()];
+ assert(contains(SameHeader) && getHeader() == SameHeader->getHeader()
+ && "Incorrect LI specified for this loop!");
+ }
+#endif
assert(NewBB && "Cannot add a null basic block to the loop!");
assert(!LIB[NewBB] && "BasicBlock already in the loop!");
@@ -211,8 +217,7 @@ void LoopBase<BlockT, LoopT>::
replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild) {
assert(OldChild->ParentLoop == this && "This loop is already broken!");
assert(!NewChild->ParentLoop && "NewChild already has a parent!");
- typename std::vector<LoopT *>::iterator I =
- std::find(SubLoops.begin(), SubLoops.end(), OldChild);
+ typename std::vector<LoopT *>::iterator I = find(SubLoops, OldChild);
assert(I != SubLoops.end() && "OldChild not in loop!");
*I = NewChild;
OldChild->ParentLoop = nullptr;
@@ -228,9 +233,9 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
// Setup for using a depth-first iterator to visit every block in the loop.
SmallVector<BlockT*, 8> ExitBBs;
getExitBlocks(ExitBBs);
- llvm::SmallPtrSet<BlockT*, 8> VisitSet;
+ df_iterator_default_set<BlockT*> VisitSet;
VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
- df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
+ df_ext_iterator<BlockT*, df_iterator_default_set<BlockT*>>
BI = df_ext_begin(getHeader(), VisitSet),
BE = df_ext_end(getHeader(), VisitSet);
@@ -240,28 +245,23 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
// Check the individual blocks.
for ( ; BI != BE; ++BI) {
BlockT *BB = *BI;
- bool HasInsideLoopSuccs = false;
- bool HasInsideLoopPreds = false;
- SmallVector<BlockT *, 2> OutsideLoopPreds;
- typedef GraphTraits<BlockT*> BlockTraits;
- for (typename BlockTraits::ChildIteratorType SI =
- BlockTraits::child_begin(BB), SE = BlockTraits::child_end(BB);
- SI != SE; ++SI)
- if (contains(*SI)) {
- HasInsideLoopSuccs = true;
- break;
- }
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
- PI != PE; ++PI) {
- BlockT *N = *PI;
- if (contains(N))
- HasInsideLoopPreds = true;
- else
- OutsideLoopPreds.push_back(N);
- }
+ assert(std::any_of(GraphTraits<BlockT*>::child_begin(BB),
+ GraphTraits<BlockT*>::child_end(BB),
+ [&](BlockT *B){return contains(B);}) &&
+ "Loop block has no in-loop successors!");
+
+ assert(std::any_of(GraphTraits<Inverse<BlockT*> >::child_begin(BB),
+ GraphTraits<Inverse<BlockT*> >::child_end(BB),
+ [&](BlockT *B){return contains(B);}) &&
+ "Loop block has no in-loop predecessors!");
+
+ SmallVector<BlockT *, 2> OutsideLoopPreds;
+ std::for_each(GraphTraits<Inverse<BlockT*> >::child_begin(BB),
+ GraphTraits<Inverse<BlockT*> >::child_end(BB),
+ [&](BlockT *B){if (!contains(B))
+ OutsideLoopPreds.push_back(B);
+ });
if (BB == getHeader()) {
assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
@@ -275,8 +275,6 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
assert(CB != OutsideLoopPreds[i] &&
"Loop has multiple entry points!");
}
- assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
- assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
assert(BB != &getHeader()->getParent()->front() &&
"Loop contains function entry block!");
@@ -296,8 +294,7 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
// Check the parent loop pointer.
if (ParentLoop) {
- assert(std::find(ParentLoop->begin(), ParentLoop->end(), this) !=
- ParentLoop->end() &&
+ assert(is_contained(*ParentLoop, this) &&
"Loop is not a subloop of its parent!");
}
#endif
@@ -316,17 +313,24 @@ void LoopBase<BlockT, LoopT>::verifyLoopNest(
}
template<class BlockT, class LoopT>
-void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth) const {
+void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth,
+ bool Verbose) const {
OS.indent(Depth*2) << "Loop at depth " << getLoopDepth()
<< " containing: ";
+ BlockT *H = getHeader();
for (unsigned i = 0; i < getBlocks().size(); ++i) {
- if (i) OS << ",";
BlockT *BB = getBlocks()[i];
- BB->printAsOperand(OS, false);
- if (BB == getHeader()) OS << "<header>";
- if (BB == getLoopLatch()) OS << "<latch>";
- if (isLoopExiting(BB)) OS << "<exiting>";
+ if (!Verbose) {
+ if (i) OS << ",";
+ BB->printAsOperand(OS, false);
+ } else OS << "\n";
+
+ if (BB == H) OS << "<header>";
+ if (isLoopLatch(BB)) OS << "<latch>";
+ if (isLoopExiting(BB)) OS << "<exiting>";
+ if (Verbose)
+ BB->print(OS);
}
OS << "\n";
@@ -516,8 +520,26 @@ void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
#endif
}
-template<class BlockT, class LoopT>
-void LoopInfoBase<BlockT, LoopT>::verify() const {
+template <typename T>
+bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
+ std::sort(BB1.begin(), BB1.end());
+ std::sort(BB2.begin(), BB2.end());
+ return BB1 == BB2;
+}
+
+template <class BlockT, class LoopT>
+static void
+addInnerLoopsToHeadersMap(DenseMap<BlockT *, const LoopT *> &LoopHeaders,
+ const LoopInfoBase<BlockT, LoopT> &LI,
+ const LoopT &L) {
+ LoopHeaders[L.getHeader()] = &L;
+ for (LoopT *SL : L)
+ addInnerLoopsToHeadersMap(LoopHeaders, LI, *SL);
+}
+
+template <class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::verify(
+ const DominatorTreeBase<BlockT> &DomTree) const {
DenseSet<const LoopT*> Loops;
for (iterator I = begin(), E = end(); I != E; ++I) {
assert(!(*I)->getParentLoop() && "Top-level loop has a parent!");
@@ -532,6 +554,48 @@ void LoopInfoBase<BlockT, LoopT>::verify() const {
assert(Loops.count(L) && "orphaned loop");
assert(L->contains(BB) && "orphaned block");
}
+
+ // Recompute LoopInfo to verify loops structure.
+ LoopInfoBase<BlockT, LoopT> OtherLI;
+ OtherLI.analyze(DomTree);
+
+ DenseMap<BlockT *, const LoopT *> LoopHeaders1;
+ DenseMap<BlockT *, const LoopT *> LoopHeaders2;
+
+ for (LoopT *L : *this)
+ addInnerLoopsToHeadersMap(LoopHeaders1, *this, *L);
+ for (LoopT *L : OtherLI)
+ addInnerLoopsToHeadersMap(LoopHeaders2, OtherLI, *L);
+ assert(LoopHeaders1.size() == LoopHeaders2.size() &&
+ "LoopInfo is incorrect.");
+
+ auto compareLoops = [&](const LoopT *L1, const LoopT *L2) {
+ BlockT *H1 = L1->getHeader();
+ BlockT *H2 = L2->getHeader();
+ if (H1 != H2)
+ return false;
+ std::vector<BlockT *> BB1 = L1->getBlocks();
+ std::vector<BlockT *> BB2 = L2->getBlocks();
+ if (!compareVectors(BB1, BB2))
+ return false;
+
+ std::vector<BlockT *> SubLoopHeaders1;
+ std::vector<BlockT *> SubLoopHeaders2;
+ for (LoopT *L : *L1)
+ SubLoopHeaders1.push_back(L->getHeader());
+ for (LoopT *L : *L2)
+ SubLoopHeaders2.push_back(L->getHeader());
+
+ if (!compareVectors(SubLoopHeaders1, SubLoopHeaders2))
+ return false;
+ return true;
+ };
+
+ for (auto &I : LoopHeaders1) {
+ BlockT *H = I.first;
+ bool LoopsMatch = compareLoops(LoopHeaders1[H], LoopHeaders2[H]);
+ assert(LoopsMatch && "LoopInfo is incorrect.");
+ }
#endif
}
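A hedged note on the new verify() interface: callers now hand in the dominator tree, since verification rebuilds a second LoopInfo from it and compares the two; LI and DT are assumed to be available.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

static void checkLoopInfo(const LoopInfo &LI, const DominatorTree &DT) {
  // Recomputes the loop structure from DT and asserts that it matches LI;
  // only meaningful in builds with assertions enabled.
  LI.verify(DT);
}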
diff --git a/include/llvm/Analysis/LoopIterator.h b/include/llvm/Analysis/LoopIterator.h
index e3dd96354c65..461f74351821 100644
--- a/include/llvm/Analysis/LoopIterator.h
+++ b/include/llvm/Analysis/LoopIterator.h
@@ -31,6 +31,66 @@ namespace llvm {
class LoopBlocksTraversal;
+// A traits type that is intended to be used in graph algorithms. The graph
+// traits starts at the loop header, and traverses the BasicBlocks that are in
+// the loop body, but not the loop header. Since the loop header is skipped,
+// the back edges are excluded.
+//
+// TODO: Explore the possibility to implement LoopBlocksTraversal in terms of
+// LoopBodyTraits, so that insertEdge doesn't have to be specialized.
+struct LoopBodyTraits {
+ using NodeRef = std::pair<const Loop *, BasicBlock *>;
+
+ // This wraps a const Loop * into the iterator, so we know which edges to
+ // filter out.
+ class WrappedSuccIterator
+ : public iterator_adaptor_base<
+ WrappedSuccIterator, succ_iterator,
+ typename std::iterator_traits<succ_iterator>::iterator_category,
+ NodeRef, std::ptrdiff_t, NodeRef *, NodeRef> {
+ using BaseT = iterator_adaptor_base<
+ WrappedSuccIterator, succ_iterator,
+ typename std::iterator_traits<succ_iterator>::iterator_category,
+ NodeRef, std::ptrdiff_t, NodeRef *, NodeRef>;
+
+ const Loop *L;
+
+ public:
+ WrappedSuccIterator(succ_iterator Begin, const Loop *L)
+ : BaseT(Begin), L(L) {}
+
+ NodeRef operator*() const { return {L, *I}; }
+ };
+
+ struct LoopBodyFilter {
+ bool operator()(NodeRef N) const {
+ const Loop *L = N.first;
+ return N.second != L->getHeader() && L->contains(N.second);
+ }
+ };
+
+ using ChildIteratorType =
+ filter_iterator<WrappedSuccIterator, LoopBodyFilter>;
+
+ static NodeRef getEntryNode(const Loop &G) { return {&G, G.getHeader()}; }
+
+ static ChildIteratorType child_begin(NodeRef Node) {
+ return make_filter_range(make_range<WrappedSuccIterator>(
+ {succ_begin(Node.second), Node.first},
+ {succ_end(Node.second), Node.first}),
+ LoopBodyFilter{})
+ .begin();
+ }
+
+ static ChildIteratorType child_end(NodeRef Node) {
+ return make_filter_range(make_range<WrappedSuccIterator>(
+ {succ_begin(Node.second), Node.first},
+ {succ_end(Node.second), Node.first}),
+ LoopBodyFilter{})
+ .end();
+ }
+};
+
/// Store the result of a depth first search within basic blocks contained by a
/// single loop.
///
@@ -114,7 +174,7 @@ template<> class po_iterator_storage<LoopBlocksTraversal, true> {
public:
po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
// These functions are defined below.
- bool insertEdge(BasicBlock *From, BasicBlock *To);
+ bool insertEdge(Optional<BasicBlock *> From, BasicBlock *To);
void finishPostorder(BasicBlock *BB);
};
@@ -166,8 +226,8 @@ public:
}
};
-inline bool po_iterator_storage<LoopBlocksTraversal, true>::
-insertEdge(BasicBlock *From, BasicBlock *To) {
+inline bool po_iterator_storage<LoopBlocksTraversal, true>::insertEdge(
+ Optional<BasicBlock *> From, BasicBlock *To) {
return LBT.visitPreorder(To);
}
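One hedged use the new traits enable (a sketch, not from the patch): a generic SCC walk over just the loop body, where any SCC with more than one node indicates a cycle other than the loop's own back edge.

#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
using namespace llvm;

static bool hasCycleInsideBody(const Loop &L) {
  typedef scc_iterator<Loop, LoopBodyTraits> SCCIter;
  for (SCCIter I = SCCIter::begin(L), E = SCCIter::end(L); I != E; ++I)
    if ((*I).size() > 1) // A multi-node SCC is a cycle besides the back edge.
      return true;
  return false;
}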
diff --git a/include/llvm/Analysis/LoopPass.h b/include/llvm/Analysis/LoopPass.h
index 89debec04e94..496ae189e57b 100644
--- a/include/llvm/Analysis/LoopPass.h
+++ b/include/llvm/Analysis/LoopPass.h
@@ -107,9 +107,7 @@ public:
// LPPassManager needs LoopInfo.
void getAnalysisUsage(AnalysisUsage &Info) const override;
- const char *getPassName() const override {
- return "Loop Pass Manager";
- }
+ StringRef getPassName() const override { return "Loop Pass Manager"; }
PMDataManager *getAsPMDataManager() override { return this; }
Pass *getAsPass() override { return this; }
@@ -157,6 +155,22 @@ private:
Loop *CurrentLoop;
};
+// This pass is required by the LCSSA transformation. It is used inside
+// LPPassManager to check if the current pass preserves LCSSA form, and if it
+// does, the pass manager calls LCSSA verification for the current loop.
+struct LCSSAVerificationPass : public FunctionPass {
+ static char ID;
+ LCSSAVerificationPass() : FunctionPass(ID) {
+ initializeLCSSAVerificationPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override { return false; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
} // End llvm namespace
#endif
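A hedged sketch of how a legacy loop pass hooks into this check; MyLoopPass is hypothetical, and only the pass ID declared above is assumed.

    // Sketch only: a hypothetical legacy loop pass requests the verification
    // pass so LPPassManager can re-check LCSSA form after it runs.
    void MyLoopPass::getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequiredID(LCSSAVerificationPass::ID);
      AU.addPreservedID(LCSSAVerificationPass::ID);
    }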
diff --git a/include/llvm/Analysis/LoopPassManager.h b/include/llvm/Analysis/LoopPassManager.h
index a89551851259..ae9c16502feb 100644
--- a/include/llvm/Analysis/LoopPassManager.h
+++ b/include/llvm/Analysis/LoopPassManager.h
@@ -16,7 +16,11 @@
#define LLVM_ANALYSIS_LOOPPASSMANAGER_H
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
@@ -38,11 +42,21 @@ extern template class AnalysisManager<Loop>;
/// pass manager infrastructure.
typedef AnalysisManager<Loop> LoopAnalysisManager;
-extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
/// A proxy from a \c LoopAnalysisManager to a \c Function.
typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
LoopAnalysisManagerFunctionProxy;
+/// Specialization of the invalidate method for the \c
+/// LoopAnalysisManagerFunctionProxy's result.
+template <>
+bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
+ Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &Inv);
+
+// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
+// template.
+extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
+
extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>;
/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>
@@ -64,21 +78,6 @@ class FunctionToLoopPassAdaptor
public:
explicit FunctionToLoopPassAdaptor(LoopPassT Pass)
: Pass(std::move(Pass)) {}
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- FunctionToLoopPassAdaptor(const FunctionToLoopPassAdaptor &Arg)
- : Pass(Arg.Pass) {}
- FunctionToLoopPassAdaptor(FunctionToLoopPassAdaptor &&Arg)
- : Pass(std::move(Arg.Pass)) {}
- friend void swap(FunctionToLoopPassAdaptor &LHS,
- FunctionToLoopPassAdaptor &RHS) {
- using std::swap;
- swap(LHS.Pass, RHS.Pass);
- }
- FunctionToLoopPassAdaptor &operator=(FunctionToLoopPassAdaptor RHS) {
- swap(*this, RHS);
- return *this;
- }
/// \brief Runs the loop passes across every loop in the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
@@ -88,6 +87,14 @@ public:
// Get the loop structure for this function
LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
+ // Also precompute all of the function analyses used by loop passes.
+ // FIXME: These should be handed into the loop passes when the loop pass
+ // management layer is reworked to follow the design of CGSCC.
+ (void)AM.getResult<AAManager>(F);
+ (void)AM.getResult<DominatorTreeAnalysis>(F);
+ (void)AM.getResult<ScalarEvolutionAnalysis>(F);
+ (void)AM.getResult<TargetLibraryAnalysis>(F);
+
PreservedAnalyses PA = PreservedAnalyses::all();
// We want to visit the loops in reverse post-order. We'll build the stack
@@ -104,24 +111,24 @@ public:
// post-order.
for (auto *L : reverse(Loops)) {
PreservedAnalyses PassPA = Pass.run(*L, LAM);
- assert(PassPA.preserved(getLoopPassPreservedAnalyses()) &&
- "Loop passes must preserve all relevant analyses");
+ // FIXME: We should verify the set of analyses relevant to Loop passes
+ // are preserved.
// We know that the loop pass couldn't have invalidated any other loop's
// analyses (that's the contract of a loop pass), so directly handle the
- // loop analysis manager's invalidation here. Also, update the
- // preserved analyses to reflect that once invalidated these can again
- // be preserved.
- PassPA = LAM.invalidate(*L, std::move(PassPA));
+ // loop analysis manager's invalidation here.
+ LAM.invalidate(*L, PassPA);
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
}
- // By definition we preserve the proxy. This precludes *any* invalidation of
- // loop analyses by the proxy, but that's OK because we've taken care to
- // invalidate analyses in the loop analysis manager incrementally above.
+ // By definition we preserve the proxy. We also preserve all analyses on
+ // Loops. This precludes *any* invalidation of loop analyses by the proxy,
+ // but that's OK because we've taken care to invalidate analyses in the
+ // loop analysis manager incrementally above.
+ PA.preserveSet<AllAnalysesOn<Loop>>();
PA.preserve<LoopAnalysisManagerFunctionProxy>();
return PA;
}
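A usage sketch for the adaptor; SomeLoopPass is a hypothetical new-PM loop pass, and only the explicit constructor shown above is assumed.

    // Sketch only: wrap a hypothetical loop pass in the adaptor so a function
    // pipeline runs it over every loop in each visited function.
    static void addSomeLoopPass(FunctionPassManager &FPM) {
      FPM.addPass(FunctionToLoopPassAdaptor<SomeLoopPass>(SomeLoopPass()));
    }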
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index 140b731c59de..b58f07e69475 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -141,6 +141,16 @@ bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, bool RoundToAlign = false,
ObjSizeMode Mode = ObjSizeMode::Exact);
+/// Try to turn a call to @llvm.objectsize into an integer value of the
+/// intrinsic's return type. Returns null on failure.
+/// If MustSucceed is true, this function will not return null, and may return
+/// conservative values governed by the second argument of the call to
+/// objectsize.
+ConstantInt *lowerObjectSizeCall(IntrinsicInst *ObjectSize,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI,
+ bool MustSucceed);
+
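A sketch of the intended call pattern; II, DL, and TLI are illustrative parameters, and the replace-and-erase step is just one plausible way a caller might consume the result.

    // Sketch only: try to fold an @llvm.objectsize call to a constant. With
    // MustSucceed == false the helper may return null, so guard the use.
    static void foldObjectSize(IntrinsicInst *II, const DataLayout &DL,
                               const TargetLibraryInfo *TLI) {
      if (ConstantInt *Size =
              lowerObjectSizeCall(II, DL, TLI, /*MustSucceed=*/false)) {
        II->replaceAllUsesWith(Size);
        II->eraseFromParent();
      }
    }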
typedef std::pair<APInt, APInt> SizeOffsetType;
/// \brief Evaluate the size and offset of an object pointed to by a Value*
diff --git a/include/llvm/Analysis/MemoryDependenceAnalysis.h b/include/llvm/Analysis/MemoryDependenceAnalysis.h
index b19dabbfc32e..33dbd22f7a20 100644
--- a/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -70,7 +70,7 @@ class MemDepResult {
/// 1. This could be a load or store for dependence queries on
/// load/store. The value loaded or stored is the produced value.
/// Note that the pointer operand may be different than that of the
- /// queried pointer due to must aliases and phi translation. Note
+ /// queried pointer due to must aliases and phi translation. Note
/// that the def may not be the same type as the query, the pointers
/// may just be must aliases.
/// 2. For loads and stores, this could be an allocation instruction. In
@@ -350,9 +350,18 @@ public:
DominatorTree &DT)
: AA(AA), AC(AC), TLI(TLI), DT(DT) {}
+ /// Handle invalidation in the new PM.
+ bool invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &Inv);
+
+ /// Some methods limit the number of instructions they will examine.
+ /// The return value of this method is the default limit that will be
+ /// used if no limit is explicitly passed in.
+ unsigned getDefaultBlockScanLimit() const;
+
/// Returns the instruction on which a memory operation depends.
///
- /// See the class comment for more details. It is illegal to call this on
+ /// See the class comment for more details. It is illegal to call this on
/// non-memory instructions.
MemDepResult getDependency(Instruction *QueryInst);
@@ -409,19 +418,25 @@ public:
/// operations. If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations. If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
- /// annotated to the query instruction to refine the result.
+ /// annotated to the query instruction to refine the result. \p Limit
+ /// can be used to set the maximum number of instructions that will be
+ /// examined to find the pointer dependency. On return, it will be set to
+ /// the number of instructions left to examine. If a null pointer is passed
+ /// in, the limit will default to the value of -memdep-block-scan-limit.
///
/// Note that this is an uncached query, and thus may be inefficient.
MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc, bool isLoad,
BasicBlock::iterator ScanIt,
BasicBlock *BB,
- Instruction *QueryInst = nullptr);
+ Instruction *QueryInst = nullptr,
+ unsigned *Limit = nullptr);
MemDepResult getSimplePointerDependencyFrom(const MemoryLocation &MemLoc,
bool isLoad,
BasicBlock::iterator ScanIt,
BasicBlock *BB,
- Instruction *QueryInst);
+ Instruction *QueryInst,
+ unsigned *Limit = nullptr);
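A sketch showing how the new Limit parameter is meant to be threaded through; MD, Loc, ScanPos, BB, and QueryLoad are illustrative names.

    // Sketch only: bound the number of instructions examined by the scan.
    static MemDepResult scanWithLimit(MemoryDependenceResults &MD,
                                      const MemoryLocation &Loc,
                                      BasicBlock::iterator ScanPos,
                                      BasicBlock *BB, Instruction *QueryLoad) {
      unsigned Limit = MD.getDefaultBlockScanLimit();
      // On return, Limit holds how much of the budget was left when the scan
      // stopped.
      return MD.getPointerDependencyFrom(Loc, /*isLoad=*/true, ScanPos, BB,
                                         QueryLoad, &Limit);
    }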
/// This analysis looks for other loads and stores with invariant.group
/// metadata and the same pointer operand. Returns Unknown if it does not
@@ -474,12 +489,12 @@ private:
class MemoryDependenceAnalysis
: public AnalysisInfoMixin<MemoryDependenceAnalysis> {
friend AnalysisInfoMixin<MemoryDependenceAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef MemoryDependenceResults Result;
- MemoryDependenceResults run(Function &F, AnalysisManager<Function> &AM);
+ MemoryDependenceResults run(Function &F, FunctionAnalysisManager &AM);
};
/// A wrapper analysis pass for the legacy pass manager that exposes a \c
diff --git a/include/llvm/Analysis/ModuleSummaryAnalysis.h b/include/llvm/Analysis/ModuleSummaryAnalysis.h
index 9f03610ba5b1..4f77170d9f68 100644
--- a/include/llvm/Analysis/ModuleSummaryAnalysis.h
+++ b/include/llvm/Analysis/ModuleSummaryAnalysis.h
@@ -16,48 +16,39 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
namespace llvm {
-
class BlockFrequencyInfo;
+class ProfileSummaryInfo;
-/// Class to build a module summary index for the given Module, possibly from
-/// a Pass.
-class ModuleSummaryIndexBuilder {
- /// The index being built
- std::unique_ptr<ModuleSummaryIndex> Index;
- /// The module for which we are building an index
- const Module *M;
+/// Direct function to compute a \c ModuleSummaryIndex from a given module.
+///
+/// If operating within a pass manager which has defined ways to compute the \c
+/// BlockFrequencyInfo for a given function, that can be provided via
+/// a std::function callback. Otherwise, this routine will manually construct
+/// that information.
+ModuleSummaryIndex buildModuleSummaryIndex(
+ const Module &M,
+ std::function<BlockFrequencyInfo *(const Function &F)> GetBFICallback,
+ ProfileSummaryInfo *PSI);
+
+/// Analysis pass to provide the ModuleSummaryIndex object.
+class ModuleSummaryIndexAnalysis
+ : public AnalysisInfoMixin<ModuleSummaryIndexAnalysis> {
+ friend AnalysisInfoMixin<ModuleSummaryIndexAnalysis>;
+ static AnalysisKey Key;
public:
- /// Default constructor
- ModuleSummaryIndexBuilder() = default;
-
- /// Constructor that builds an index for the given Module. An optional
- /// callback can be supplied to obtain the frequency info for a function.
- ModuleSummaryIndexBuilder(
- const Module *M,
- std::function<BlockFrequencyInfo *(const Function &F)> Ftor = nullptr);
-
- /// Get a reference to the index owned by builder
- ModuleSummaryIndex &getIndex() const { return *Index; }
+ typedef ModuleSummaryIndex Result;
- /// Take ownership of the built index
- std::unique_ptr<ModuleSummaryIndex> takeIndex() { return std::move(Index); }
-
-private:
- /// Compute summary for given function with optional frequency information
- void computeFunctionSummary(const Function &F,
- BlockFrequencyInfo *BFI = nullptr);
-
- /// Compute summary for given variable with optional frequency information
- void computeVariableSummary(const GlobalVariable &V);
+ Result run(Module &M, ModuleAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the ModuleSummaryIndex object.
class ModuleSummaryIndexWrapperPass : public ModulePass {
- std::unique_ptr<ModuleSummaryIndexBuilder> IndexBuilder;
+ Optional<ModuleSummaryIndex> Index;
public:
static char ID;
@@ -65,10 +56,8 @@ public:
ModuleSummaryIndexWrapperPass();
/// Get the index built by pass
- ModuleSummaryIndex &getIndex() { return IndexBuilder->getIndex(); }
- const ModuleSummaryIndex &getIndex() const {
- return IndexBuilder->getIndex();
- }
+ ModuleSummaryIndex &getIndex() { return *Index; }
+ const ModuleSummaryIndex &getIndex() const { return *Index; }
bool runOnModule(Module &M) override;
bool doFinalization(Module &M) override;
@@ -81,11 +70,6 @@ public:
// object for the module, to be written to bitcode or LLVM assembly.
//
ModulePass *createModuleSummaryIndexWrapperPass();
-
-/// Returns true if \p M is eligible for ThinLTO promotion.
-///
-/// Currently we check if it has any any InlineASM that uses an internal symbol.
-bool moduleCanBeRenamedForThinLTO(const Module &M);
}
#endif
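A hedged sketch of the new free function; passing null for both the BFI callback and the ProfileSummaryInfo is an assumption about what the routine tolerates, not something this header states.

    // Sketch only: build a summary index for M, letting the routine compute
    // BlockFrequencyInfo itself (no callback, no profile summary).
    static ModuleSummaryIndex buildIndex(const Module &M) {
      return buildModuleSummaryIndex(M, /*GetBFICallback=*/nullptr,
                                     /*PSI=*/nullptr);
    }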
diff --git a/include/llvm/Analysis/ObjCARCAliasAnalysis.h b/include/llvm/Analysis/ObjCARCAliasAnalysis.h
index 067a964bcce1..db524ff64ecd 100644
--- a/include/llvm/Analysis/ObjCARCAliasAnalysis.h
+++ b/include/llvm/Analysis/ObjCARCAliasAnalysis.h
@@ -48,7 +48,10 @@ public:
/// Handle invalidation events from the new pass manager.
///
/// By definition, this result is stateless and so remains valid.
- bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &) {
+ return false;
+ }
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
@@ -63,12 +66,12 @@ public:
/// Analysis pass providing a never-invalidated alias analysis result.
class ObjCARCAA : public AnalysisInfoMixin<ObjCARCAA> {
friend AnalysisInfoMixin<ObjCARCAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef ObjCARCAAResult Result;
- ObjCARCAAResult run(Function &F, AnalysisManager<Function> &AM);
+ ObjCARCAAResult run(Function &F, FunctionAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the ObjCARCAAResult object.
diff --git a/include/llvm/Analysis/OptimizationDiagnosticInfo.h b/include/llvm/Analysis/OptimizationDiagnosticInfo.h
index b455a6527bf6..39269269c244 100644
--- a/include/llvm/Analysis/OptimizationDiagnosticInfo.h
+++ b/include/llvm/Analysis/OptimizationDiagnosticInfo.h
@@ -16,24 +16,44 @@
#define LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
namespace llvm {
-class BlockFrequencyInfo;
class DebugLoc;
-class Function;
class LLVMContext;
class Loop;
class Pass;
class Twine;
class Value;
+/// The optimization diagnostic interface.
+///
+/// It allows reporting when optimizations are performed and when they are not,
+/// along with the reasons for it. Hotness information of the corresponding
+/// code region can be included in the remark if DiagnosticHotnessRequested is
+/// enabled in the LLVM context.
class OptimizationRemarkEmitter {
public:
OptimizationRemarkEmitter(Function *F, BlockFrequencyInfo *BFI)
: F(F), BFI(BFI) {}
+ /// \brief This variant can be used to generate ORE on demand (without the
+ /// analysis pass).
+ ///
+ /// Note that this ctor has a very different cost depending on whether
+  /// F->getContext().getDiagnosticHotnessRequested() is on or not. If it's
+  /// off, the operation is free.
+  ///
+  /// If DiagnosticHotnessRequested is on, however, it is a fairly expensive
+  /// operation since BFI and all its required analyses are computed. This is
+  /// for example useful for CGSCC passes that can't use function analysis
+  /// passes in the old PM.
+ OptimizationRemarkEmitter(Function *F);
+
OptimizationRemarkEmitter(OptimizationRemarkEmitter &&Arg)
: F(Arg.F), BFI(Arg.BFI) {}
@@ -43,33 +63,187 @@ public:
return *this;
}
+ /// The new interface to emit remarks.
+ void emit(DiagnosticInfoOptimizationBase &OptDiag);
+
+ /// Emit an optimization-applied message.
+ ///
+ /// \p PassName is the name of the pass emitting the message. If -Rpass= is
+ /// given and \p PassName matches the regular expression in -Rpass, then the
+  /// remark will be emitted. \p DLoc is the debug location where the
+  /// diagnostic is generated. \p V is the IR Value that identifies the code
+  /// region. \p Msg is the message string to use.
+ void emitOptimizationRemark(const char *PassName, const DebugLoc &DLoc,
+ const Value *V, const Twine &Msg);
+
+ /// \brief Same as above but derives the IR Value for the code region and the
+ /// debug location from the Loop parameter \p L.
+ void emitOptimizationRemark(const char *PassName, Loop *L, const Twine &Msg);
+
+ /// \brief Same as above but derives the debug location and the code region
+ /// from the debug location and the basic block of \p Inst, respectively.
+ void emitOptimizationRemark(const char *PassName, Instruction *Inst,
+ const Twine &Msg) {
+ emitOptimizationRemark(PassName, Inst->getDebugLoc(), Inst->getParent(),
+ Msg);
+ }
+
/// Emit an optimization-missed message.
///
/// \p PassName is the name of the pass emitting the message. If
/// -Rpass-missed= is given and the name matches the regular expression in
- /// -Rpass, then the remark will be emitted. \p Fn is the function triggering
- /// the remark, \p DLoc is the debug location where the diagnostic is
- /// generated. \p V is the IR Value that identifies the code region. \p Msg is
- /// the message string to use.
+ /// -Rpass, then the remark will be emitted. \p DLoc is the debug location
+ /// where the diagnostic is generated. \p V is the IR Value that identifies
+ /// the code region. \p Msg is the message string to use. If \p IsVerbose is
+ /// true, the message is considered verbose and will only be emitted when
+ /// verbose output is turned on.
void emitOptimizationRemarkMissed(const char *PassName, const DebugLoc &DLoc,
- Value *V, const Twine &Msg);
+ const Value *V, const Twine &Msg,
+ bool IsVerbose = false);
/// \brief Same as above but derives the IR Value for the code region and the
/// debug location from the Loop parameter \p L.
void emitOptimizationRemarkMissed(const char *PassName, Loop *L,
- const Twine &Msg);
+ const Twine &Msg, bool IsVerbose = false);
+
+ /// \brief Same as above but derives the debug location and the code region
+ /// from the debug location and the basic block of \p Inst, respectively.
+ void emitOptimizationRemarkMissed(const char *PassName, Instruction *Inst,
+ const Twine &Msg, bool IsVerbose = false) {
+ emitOptimizationRemarkMissed(PassName, Inst->getDebugLoc(),
+ Inst->getParent(), Msg, IsVerbose);
+ }
+
+ /// Emit an optimization analysis remark message.
+ ///
+ /// \p PassName is the name of the pass emitting the message. If
+ /// -Rpass-analysis= is given and \p PassName matches the regular expression
+ /// in -Rpass, then the remark will be emitted. \p DLoc is the debug location
+ /// where the diagnostic is generated. \p V is the IR Value that identifies
+ /// the code region. \p Msg is the message string to use. If \p IsVerbose is
+ /// true, the message is considered verbose and will only be emitted when
+ /// verbose output is turned on.
+ void emitOptimizationRemarkAnalysis(const char *PassName,
+ const DebugLoc &DLoc, const Value *V,
+ const Twine &Msg, bool IsVerbose = false);
+
+ /// \brief Same as above but derives the IR Value for the code region and the
+ /// debug location from the Loop parameter \p L.
+ void emitOptimizationRemarkAnalysis(const char *PassName, Loop *L,
+ const Twine &Msg, bool IsVerbose = false);
+
+ /// \brief Same as above but derives the debug location and the code region
+ /// from the debug location and the basic block of \p Inst, respectively.
+ void emitOptimizationRemarkAnalysis(const char *PassName, Instruction *Inst,
+ const Twine &Msg,
+ bool IsVerbose = false) {
+ emitOptimizationRemarkAnalysis(PassName, Inst->getDebugLoc(),
+ Inst->getParent(), Msg, IsVerbose);
+ }
+
+ /// \brief This variant allows specifying what should be emitted for missed
+ /// and analysis remarks in one call.
+ ///
+ /// \p PassName is the name of the pass emitting the message. If
+ /// -Rpass-missed= is given and \p PassName matches the regular expression, \p
+ /// MsgForMissedRemark is emitted.
+ ///
+ /// If -Rpass-analysis= is given and \p PassName matches the regular
+ /// expression, \p MsgForAnalysisRemark is emitted.
+ ///
+ /// The debug location and the code region is derived from \p Inst. If \p
+ /// IsVerbose is true, the message is considered verbose and will only be
+ /// emitted when verbose output is turned on.
+ void emitOptimizationRemarkMissedAndAnalysis(
+ const char *PassName, Instruction *Inst, const Twine &MsgForMissedRemark,
+ const Twine &MsgForAnalysisRemark, bool IsVerbose = false) {
+ emitOptimizationRemarkAnalysis(PassName, Inst, MsgForAnalysisRemark,
+ IsVerbose);
+ emitOptimizationRemarkMissed(PassName, Inst, MsgForMissedRemark, IsVerbose);
+ }
+
+ /// \brief Emit an optimization analysis remark related to floating-point
+ /// non-commutativity.
+ ///
+ /// \p PassName is the name of the pass emitting the message. If
+ /// -Rpass-analysis= is given and \p PassName matches the regular expression
+  /// in -Rpass, then the remark will be emitted. \p DLoc is the debug
+  /// location where the diagnostic is generated. \p V is the IR Value that
+  /// identifies the code region. \p Msg is the message string to use.
+ void emitOptimizationRemarkAnalysisFPCommute(const char *PassName,
+ const DebugLoc &DLoc,
+ const Value *V,
+ const Twine &Msg);
+
+ /// \brief Emit an optimization analysis remark related to pointer aliasing.
+ ///
+ /// \p PassName is the name of the pass emitting the message. If
+ /// -Rpass-analysis= is given and \p PassName matches the regular expression
+  /// in -Rpass, then the remark will be emitted. \p DLoc is the debug
+  /// location where the diagnostic is generated. \p V is the IR Value that
+  /// identifies the code region. \p Msg is the message string to use.
+ void emitOptimizationRemarkAnalysisAliasing(const char *PassName,
+ const DebugLoc &DLoc,
+ const Value *V, const Twine &Msg);
+
+ /// \brief Same as above but derives the IR Value for the code region and the
+ /// debug location from the Loop parameter \p L.
+ void emitOptimizationRemarkAnalysisAliasing(const char *PassName, Loop *L,
+ const Twine &Msg);
+
+ /// \brief Whether we allow for extra compile-time budget to perform more
+ /// analysis to produce fewer false positives.
+ ///
+ /// This is useful when reporting missed optimizations. In this case we can
+ /// use the extra analysis (1) to filter trivial false positives or (2) to
+ /// provide more context so that non-trivial false positives can be quickly
+ /// detected by the user.
+ bool allowExtraAnalysis() const {
+ // For now, only allow this with -fsave-optimization-record since the -Rpass
+ // options are handled in the front-end.
+ return F->getContext().getDiagnosticsOutputFile();
+ }
private:
Function *F;
BlockFrequencyInfo *BFI;
- Optional<uint64_t> computeHotness(Value *V);
+ /// If we generate BFI on demand, we need to free it when ORE is freed.
+ std::unique_ptr<BlockFrequencyInfo> OwnedBFI;
+
+ /// Compute hotness from IR value (currently assumed to be a block) if PGO is
+ /// available.
+ Optional<uint64_t> computeHotness(const Value *V);
+
+ /// Similar but use value from \p OptDiag and update hotness there.
+ void computeHotness(DiagnosticInfoOptimizationBase &OptDiag);
+
+ /// \brief Only allow verbose messages if we know we're filtering by hotness
+ /// (BFI is only set in this case).
+ bool shouldEmitVerbose() { return BFI != nullptr; }
OptimizationRemarkEmitter(const OptimizationRemarkEmitter &) = delete;
void operator=(const OptimizationRemarkEmitter &) = delete;
};
+/// \brief Add a small namespace to avoid name clashes with the classes used in
+/// the streaming interface. We want these to be short so that remarks are
+/// easy to both write and read.
+namespace ore {
+using NV = DiagnosticInfoOptimizationBase::Argument;
+using setIsVerbose = DiagnosticInfoOptimizationBase::setIsVerbose;
+using setExtraArgs = DiagnosticInfoOptimizationBase::setExtraArgs;
+}
+
+/// OptimizationRemarkEmitter legacy analysis pass
+///
+/// Note that this pass shouldn't generally be marked as preserved by other
+/// passes. It's holding onto BFI, so if the pass does not preserve BFI, BFI
+/// could be freed.
class OptimizationRemarkEmitterWrapperPass : public FunctionPass {
std::unique_ptr<OptimizationRemarkEmitter> ORE;
@@ -91,14 +265,14 @@ public:
class OptimizationRemarkEmitterAnalysis
: public AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis> {
friend AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
typedef OptimizationRemarkEmitter Result;
/// \brief Run the analysis pass over a function and produce BFI.
- Result run(Function &F, AnalysisManager<Function> &AM);
+ Result run(Function &F, FunctionAnalysisManager &AM);
};
}
#endif // LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
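A sketch of the on-demand constructor together with one of the emit helpers declared above; the pass name and message are placeholders.

    // Sketch only: construct an emitter directly from the enclosing function
    // (cheap unless hotness diagnostics were requested) and report a missed
    // optimization anchored at instruction I.
    static void reportMissed(Instruction *I) {
      OptimizationRemarkEmitter ORE(I->getFunction());
      ORE.emitOptimizationRemarkMissed("my-pass", I,
                                       "not vectorized: unsupported operation");
    }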
diff --git a/include/llvm/Analysis/PostDominators.h b/include/llvm/Analysis/PostDominators.h
index 99240a40408e..34361dac8c16 100644
--- a/include/llvm/Analysis/PostDominators.h
+++ b/include/llvm/Analysis/PostDominators.h
@@ -26,21 +26,13 @@ struct PostDominatorTree : public DominatorTreeBase<BasicBlock> {
typedef DominatorTreeBase<BasicBlock> Base;
PostDominatorTree() : DominatorTreeBase<BasicBlock>(true) {}
-
- PostDominatorTree(PostDominatorTree &&Arg)
- : Base(std::move(static_cast<Base &>(Arg))) {}
-
- PostDominatorTree &operator=(PostDominatorTree &&RHS) {
- Base::operator=(std::move(static_cast<Base &>(RHS)));
- return *this;
- }
};
/// \brief Analysis pass which computes a \c PostDominatorTree.
class PostDominatorTreeAnalysis
: public AnalysisInfoMixin<PostDominatorTreeAnalysis> {
friend AnalysisInfoMixin<PostDominatorTreeAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
@@ -58,7 +50,7 @@ class PostDominatorTreePrinterPass
public:
explicit PostDominatorTreePrinterPass(raw_ostream &OS);
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
struct PostDominatorTreeWrapperPass : public FunctionPass {
@@ -89,7 +81,7 @@ FunctionPass* createPostDomTree();
template <> struct GraphTraits<PostDominatorTree*>
: public GraphTraits<DomTreeNode*> {
- static NodeType *getEntryNode(PostDominatorTree *DT) {
+ static NodeRef getEntryNode(PostDominatorTree *DT) {
return DT->getRootNode();
}
diff --git a/include/llvm/Analysis/ProfileSummaryInfo.h b/include/llvm/Analysis/ProfileSummaryInfo.h
index cd624c8404da..d7fe76e278e3 100644
--- a/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -27,6 +27,8 @@
#include <memory>
namespace llvm {
+class BasicBlock;
+class BlockFrequencyInfo;
class ProfileSummary;
/// \brief Analysis providing profile information.
///
@@ -51,14 +53,16 @@ public:
ProfileSummaryInfo(Module &M) : M(M) {}
ProfileSummaryInfo(ProfileSummaryInfo &&Arg)
: M(Arg.M), Summary(std::move(Arg.Summary)) {}
+ /// \brief Returns true if \p F has hot function entry.
+ bool isFunctionEntryHot(const Function *F);
+ /// \brief Returns true if \p F has cold function entry.
+ bool isFunctionEntryCold(const Function *F);
/// \brief Returns true if \p F is a hot function.
- bool isHotFunction(const Function *F);
- /// \brief Returns true if \p F is a cold function.
- bool isColdFunction(const Function *F);
- /// \brief Returns true if count \p C is considered hot.
bool isHotCount(uint64_t C);
/// \brief Returns true if count \p C is considered cold.
bool isColdCount(uint64_t C);
+ /// \brief Returns true if BasicBlock \p B is considered hot.
+ bool isHotBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
};
/// An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
@@ -69,7 +73,12 @@ public:
static char ID;
ProfileSummaryInfoWrapperPass();
- ProfileSummaryInfo *getPSI(Module &M);
+ ProfileSummaryInfo *getPSI() {
+ return &*PSI;
+ }
+
+ bool doInitialization(Module &M) override;
+ bool doFinalization(Module &M) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
@@ -81,21 +90,11 @@ class ProfileSummaryAnalysis
public:
typedef ProfileSummaryInfo Result;
- ProfileSummaryAnalysis() {}
- ProfileSummaryAnalysis(const ProfileSummaryAnalysis &Arg) {}
- ProfileSummaryAnalysis(ProfileSummaryAnalysis &&Arg) {}
- ProfileSummaryAnalysis &operator=(const ProfileSummaryAnalysis &RHS) {
- return *this;
- }
- ProfileSummaryAnalysis &operator=(ProfileSummaryAnalysis &&RHS) {
- return *this;
- }
-
Result run(Module &M, ModuleAnalysisManager &);
private:
friend AnalysisInfoMixin<ProfileSummaryAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
};
/// \brief Printer pass that uses \c ProfileSummaryAnalysis.
@@ -105,7 +104,7 @@ class ProfileSummaryPrinterPass
public:
explicit ProfileSummaryPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
} // end namespace llvm
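A sketch of consuming the reshaped wrapper-pass API from a legacy pass; MyPass is hypothetical, and it assumes ProfileSummaryInfoWrapperPass was added to the pass's getAnalysisUsage.

    // Sketch only: inside a hypothetical legacy pass, query whether a
    // function's entry is hot according to the profile summary.
    bool MyPass::isHot(const Function &F) {
      ProfileSummaryInfo *PSI =
          getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
      return PSI->isFunctionEntryHot(&F);
    }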
diff --git a/include/llvm/Analysis/RegionInfo.h b/include/llvm/Analysis/RegionInfo.h
index 91bfd435f08c..f2f27a137a88 100644
--- a/include/llvm/Analysis/RegionInfo.h
+++ b/include/llvm/Analysis/RegionInfo.h
@@ -39,6 +39,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
@@ -278,7 +279,7 @@ class RegionBase : public RegionNodeBase<Tr> {
// The subregions of this region.
RegionSet children;
- typedef std::map<BlockT *, RegionNodeT *> BBNodeMapT;
+ typedef std::map<BlockT *, std::unique_ptr<RegionNodeT>> BBNodeMapT;
// Save the BasicBlock RegionNodes that are element of this Region.
mutable BBNodeMapT BBNodeMap;
@@ -567,10 +568,10 @@ public:
public:
typedef block_iterator_wrapper<IsConst> Self;
- typedef typename super::pointer pointer;
+ typedef typename super::value_type value_type;
// Construct the begin iterator.
- block_iterator_wrapper(pointer Entry, pointer Exit)
+ block_iterator_wrapper(value_type Entry, value_type Exit)
: super(df_begin(Entry)) {
// Mark the exit of the region as visited, so that the children of the
// exit and the exit itself, i.e. the block outside the region will never
@@ -579,7 +580,7 @@ public:
}
// Construct the end iterator.
- block_iterator_wrapper() : super(df_end<pointer>((BlockT *)nullptr)) {}
+ block_iterator_wrapper() : super(df_end<value_type>((BlockT *)nullptr)) {}
/*implicit*/ block_iterator_wrapper(super I) : super(I) {}
@@ -625,18 +626,26 @@ public:
/// are direct children of this Region. It does not iterate over any
/// RegionNodes that are also element of a subregion of this Region.
//@{
- typedef df_iterator<RegionNodeT *, SmallPtrSet<RegionNodeT *, 8>, false,
- GraphTraits<RegionNodeT *>> element_iterator;
+ typedef df_iterator<RegionNodeT *, df_iterator_default_set<RegionNodeT *>,
+ false, GraphTraits<RegionNodeT *>>
+ element_iterator;
- typedef df_iterator<const RegionNodeT *, SmallPtrSet<const RegionNodeT *, 8>,
- false,
- GraphTraits<const RegionNodeT *>> const_element_iterator;
+ typedef df_iterator<const RegionNodeT *,
+ df_iterator_default_set<const RegionNodeT *>, false,
+ GraphTraits<const RegionNodeT *>>
+ const_element_iterator;
element_iterator element_begin();
element_iterator element_end();
+ iterator_range<element_iterator> elements() {
+ return make_range(element_begin(), element_end());
+ }
const_element_iterator element_begin() const;
const_element_iterator element_end() const;
+ iterator_range<const_element_iterator> elements() const {
+ return make_range(element_begin(), element_end());
+ }
//@}
};
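A sketch of the new elements() range; R is an illustrative Region reference.

    // Sketch only: visit the RegionNodes that are direct elements of R and
    // pick out the ones that wrap plain basic blocks.
    static void visitElements(Region &R) {
      for (RegionNode *N : R.elements()) {
        if (!N->isSubRegion()) {
          BasicBlock *BB = N->getNodeAs<BasicBlock>();
          (void)BB; // a basic block directly contained in R
        }
      }
    }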
@@ -669,7 +678,6 @@ class RegionInfoBase {
friend class MachineRegionInfo;
typedef DenseMap<BlockT *, BlockT *> BBtoBBMap;
typedef DenseMap<BlockT *, RegionT *> BBtoRegionMap;
- typedef SmallPtrSet<RegionT *, 4> RegionSet;
RegionInfoBase();
virtual ~RegionInfoBase();
@@ -925,12 +933,12 @@ public:
/// \brief Analysis pass that exposes the \c RegionInfo for a function.
class RegionInfoAnalysis : public AnalysisInfoMixin<RegionInfoAnalysis> {
friend AnalysisInfoMixin<RegionInfoAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef RegionInfo Result;
- RegionInfo run(Function &F, AnalysisManager<Function> &AM);
+ RegionInfo run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c RegionInfo.
@@ -939,12 +947,12 @@ class RegionInfoPrinterPass : public PassInfoMixin<RegionInfoPrinterPass> {
public:
explicit RegionInfoPrinterPass(raw_ostream &OS);
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Verifier pass for the \c RegionInfo.
struct RegionInfoVerifierPass : PassInfoMixin<RegionInfoVerifierPass> {
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
template <>
diff --git a/include/llvm/Analysis/RegionInfoImpl.h b/include/llvm/Analysis/RegionInfoImpl.h
index 15dd1a2000e6..a16c534484b3 100644
--- a/include/llvm/Analysis/RegionInfoImpl.h
+++ b/include/llvm/Analysis/RegionInfoImpl.h
@@ -38,12 +38,6 @@ RegionBase<Tr>::RegionBase(BlockT *Entry, BlockT *Exit,
template <class Tr>
RegionBase<Tr>::~RegionBase() {
- // Free the cached nodes.
- for (typename BBNodeMapT::iterator it = BBNodeMap.begin(),
- ie = BBNodeMap.end();
- it != ie; ++it)
- delete it->second;
-
// Only clean the cache for this Region. Caches of child Regions will be
// cleaned when the child Regions are deleted.
BBNodeMap.clear();
@@ -71,10 +65,9 @@ void RegionBase<Tr>::replaceEntryRecursive(BlockT *NewEntry) {
RegionQueue.pop_back();
R->replaceEntry(NewEntry);
- for (typename RegionT::const_iterator RI = R->begin(), RE = R->end();
- RI != RE; ++RI) {
- if ((*RI)->getEntry() == OldEntry)
- RegionQueue.push_back(RI->get());
+ for (std::unique_ptr<RegionT> &Child : *R) {
+ if (Child->getEntry() == OldEntry)
+ RegionQueue.push_back(Child.get());
}
}
}
@@ -90,10 +83,9 @@ void RegionBase<Tr>::replaceExitRecursive(BlockT *NewExit) {
RegionQueue.pop_back();
R->replaceExit(NewExit);
- for (typename RegionT::const_iterator RI = R->begin(), RE = R->end();
- RI != RE; ++RI) {
- if ((*RI)->getExit() == OldExit)
- RegionQueue.push_back(RI->get());
+ for (std::unique_ptr<RegionT> &Child : *R) {
+ if (Child->getExit() == OldExit)
+ RegionQueue.push_back(Child.get());
}
}
}
@@ -160,13 +152,10 @@ typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopInfoT *LI,
template <class Tr>
typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getEnteringBlock() const {
BlockT *entry = getEntry();
- BlockT *Pred;
BlockT *enteringBlock = nullptr;
- for (PredIterTy PI = InvBlockTraits::child_begin(entry),
- PE = InvBlockTraits::child_end(entry);
- PI != PE; ++PI) {
- Pred = *PI;
+ for (BlockT *Pred : make_range(InvBlockTraits::child_begin(entry),
+ InvBlockTraits::child_end(entry))) {
if (DT->getNode(Pred) && !contains(Pred)) {
if (enteringBlock)
return nullptr;
@@ -181,16 +170,13 @@ typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getEnteringBlock() const {
template <class Tr>
typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getExitingBlock() const {
BlockT *exit = getExit();
- BlockT *Pred;
BlockT *exitingBlock = nullptr;
if (!exit)
return nullptr;
- for (PredIterTy PI = InvBlockTraits::child_begin(exit),
- PE = InvBlockTraits::child_end(exit);
- PI != PE; ++PI) {
- Pred = *PI;
+ for (BlockT *Pred : make_range(InvBlockTraits::child_begin(exit),
+ InvBlockTraits::child_end(exit))) {
if (contains(Pred)) {
if (exitingBlock)
return nullptr;
@@ -239,19 +225,17 @@ void RegionBase<Tr>::verifyBBInRegion(BlockT *BB) const {
BlockT *entry = getEntry(), *exit = getExit();
- for (SuccIterTy SI = BlockTraits::child_begin(BB),
- SE = BlockTraits::child_end(BB);
- SI != SE; ++SI) {
- if (!contains(*SI) && exit != *SI)
+ for (BlockT *Succ :
+ make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
+ if (!contains(Succ) && exit != Succ)
llvm_unreachable("Broken region found: edges leaving the region must go "
"to the exit node!");
}
if (entry != BB) {
- for (PredIterTy SI = InvBlockTraits::child_begin(BB),
- SE = InvBlockTraits::child_end(BB);
- SI != SE; ++SI) {
- if (!contains(*SI))
+ for (BlockT *Pred : make_range(InvBlockTraits::child_begin(BB),
+ InvBlockTraits::child_end(BB))) {
+ if (!contains(Pred))
llvm_unreachable("Broken region found: edges entering the region must "
"go to the entry node!");
}
@@ -266,11 +250,10 @@ void RegionBase<Tr>::verifyWalk(BlockT *BB, std::set<BlockT *> *visited) const {
verifyBBInRegion(BB);
- for (SuccIterTy SI = BlockTraits::child_begin(BB),
- SE = BlockTraits::child_end(BB);
- SI != SE; ++SI) {
- if (*SI != exit && visited->find(*SI) == visited->end())
- verifyWalk(*SI, visited);
+ for (BlockT *Succ :
+ make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
+ if (Succ != exit && visited->find(Succ) == visited->end())
+ verifyWalk(Succ, visited);
}
}
@@ -288,9 +271,8 @@ void RegionBase<Tr>::verifyRegion() const {
template <class Tr>
void RegionBase<Tr>::verifyRegionNest() const {
- for (typename RegionT::const_iterator RI = begin(), RE = end(); RI != RE;
- ++RI)
- (*RI)->verifyRegionNest();
+ for (const std::unique_ptr<RegionT> &R : *this)
+ R->verifyRegionNest();
verifyRegion();
}
@@ -345,13 +327,13 @@ typename Tr::RegionNodeT *RegionBase<Tr>::getBBNode(BlockT *BB) const {
typename BBNodeMapT::const_iterator at = BBNodeMap.find(BB);
- if (at != BBNodeMap.end())
- return at->second;
-
- auto Deconst = const_cast<RegionBase<Tr> *>(this);
- RegionNodeT *NewNode = new RegionNodeT(static_cast<RegionT *>(Deconst), BB);
- BBNodeMap.insert(std::make_pair(BB, NewNode));
- return NewNode;
+ if (at == BBNodeMap.end()) {
+ auto Deconst = const_cast<RegionBase<Tr> *>(this);
+ typename BBNodeMapT::value_type V = {
+ BB, make_unique<RegionNodeT>(static_cast<RegionT *>(Deconst), BB)};
+ at = BBNodeMap.insert(std::move(V)).first;
+ }
+ return at->second.get();
}
template <class Tr>
@@ -365,9 +347,9 @@ typename Tr::RegionNodeT *RegionBase<Tr>::getNode(BlockT *BB) const {
template <class Tr>
void RegionBase<Tr>::transferChildrenTo(RegionT *To) {
- for (iterator I = begin(), E = end(); I != E; ++I) {
- (*I)->parent = To;
- To->children.push_back(std::move(*I));
+ for (std::unique_ptr<RegionT> &R : *this) {
+ R->parent = To;
+ To->children.push_back(std::move(R));
}
children.clear();
}
@@ -375,9 +357,10 @@ void RegionBase<Tr>::transferChildrenTo(RegionT *To) {
template <class Tr>
void RegionBase<Tr>::addSubRegion(RegionT *SubRegion, bool moveChildren) {
assert(!SubRegion->parent && "SubRegion already has a parent!");
- assert(std::find_if(begin(), end(), [&](const std::unique_ptr<RegionT> &R) {
- return R.get() == SubRegion;
- }) == children.end() &&
+ assert(find_if(*this,
+ [&](const std::unique_ptr<RegionT> &R) {
+ return R.get() == SubRegion;
+ }) == children.end() &&
"Subregion already exists!");
SubRegion->parent = static_cast<RegionT *>(this);
@@ -389,9 +372,9 @@ void RegionBase<Tr>::addSubRegion(RegionT *SubRegion, bool moveChildren) {
assert(SubRegion->children.empty() &&
"SubRegions that contain children are not supported");
- for (element_iterator I = element_begin(), E = element_end(); I != E; ++I) {
- if (!(*I)->isSubRegion()) {
- BlockT *BB = (*I)->template getNodeAs<BlockT>();
+ for (RegionNodeT *Element : elements()) {
+ if (!Element->isSubRegion()) {
+ BlockT *BB = Element->template getNodeAs<BlockT>();
if (SubRegion->contains(BB))
RI->setRegionFor(BB, SubRegion);
@@ -399,12 +382,12 @@ void RegionBase<Tr>::addSubRegion(RegionT *SubRegion, bool moveChildren) {
}
std::vector<std::unique_ptr<RegionT>> Keep;
- for (iterator I = begin(), E = end(); I != E; ++I) {
- if (SubRegion->contains(I->get()) && I->get() != SubRegion) {
- (*I)->parent = SubRegion;
- SubRegion->children.push_back(std::move(*I));
+ for (std::unique_ptr<RegionT> &R : *this) {
+ if (SubRegion->contains(R.get()) && R.get() != SubRegion) {
+ R->parent = SubRegion;
+ SubRegion->children.push_back(std::move(R));
} else
- Keep.push_back(std::move(*I));
+ Keep.push_back(std::move(R));
}
children.clear();
@@ -418,9 +401,10 @@ template <class Tr>
typename Tr::RegionT *RegionBase<Tr>::removeSubRegion(RegionT *Child) {
assert(Child->parent == this && "Child is not a child of this region!");
Child->parent = nullptr;
- typename RegionSet::iterator I = std::find_if(
- children.begin(), children.end(),
- [&](const std::unique_ptr<RegionT> &R) { return R.get() == Child; });
+ typename RegionSet::iterator I =
+ find_if(children, [&](const std::unique_ptr<RegionT> &R) {
+ return R.get() == Child;
+ });
assert(I != children.end() && "Region does not exit. Unable to remove.");
children.erase(children.begin() + (I - begin()));
return Child;
@@ -446,10 +430,9 @@ typename Tr::RegionT *RegionBase<Tr>::getExpandedRegion() const {
RegionT *R = RI->getRegionFor(exit);
if (R->getEntry() != exit) {
- for (PredIterTy PI = InvBlockTraits::child_begin(getExit()),
- PE = InvBlockTraits::child_end(getExit());
- PI != PE; ++PI)
- if (!contains(*PI))
+ for (BlockT *Pred : make_range(InvBlockTraits::child_begin(getExit()),
+ InvBlockTraits::child_end(getExit())))
+ if (!contains(Pred))
return nullptr;
if (Tr::getNumSuccessors(exit) == 1)
return new RegionT(getEntry(), *BlockTraits::child_begin(exit), RI, DT);
@@ -459,10 +442,9 @@ typename Tr::RegionT *RegionBase<Tr>::getExpandedRegion() const {
while (R->getParent() && R->getParent()->getEntry() == exit)
R = R->getParent();
- for (PredIterTy PI = InvBlockTraits::child_begin(getExit()),
- PE = InvBlockTraits::child_end(getExit());
- PI != PE; ++PI) {
- if (!(contains(*PI) || R->contains(*PI)))
+ for (BlockT *Pred : make_range(InvBlockTraits::child_begin(getExit()),
+ InvBlockTraits::child_end(getExit()))) {
+ if (!(contains(Pred) || R->contains(Pred)))
return nullptr;
}
@@ -487,9 +469,8 @@ void RegionBase<Tr>::print(raw_ostream &OS, bool print_tree, unsigned level,
for (const auto *BB : blocks())
OS << BB->getName() << ", "; // TODO: remove the last ","
} else if (Style == PrintRN) {
- for (const_element_iterator I = element_begin(), E = element_end();
- I != E; ++I) {
- OS << **I << ", "; // TODO: remove the last ",
+ for (const RegionNodeT *Element : elements()) {
+ OS << *Element << ", "; // TODO: remove the last ",
}
}
@@ -497,8 +478,8 @@ void RegionBase<Tr>::print(raw_ostream &OS, bool print_tree, unsigned level,
}
if (print_tree) {
- for (const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
- (*RI)->print(OS, print_tree, level + 1, Style);
+ for (const std::unique_ptr<RegionT> &R : *this)
+ R->print(OS, print_tree, level + 1, Style);
}
if (Style != PrintNone)
@@ -514,15 +495,9 @@ void RegionBase<Tr>::dump() const {
template <class Tr>
void RegionBase<Tr>::clearNodeCache() {
- // Free the cached nodes.
- for (typename BBNodeMapT::iterator I = BBNodeMap.begin(),
- IE = BBNodeMap.end();
- I != IE; ++I)
- delete I->second;
-
BBNodeMap.clear();
- for (typename RegionT::iterator RI = begin(), RE = end(); RI != RE; ++RI)
- (*RI)->clearNodeCache();
+ for (std::unique_ptr<RegionT> &R : *this)
+ R->clearNodeCache();
}
//===----------------------------------------------------------------------===//
@@ -541,12 +516,12 @@ RegionInfoBase<Tr>::~RegionInfoBase() {
template <class Tr>
void RegionInfoBase<Tr>::verifyBBMap(const RegionT *R) const {
assert(R && "Re must be non-null");
- for (auto I = R->element_begin(), E = R->element_end(); I != E; ++I) {
- if (I->isSubRegion()) {
- const RegionT *SR = I->template getNodeAs<RegionT>();
+ for (const typename Tr::RegionNodeT *Element : R->elements()) {
+ if (Element->isSubRegion()) {
+ const RegionT *SR = Element->template getNodeAs<RegionT>();
verifyBBMap(SR);
} else {
- BlockT *BB = I->template getNodeAs<BlockT>();
+ BlockT *BB = Element->template getNodeAs<BlockT>();
if (getRegionFor(BB) != R)
llvm_unreachable("BB map does not match region nesting");
}
@@ -556,10 +531,8 @@ void RegionInfoBase<Tr>::verifyBBMap(const RegionT *R) const {
template <class Tr>
bool RegionInfoBase<Tr>::isCommonDomFrontier(BlockT *BB, BlockT *entry,
BlockT *exit) const {
- for (PredIterTy PI = InvBlockTraits::child_begin(BB),
- PE = InvBlockTraits::child_end(BB);
- PI != PE; ++PI) {
- BlockT *P = *PI;
+ for (BlockT *P : make_range(InvBlockTraits::child_begin(BB),
+ InvBlockTraits::child_end(BB))) {
if (DT->dominates(entry, P) && !DT->dominates(exit, P))
return false;
}
@@ -590,20 +563,18 @@ bool RegionInfoBase<Tr>::isRegion(BlockT *entry, BlockT *exit) const {
DST *exitSuccs = &DF->find(exit)->second;
// Do not allow edges leaving the region.
- for (typename DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
- SI != SE; ++SI) {
- if (*SI == exit || *SI == entry)
+ for (BlockT *Succ : *entrySuccs) {
+ if (Succ == exit || Succ == entry)
continue;
- if (exitSuccs->find(*SI) == exitSuccs->end())
+ if (exitSuccs->find(Succ) == exitSuccs->end())
return false;
- if (!isCommonDomFrontier(*SI, entry, exit))
+ if (!isCommonDomFrontier(Succ, entry, exit))
return false;
}
// Do not allow edges pointing into the region.
- for (typename DST::iterator SI = exitSuccs->begin(), SE = exitSuccs->end();
- SI != SE; ++SI) {
- if (DT->properlyDominates(entry, *SI) && *SI != exit)
+ for (BlockT *Succ : *exitSuccs) {
+ if (DT->properlyDominates(entry, Succ) && Succ != exit)
return false;
}
@@ -663,7 +634,7 @@ typename Tr::RegionT *RegionInfoBase<Tr>::createRegion(BlockT *entry,
RegionT *region =
new RegionT(entry, exit, static_cast<RegionInfoT *>(this), DT);
- BBtoRegion.insert(std::make_pair(entry, region));
+ BBtoRegion.insert({entry, region});
#ifdef EXPENSIVE_CHECKS
region->verifyRegion();
@@ -758,9 +729,8 @@ void RegionInfoBase<Tr>::buildRegionsTree(DomTreeNodeT *N, RegionT *region) {
BBtoRegion[BB] = region;
}
- for (typename DomTreeNodeT::iterator CI = N->begin(), CE = N->end(); CI != CE;
- ++CI) {
- buildRegionsTree(*CI, region);
+ for (DomTreeNodeBase<BlockT> *C : *N) {
+ buildRegionsTree(C, region);
}
}
@@ -850,10 +820,9 @@ RegionInfoBase<Tr>::getMaxRegionExit(BlockT *BB) const {
ExitR->getParent()->getEntry() == Exit)
ExitR = ExitR->getParent();
- for (PredIterTy PI = InvBlockTraits::child_begin(Exit),
- PE = InvBlockTraits::child_end(Exit);
- PI != PE; ++PI) {
- if (!R->contains(*PI) && !ExitR->contains(*PI))
+ for (BlockT *Pred : make_range(InvBlockTraits::child_begin(Exit),
+ InvBlockTraits::child_end(Exit))) {
+ if (!R->contains(Pred) && !ExitR->contains(Pred))
break;
}
diff --git a/include/llvm/Analysis/RegionIterator.h b/include/llvm/Analysis/RegionIterator.h
index ced58dfabdd5..de2f3bf3f12b 100644
--- a/include/llvm/Analysis/RegionIterator.h
+++ b/include/llvm/Analysis/RegionIterator.h
@@ -30,10 +30,10 @@ namespace llvm {
///
/// For a subregion RegionNode there is just one successor. The RegionNode
/// representing the exit of the subregion.
-template<class NodeType, class BlockT, class RegionT>
-class RNSuccIterator : public std::iterator<std::forward_iterator_tag,
- NodeType, ptrdiff_t> {
- typedef std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> super;
+template <class NodeRef, class BlockT, class RegionT>
+class RNSuccIterator
+ : public std::iterator<std::forward_iterator_tag, NodeRef> {
+ typedef std::iterator<std::forward_iterator_tag, NodeRef> super;
typedef GraphTraits<BlockT*> BlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
@@ -49,8 +49,14 @@ class RNSuccIterator : public std::iterator<std::forward_iterator_tag,
ItRgEnd // At the end of the regionnode successor.
};
+ static_assert(std::is_pointer<NodeRef>::value,
+ "FIXME: Currently RNSuccIterator only supports NodeRef as "
+ "pointers due to the use of pointer-specific data structures "
+ "(e.g. PointerIntPair and SmallPtrSet) internally. Generalize "
+ "it to support non-pointer types");
+
// Use two bit to represent the mode iterator.
- PointerIntPair<NodeType*, 2, ItMode> Node;
+ PointerIntPair<NodeRef, 2, ItMode> Node;
// The block successor iterator.
SuccIterTy BItor;
@@ -62,15 +68,15 @@ class RNSuccIterator : public std::iterator<std::forward_iterator_tag,
Node.setInt(ItRgEnd);
}
- NodeType* getNode() const{ return Node.getPointer(); }
+ NodeRef getNode() const { return Node.getPointer(); }
// isRegionMode - Is the current iterator in region mode?
bool isRegionMode() const { return Node.getInt() != ItBB; }
// Get the immediate successor. This function may return a Basic Block
// RegionNode or a subregion RegionNode.
- NodeType* getISucc(BlockT* BB) const {
- NodeType *succ;
+ NodeRef getISucc(BlockT *BB) const {
+ NodeRef succ;
succ = getNode()->getParent()->getNode(BB);
assert(succ && "BB not in Region or entered subregion!");
return succ;
@@ -87,14 +93,14 @@ class RNSuccIterator : public std::iterator<std::forward_iterator_tag,
return getNode()->getParent()->getExit() == BB;
}
public:
- typedef RNSuccIterator<NodeType, BlockT, RegionT> Self;
+ typedef RNSuccIterator<NodeRef, BlockT, RegionT> Self;
- typedef typename super::pointer pointer;
+ typedef typename super::value_type value_type;
/// @brief Create begin iterator of a RegionNode.
- inline RNSuccIterator(NodeType* node)
- : Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
- BItor(BlockTraits::child_begin(node->getEntry())) {
+ inline RNSuccIterator(NodeRef node)
+ : Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
+ BItor(BlockTraits::child_begin(node->getEntry())) {
// Skip the exit block
if (!isRegionMode())
@@ -106,9 +112,9 @@ public:
}
/// @brief Create an end iterator.
- inline RNSuccIterator(NodeType* node, bool)
- : Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
- BItor(BlockTraits::child_end(node->getEntry())) {}
+ inline RNSuccIterator(NodeRef node, bool)
+ : Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
+ BItor(BlockTraits::child_end(node->getEntry())) {}
inline bool operator==(const Self& x) const {
assert(isRegionMode() == x.isRegionMode() && "Broken iterator!");
@@ -120,7 +126,7 @@ public:
inline bool operator!=(const Self& x) const { return !operator==(x); }
- inline pointer operator*() const {
+ inline value_type operator*() const {
BlockT *BB = isRegionMode() ? getRegionSucc() : *BItor;
assert(!isExit(BB) && "Iterator out of range!");
return getISucc(BB);
@@ -154,43 +160,41 @@ public:
/// The Flat Region iterator will iterate over all BasicBlock RegionNodes that
/// are contained in the Region and its subregions. This is close to a virtual
/// control flow graph of the Region.
-template<class NodeType, class BlockT, class RegionT>
-class RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>
- : public std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> {
- typedef std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> super;
+template <class NodeRef, class BlockT, class RegionT>
+class RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>
+ : public std::iterator<std::forward_iterator_tag, NodeRef> {
+ typedef std::iterator<std::forward_iterator_tag, NodeRef> super;
typedef GraphTraits<BlockT*> BlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
- NodeType* Node;
+ NodeRef Node;
SuccIterTy Itor;
public:
- typedef RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT> Self;
- typedef typename super::pointer pointer;
+ typedef RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT> Self;
+ typedef typename super::value_type value_type;
/// @brief Create the iterator from a RegionNode.
///
/// Note that the incoming node must be a bb node, otherwise it will trigger
/// an assertion when we try to get a BasicBlock.
- inline RNSuccIterator(NodeType* node) :
- Node(node),
- Itor(BlockTraits::child_begin(node->getEntry())) {
- assert(!Node->isSubRegion()
- && "Subregion node not allowed in flat iterating mode!");
- assert(Node->getParent() && "A BB node must have a parent!");
-
- // Skip the exit block of the iterating region.
- while (BlockTraits::child_end(Node->getEntry()) != Itor
- && Node->getParent()->getExit() == *Itor)
- ++Itor;
+ inline RNSuccIterator(NodeRef node)
+ : Node(node), Itor(BlockTraits::child_begin(node->getEntry())) {
+ assert(!Node->isSubRegion() &&
+ "Subregion node not allowed in flat iterating mode!");
+ assert(Node->getParent() && "A BB node must have a parent!");
+
+ // Skip the exit block of the iterating region.
+ while (BlockTraits::child_end(Node->getEntry()) != Itor &&
+ Node->getParent()->getExit() == *Itor)
+ ++Itor;
}
/// @brief Create an end iterator
- inline RNSuccIterator(NodeType* node, bool) :
- Node(node),
- Itor(BlockTraits::child_end(node->getEntry())) {
- assert(!Node->isSubRegion()
- && "Subregion node not allowed in flat iterating mode!");
+ inline RNSuccIterator(NodeRef node, bool)
+ : Node(node), Itor(BlockTraits::child_end(node->getEntry())) {
+ assert(!Node->isSubRegion() &&
+ "Subregion node not allowed in flat iterating mode!");
}
inline bool operator==(const Self& x) const {
@@ -202,7 +206,7 @@ public:
inline bool operator!=(const Self& x) const { return !operator==(x); }
- inline pointer operator*() const {
+ inline value_type operator*() const {
BlockT *BB = *Itor;
// Get the iterating region.
@@ -232,14 +236,14 @@ public:
}
};
-template<class NodeType, class BlockT, class RegionT>
-inline RNSuccIterator<NodeType, BlockT, RegionT> succ_begin(NodeType* Node) {
- return RNSuccIterator<NodeType, BlockT, RegionT>(Node);
+template <class NodeRef, class BlockT, class RegionT>
+inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_begin(NodeRef Node) {
+ return RNSuccIterator<NodeRef, BlockT, RegionT>(Node);
}
-template<class NodeType, class BlockT, class RegionT>
-inline RNSuccIterator<NodeType, BlockT, RegionT> succ_end(NodeType* Node) {
- return RNSuccIterator<NodeType, BlockT, RegionT>(Node, true);
+template <class NodeRef, class BlockT, class RegionT>
+inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_end(NodeRef Node) {
+ return RNSuccIterator<NodeRef, BlockT, RegionT>(Node, true);
}
//===--------------------------------------------------------------------===//
@@ -249,58 +253,60 @@ inline RNSuccIterator<NodeType, BlockT, RegionT> succ_end(NodeType* Node) {
// NodeT can either be region node or const region node, otherwise child_begin
// and child_end fail.
-#define RegionNodeGraphTraits(NodeT, BlockT, RegionT) \
- template<> struct GraphTraits<NodeT*> { \
- typedef NodeT NodeType; \
- typedef RNSuccIterator<NodeType, BlockT, RegionT> ChildIteratorType; \
- static NodeType *getEntryNode(NodeType* N) { return N; } \
- static inline ChildIteratorType child_begin(NodeType *N) { \
- return RNSuccIterator<NodeType, BlockT, RegionT>(N); \
- } \
- static inline ChildIteratorType child_end(NodeType *N) { \
- return RNSuccIterator<NodeType, BlockT, RegionT>(N, true); \
- } \
-}; \
-template<> struct GraphTraits<FlatIt<NodeT*>> { \
- typedef NodeT NodeType; \
- typedef RNSuccIterator<FlatIt<NodeT>, BlockT, RegionT > ChildIteratorType; \
- static NodeType *getEntryNode(NodeType* N) { return N; } \
- static inline ChildIteratorType child_begin(NodeType *N) { \
- return RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>(N); \
- } \
- static inline ChildIteratorType child_end(NodeType *N) { \
- return RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>(N, true); \
- } \
-}
+#define RegionNodeGraphTraits(NodeT, BlockT, RegionT) \
+ template <> struct GraphTraits<NodeT *> { \
+ typedef NodeT *NodeRef; \
+ typedef RNSuccIterator<NodeRef, BlockT, RegionT> ChildIteratorType; \
+ static NodeRef getEntryNode(NodeRef N) { return N; } \
+ static inline ChildIteratorType child_begin(NodeRef N) { \
+ return RNSuccIterator<NodeRef, BlockT, RegionT>(N); \
+ } \
+ static inline ChildIteratorType child_end(NodeRef N) { \
+ return RNSuccIterator<NodeRef, BlockT, RegionT>(N, true); \
+ } \
+ }; \
+ template <> struct GraphTraits<FlatIt<NodeT *>> { \
+ typedef NodeT *NodeRef; \
+ typedef RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT> \
+ ChildIteratorType; \
+ static NodeRef getEntryNode(NodeRef N) { return N; } \
+ static inline ChildIteratorType child_begin(NodeRef N) { \
+ return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N); \
+ } \
+ static inline ChildIteratorType child_end(NodeRef N) { \
+ return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N, true); \
+ } \
+ }
-#define RegionGraphTraits(RegionT, NodeT) \
-template<> struct GraphTraits<RegionT*> \
- : public GraphTraits<NodeT*> { \
- typedef df_iterator<NodeType*> nodes_iterator; \
- static NodeType *getEntryNode(RegionT* R) { \
- return R->getNode(R->getEntry()); \
- } \
- static nodes_iterator nodes_begin(RegionT* R) { \
- return nodes_iterator::begin(getEntryNode(R)); \
- } \
- static nodes_iterator nodes_end(RegionT* R) { \
- return nodes_iterator::end(getEntryNode(R)); \
- } \
-}; \
-template<> struct GraphTraits<FlatIt<RegionT*> > \
- : public GraphTraits<FlatIt<NodeT*> > { \
- typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false, \
- GraphTraits<FlatIt<NodeType*> > > nodes_iterator; \
- static NodeType *getEntryNode(RegionT* R) { \
- return R->getBBNode(R->getEntry()); \
- } \
- static nodes_iterator nodes_begin(RegionT* R) { \
- return nodes_iterator::begin(getEntryNode(R)); \
- } \
- static nodes_iterator nodes_end(RegionT* R) { \
- return nodes_iterator::end(getEntryNode(R)); \
- } \
-}
+#define RegionGraphTraits(RegionT, NodeT) \
+ template <> struct GraphTraits<RegionT *> : public GraphTraits<NodeT *> { \
+ typedef df_iterator<NodeRef> nodes_iterator; \
+ static NodeRef getEntryNode(RegionT *R) { \
+ return R->getNode(R->getEntry()); \
+ } \
+ static nodes_iterator nodes_begin(RegionT *R) { \
+ return nodes_iterator::begin(getEntryNode(R)); \
+ } \
+ static nodes_iterator nodes_end(RegionT *R) { \
+ return nodes_iterator::end(getEntryNode(R)); \
+ } \
+ }; \
+ template <> \
+ struct GraphTraits<FlatIt<RegionT *>> \
+ : public GraphTraits<FlatIt<NodeT *>> { \
+ typedef df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false, \
+ GraphTraits<FlatIt<NodeRef>>> \
+ nodes_iterator; \
+ static NodeRef getEntryNode(RegionT *R) { \
+ return R->getBBNode(R->getEntry()); \
+ } \
+ static nodes_iterator nodes_begin(RegionT *R) { \
+ return nodes_iterator::begin(getEntryNode(R)); \
+ } \
+ static nodes_iterator nodes_end(RegionT *R) { \
+ return nodes_iterator::end(getEntryNode(R)); \
+ } \
+ }
RegionNodeGraphTraits(RegionNode, BasicBlock, Region);
RegionNodeGraphTraits(const RegionNode, BasicBlock, Region);
@@ -310,10 +316,11 @@ RegionGraphTraits(const Region, const RegionNode);
template <> struct GraphTraits<RegionInfo*>
: public GraphTraits<FlatIt<RegionNode*> > {
- typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
- GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+ typedef df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
+ GraphTraits<FlatIt<NodeRef>>>
+ nodes_iterator;
- static NodeType *getEntryNode(RegionInfo *RI) {
+ static NodeRef getEntryNode(RegionInfo *RI) {
return GraphTraits<FlatIt<Region*> >::getEntryNode(RI->getTopLevelRegion());
}
static nodes_iterator nodes_begin(RegionInfo* RI) {
@@ -326,10 +333,11 @@ template <> struct GraphTraits<RegionInfo*>
template <> struct GraphTraits<RegionInfoPass*>
: public GraphTraits<RegionInfo *> {
- typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
- GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+ typedef df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
+ GraphTraits<FlatIt<NodeRef>>>
+ nodes_iterator;
- static NodeType *getEntryNode(RegionInfoPass *RI) {
+ static NodeRef getEntryNode(RegionInfoPass *RI) {
return GraphTraits<RegionInfo*>::getEntryNode(&RI->getRegionInfo());
}
static nodes_iterator nodes_begin(RegionInfoPass* RI) {
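Taken together, these specializations let the generic graph utilities walk a region. A minimal sketch (function name invented) using the GraphTraits<Region *> instantiation produced by the macros above:

static void visitRegionNodes(Region *R) {
  typedef GraphTraits<Region *> RGT;          // generated by RegionGraphTraits(Region, RegionNode)
  for (auto I = RGT::nodes_begin(R), E = RGT::nodes_end(R); I != E; ++I) {
    RegionNode *RN = *I;                      // nodes_iterator is a df_iterator over NodeRef
    (void)RN;
  }
}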
diff --git a/include/llvm/Analysis/RegionPass.h b/include/llvm/Analysis/RegionPass.h
index bd51c49e87db..b5f38139abf2 100644
--- a/include/llvm/Analysis/RegionPass.h
+++ b/include/llvm/Analysis/RegionPass.h
@@ -101,9 +101,7 @@ public:
/// RGPassManager needs RegionInfo.
void getAnalysisUsage(AnalysisUsage &Info) const override;
- const char *getPassName() const override {
- return "Region Pass Manager";
- }
+ StringRef getPassName() const override { return "Region Pass Manager"; }
PMDataManager *getAsPMDataManager() override { return this; }
Pass *getAsPass() override { return this; }
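The getPassName change also reaches user passes, which now return a StringRef. A minimal, hypothetical RegionPass written against the new signature (registration boilerplate omitted):

#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionPass.h"
using namespace llvm;

namespace {
struct CountRegionsPass : public RegionPass {
  static char ID;
  unsigned NumRegions = 0;
  CountRegionsPass() : RegionPass(ID) {}
  StringRef getPassName() const override { return "Count Regions"; }
  bool runOnRegion(Region *R, RGPassManager &) override {
    if (!R->isTopLevelRegion())   // count only proper sub-regions
      ++NumRegions;
    return false;                 // the IR is never modified
  }
};
}
char CountRegionsPass::ID = 0;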
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 535b623d31ac..9dcffe1ac5fb 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -36,1769 +36,1723 @@
#include "llvm/Support/DataTypes.h"
namespace llvm {
- class APInt;
- class AssumptionCache;
- class Constant;
- class ConstantInt;
- class DominatorTree;
- class Type;
- class ScalarEvolution;
- class DataLayout;
- class TargetLibraryInfo;
- class LLVMContext;
- class Operator;
- class SCEV;
- class SCEVAddRecExpr;
- class SCEVConstant;
- class SCEVExpander;
- class SCEVPredicate;
- class SCEVUnknown;
- class Function;
-
- template <> struct FoldingSetTrait<SCEV>;
- template <> struct FoldingSetTrait<SCEVPredicate>;
-
- /// This class represents an analyzed expression in the program. These are
- /// opaque objects that the client is not allowed to do much with directly.
- ///
- class SCEV : public FoldingSetNode {
- friend struct FoldingSetTrait<SCEV>;
-
- /// A reference to an Interned FoldingSetNodeID for this node. The
- /// ScalarEvolution's BumpPtrAllocator holds the data.
- FoldingSetNodeIDRef FastID;
-
- // The SCEV baseclass this node corresponds to
- const unsigned short SCEVType;
-
- protected:
- /// This field is initialized to zero and may be used in subclasses to store
- /// miscellaneous information.
- unsigned short SubclassData;
-
- private:
- SCEV(const SCEV &) = delete;
- void operator=(const SCEV &) = delete;
+class APInt;
+class AssumptionCache;
+class Constant;
+class ConstantInt;
+class DominatorTree;
+class Type;
+class ScalarEvolution;
+class DataLayout;
+class TargetLibraryInfo;
+class LLVMContext;
+class Operator;
+class SCEV;
+class SCEVAddRecExpr;
+class SCEVConstant;
+class SCEVExpander;
+class SCEVPredicate;
+class SCEVUnknown;
+class Function;
+
+template <> struct FoldingSetTrait<SCEV>;
+template <> struct FoldingSetTrait<SCEVPredicate>;
+
+/// This class represents an analyzed expression in the program. These are
+/// opaque objects that the client is not allowed to do much with directly.
+///
+class SCEV : public FoldingSetNode {
+ friend struct FoldingSetTrait<SCEV>;
+
+ /// A reference to an Interned FoldingSetNodeID for this node. The
+ /// ScalarEvolution's BumpPtrAllocator holds the data.
+ FoldingSetNodeIDRef FastID;
+
+ // The SCEV baseclass this node corresponds to
+ const unsigned short SCEVType;
+
+protected:
+ /// This field is initialized to zero and may be used in subclasses to store
+ /// miscellaneous information.
+ unsigned short SubclassData;
+
+private:
+ SCEV(const SCEV &) = delete;
+ void operator=(const SCEV &) = delete;
+
+public:
+ /// NoWrapFlags are bitfield indices into SubclassData.
+ ///
+ /// Add and Mul expressions may have no-unsigned-wrap <NUW> or
+ /// no-signed-wrap <NSW> properties, which are derived from the IR
+ /// operator. NSW is a misnomer that we use to mean no signed overflow or
+ /// underflow.
+ ///
+ /// AddRec expressions may have a no-self-wraparound <NW> property if, in
+ /// the integer domain, abs(step) * max-iteration(loop) <=
+ /// unsigned-max(bitwidth). This means that the recurrence will never reach
+ /// its start value if the step is non-zero. Computing the same value on
+ /// each iteration is not considered wrapping, and recurrences with step = 0
+ /// are trivially <NW>. <NW> is independent of the sign of step and the
+ /// value the add recurrence starts with.
+ ///
+ /// Note that NUW and NSW are also valid properties of a recurrence, and
+ /// either implies NW. For convenience, NW will be set for a recurrence
+ /// whenever either NUW or NSW are set.
+ enum NoWrapFlags {
+ FlagAnyWrap = 0, // No guarantee.
+ FlagNW = (1 << 0), // No self-wrap.
+ FlagNUW = (1 << 1), // No unsigned wrap.
+ FlagNSW = (1 << 2), // No signed wrap.
+ NoWrapMask = (1 << 3) - 1
+ };
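A worked instance of the <NW> rule above (numbers invented for illustration): for an i8 recurrence {0,+,2} in a loop with at most 100 iterations, abs(step) * max-iteration = 2 * 100 = 200 <= 255 = unsigned-max(8 bits), so the recurrence can never return to its start value and is <NW>; with step 3 the product is 300 > 255 and this rule alone guarantees nothing.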
- public:
- /// NoWrapFlags are bitfield indices into SubclassData.
- ///
- /// Add and Mul expressions may have no-unsigned-wrap <NUW> or
- /// no-signed-wrap <NSW> properties, which are derived from the IR
- /// operator. NSW is a misnomer that we use to mean no signed overflow or
- /// underflow.
- ///
- /// AddRec expressions may have a no-self-wraparound <NW> property if, in
- /// the integer domain, abs(step) * max-iteration(loop) <=
- /// unsigned-max(bitwidth). This means that the recurrence will never reach
- /// its start value if the step is non-zero. Computing the same value on
- /// each iteration is not considered wrapping, and recurrences with step = 0
- /// are trivially <NW>. <NW> is independent of the sign of step and the
- /// value the add recurrence starts with.
- ///
- /// Note that NUW and NSW are also valid properties of a recurrence, and
- /// either implies NW. For convenience, NW will be set for a recurrence
- /// whenever either NUW or NSW are set.
- enum NoWrapFlags { FlagAnyWrap = 0, // No guarantee.
- FlagNW = (1 << 0), // No self-wrap.
- FlagNUW = (1 << 1), // No unsigned wrap.
- FlagNSW = (1 << 2), // No signed wrap.
- NoWrapMask = (1 << 3) -1 };
+ explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy)
+ : FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
- explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
- FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
+ unsigned getSCEVType() const { return SCEVType; }
- unsigned getSCEVType() const { return SCEVType; }
+ /// Return the LLVM type of this SCEV expression.
+ ///
+ Type *getType() const;
- /// Return the LLVM type of this SCEV expression.
- ///
- Type *getType() const;
+ /// Return true if the expression is a constant zero.
+ ///
+ bool isZero() const;
- /// Return true if the expression is a constant zero.
- ///
- bool isZero() const;
+ /// Return true if the expression is a constant one.
+ ///
+ bool isOne() const;
- /// Return true if the expression is a constant one.
- ///
- bool isOne() const;
+ /// Return true if the expression is a constant all-ones value.
+ ///
+ bool isAllOnesValue() const;
- /// Return true if the expression is a constant all-ones value.
- ///
- bool isAllOnesValue() const;
+ /// Return true if the specified scev is negated, but not a constant.
+ bool isNonConstantNegative() const;
- /// Return true if the specified scev is negated, but not a constant.
- bool isNonConstantNegative() const;
+ /// Print out the internal representation of this scalar to the specified
+ /// stream. This should really only be used for debugging purposes.
+ void print(raw_ostream &OS) const;
- /// Print out the internal representation of this scalar to the specified
- /// stream. This should really only be used for debugging purposes.
- void print(raw_ostream &OS) const;
+ /// This method is used for debugging.
+ ///
+ void dump() const;
+};
+
+// Specialize FoldingSetTrait for SCEV to avoid needing to compute
+// temporary FoldingSetNodeID values.
+template <> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
+ static void Profile(const SCEV &X, FoldingSetNodeID &ID) { ID = X.FastID; }
+ static bool Equals(const SCEV &X, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) {
+ return ID == X.FastID;
+ }
+ static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
+ return X.FastID.ComputeHash();
+ }
+};
- /// This method is used for debugging.
- ///
- void dump() const;
- };
+inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
+ S.print(OS);
+ return OS;
+}
- // Specialize FoldingSetTrait for SCEV to avoid needing to compute
- // temporary FoldingSetNodeID values.
- template<> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
- static void Profile(const SCEV &X, FoldingSetNodeID& ID) {
- ID = X.FastID;
- }
- static bool Equals(const SCEV &X, const FoldingSetNodeID &ID,
- unsigned IDHash, FoldingSetNodeID &TempID) {
- return ID == X.FastID;
- }
- static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
- return X.FastID.ComputeHash();
- }
- };
+/// An object of this class is returned by queries that could not be answered.
+/// For example, if you ask for the number of iterations of a linked-list
+/// traversal loop, you will get one of these. None of the standard SCEV
+/// operations are valid on this class; it is just a marker.
+struct SCEVCouldNotCompute : public SCEV {
+ SCEVCouldNotCompute();
- inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
- S.print(OS);
- return OS;
- }
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S);
+};
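A typical client-side use of the marker (sketch; function name invented, relying on the getBackedgeTakenCount query declared further down in this header):

static bool hasKnownTripCount(ScalarEvolution &SE, const Loop *L) {
  const SCEV *BTC = SE.getBackedgeTakenCount(L);
  return !isa<SCEVCouldNotCompute>(BTC); // CouldNotCompute marks an unanswered query
}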
- /// An object of this class is returned by queries that could not be answered.
- /// For example, if you ask for the number of iterations of a linked-list
- /// traversal loop, you will get one of these. None of the standard SCEV
- /// operations are valid on this class, it is just a marker.
- struct SCEVCouldNotCompute : public SCEV {
- SCEVCouldNotCompute();
+/// This class represents an assumption made using SCEV expressions which can
+/// be checked at run-time.
+class SCEVPredicate : public FoldingSetNode {
+ friend struct FoldingSetTrait<SCEVPredicate>;
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S);
- };
+ /// A reference to an Interned FoldingSetNodeID for this node. The
+ /// ScalarEvolution's BumpPtrAllocator holds the data.
+ FoldingSetNodeIDRef FastID;
- /// This class represents an assumption made using SCEV expressions which can
- /// be checked at run-time.
- class SCEVPredicate : public FoldingSetNode {
- friend struct FoldingSetTrait<SCEVPredicate>;
+public:
+ enum SCEVPredicateKind { P_Union, P_Equal, P_Wrap };
- /// A reference to an Interned FoldingSetNodeID for this node. The
- /// ScalarEvolution's BumpPtrAllocator holds the data.
- FoldingSetNodeIDRef FastID;
+protected:
+ SCEVPredicateKind Kind;
+ ~SCEVPredicate() = default;
+ SCEVPredicate(const SCEVPredicate &) = default;
+ SCEVPredicate &operator=(const SCEVPredicate &) = default;
- public:
- enum SCEVPredicateKind { P_Union, P_Equal, P_Wrap };
+public:
+ SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);
- protected:
- SCEVPredicateKind Kind;
- ~SCEVPredicate() = default;
- SCEVPredicate(const SCEVPredicate&) = default;
- SCEVPredicate &operator=(const SCEVPredicate&) = default;
+ SCEVPredicateKind getKind() const { return Kind; }
- public:
- SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);
+ /// Returns the estimated complexity of this predicate. This is roughly
+ /// measured in the number of run-time checks required.
+ virtual unsigned getComplexity() const { return 1; }
- SCEVPredicateKind getKind() const { return Kind; }
+ /// Returns true if the predicate is always true. This means that no
+ /// assumptions were made and nothing needs to be checked at run-time.
+ virtual bool isAlwaysTrue() const = 0;
- /// Returns the estimated complexity of this predicate. This is roughly
- /// measured in the number of run-time checks required.
- virtual unsigned getComplexity() const { return 1; }
+ /// Returns true if this predicate implies \p N.
+ virtual bool implies(const SCEVPredicate *N) const = 0;
- /// Returns true if the predicate is always true. This means that no
- /// assumptions were made and nothing needs to be checked at run-time.
- virtual bool isAlwaysTrue() const = 0;
+ /// Prints a textual representation of this predicate with an indentation of
+ /// \p Depth.
+ virtual void print(raw_ostream &OS, unsigned Depth = 0) const = 0;
- /// Returns true if this predicate implies \p N.
- virtual bool implies(const SCEVPredicate *N) const = 0;
+ /// Returns the SCEV to which this predicate applies, or nullptr if this is
+ /// a SCEVUnionPredicate.
+ virtual const SCEV *getExpr() const = 0;
+};
- /// Prints a textual representation of this predicate with an indentation of
- /// \p Depth.
- virtual void print(raw_ostream &OS, unsigned Depth = 0) const = 0;
+inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
+ P.print(OS);
+ return OS;
+}
- /// Returns the SCEV to which this predicate applies, or nullptr if this is
- /// a SCEVUnionPredicate.
- virtual const SCEV *getExpr() const = 0;
- };
+// Specialize FoldingSetTrait for SCEVPredicate to avoid needing to compute
+// temporary FoldingSetNodeID values.
+template <>
+struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
- inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
- P.print(OS);
- return OS;
+ static void Profile(const SCEVPredicate &X, FoldingSetNodeID &ID) {
+ ID = X.FastID;
}
- // Specialize FoldingSetTrait for SCEVPredicate to avoid needing to compute
- // temporary FoldingSetNodeID values.
- template <>
- struct FoldingSetTrait<SCEVPredicate>
- : DefaultFoldingSetTrait<SCEVPredicate> {
-
- static void Profile(const SCEVPredicate &X, FoldingSetNodeID &ID) {
- ID = X.FastID;
- }
-
- static bool Equals(const SCEVPredicate &X, const FoldingSetNodeID &ID,
- unsigned IDHash, FoldingSetNodeID &TempID) {
- return ID == X.FastID;
- }
- static unsigned ComputeHash(const SCEVPredicate &X,
- FoldingSetNodeID &TempID) {
- return X.FastID.ComputeHash();
- }
+ static bool Equals(const SCEVPredicate &X, const FoldingSetNodeID &ID,
+ unsigned IDHash, FoldingSetNodeID &TempID) {
+ return ID == X.FastID;
+ }
+ static unsigned ComputeHash(const SCEVPredicate &X,
+ FoldingSetNodeID &TempID) {
+ return X.FastID.ComputeHash();
+ }
+};
+
+/// This class represents an assumption that two SCEV expressions are equal,
+/// and this can be checked at run-time. We assume that the left hand side is
+/// a SCEVUnknown and the right hand side a constant.
+class SCEVEqualPredicate final : public SCEVPredicate {
+ /// We assume that LHS == RHS, where LHS is a SCEVUnknown and RHS a
+ /// constant.
+ const SCEVUnknown *LHS;
+ const SCEVConstant *RHS;
+
+public:
+ SCEVEqualPredicate(const FoldingSetNodeIDRef ID, const SCEVUnknown *LHS,
+ const SCEVConstant *RHS);
+
+ /// Implementation of the SCEVPredicate interface
+ bool implies(const SCEVPredicate *N) const override;
+ void print(raw_ostream &OS, unsigned Depth = 0) const override;
+ bool isAlwaysTrue() const override;
+ const SCEV *getExpr() const override;
+
+ /// Returns the left hand side of the equality.
+ const SCEVUnknown *getLHS() const { return LHS; }
+
+ /// Returns the right hand side of the equality.
+ const SCEVConstant *getRHS() const { return RHS; }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVPredicate *P) {
+ return P->getKind() == P_Equal;
+ }
+};
+
+/// This class represents an assumption made on an AddRec expression. Given an
+/// affine AddRec expression {a,+,b}, we assume that it has the nssw or nusw
+/// flags (defined below) in the first X iterations of the loop, where X is a
+/// SCEV expression returned by getPredicatedBackedgeTakenCount.
+///
+/// Note that this does not imply that X is equal to the backedge taken
+/// count. This means that if we have a nusw predicate for i32 {0,+,1} with a
+/// predicated backedge taken count of X, we only guarantee that {0,+,1} has
+/// nusw in the first X iterations. {0,+,1} may still wrap in the loop if we
+/// have more than X iterations.
+class SCEVWrapPredicate final : public SCEVPredicate {
+public:
+ /// Similar to SCEV::NoWrapFlags, but with slightly different semantics
+ /// for FlagNUSW. The increment is considered to be signed, and a + b
+ /// (where b is the increment) is considered to wrap if:
+ /// zext(a + b) != zext(a) + sext(b)
+ ///
+ /// If Signed is a function that takes an n-bit tuple and maps to the
+ /// integer domain as the tuple's value interpreted as two's complement,
+ /// and Unsigned a function that takes an n-bit tuple and maps to the
+ /// integer domain as the base two value of the input tuple, then a + b
+ /// has IncrementNUSW iff:
+ ///
+ /// 0 <= Unsigned(a) + Signed(b) < 2^n
+ ///
+ /// The IncrementNSSW flag has identical semantics with SCEV::FlagNSW.
+ ///
+ /// Note that the IncrementNUSW flag is not commutative: if base + inc
+ /// has IncrementNUSW, then inc + base doesn't necessarily have this
+ /// property. The reason for this is that this is used for sign/zero
+ /// extending affine AddRec SCEV expressions when a SCEVWrapPredicate is
+ /// assumed. A {base,+,inc} expression is already non-commutative with
+ /// regards to base and inc, since it is interpreted as:
+ /// (((base + inc) + inc) + inc) ...
+ enum IncrementWrapFlags {
+ IncrementAnyWrap = 0, // No guarantee.
+ IncrementNUSW = (1 << 0), // No unsigned with signed increment wrap.
+ IncrementNSSW = (1 << 1), // No signed with signed increment wrap
+ // (equivalent with SCEV::NSW)
+ IncrementNoWrapMask = (1 << 2) - 1
};
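For intuition, a worked 8-bit instance of the inequality above (values invented): with a = 0xF0 and increment b = 0xFE (Signed(b) = -2), Unsigned(a) + Signed(b) = 240 - 2 = 238 lies in [0, 256), so the step has IncrementNUSW; with a = 0x01 and the same b, 1 - 2 = -1 falls outside the range, and indeed zext16(a + b) = 0x00FF while zext16(a) + sext16(b) = 0xFFFF, i.e. the addition wraps in the NUSW sense.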
- /// This class represents an assumption that two SCEV expressions are equal,
- /// and this can be checked at run-time. We assume that the left hand side is
- /// a SCEVUnknown and the right hand side a constant.
- class SCEVEqualPredicate final : public SCEVPredicate {
- /// We assume that LHS == RHS, where LHS is a SCEVUnknown and RHS a
- /// constant.
- const SCEVUnknown *LHS;
- const SCEVConstant *RHS;
-
- public:
- SCEVEqualPredicate(const FoldingSetNodeIDRef ID, const SCEVUnknown *LHS,
- const SCEVConstant *RHS);
-
- /// Implementation of the SCEVPredicate interface
- bool implies(const SCEVPredicate *N) const override;
- void print(raw_ostream &OS, unsigned Depth = 0) const override;
- bool isAlwaysTrue() const override;
- const SCEV *getExpr() const override;
+ /// Convenient IncrementWrapFlags manipulation methods.
+ LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+ clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
+ SCEVWrapPredicate::IncrementWrapFlags OffFlags) {
+ assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
+ assert((OffFlags & IncrementNoWrapMask) == OffFlags &&
+ "Invalid flags value!");
+ return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & ~OffFlags);
+ }
- /// Returns the left hand side of the equality.
- const SCEVUnknown *getLHS() const { return LHS; }
+ LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+ maskFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, int Mask) {
+ assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
+ assert((Mask & IncrementNoWrapMask) == Mask && "Invalid mask value!");
- /// Returns the right hand side of the equality.
- const SCEVConstant *getRHS() const { return RHS; }
+ return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & Mask);
+ }
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVPredicate *P) {
- return P->getKind() == P_Equal;
- }
- };
+ LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+ setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
+ SCEVWrapPredicate::IncrementWrapFlags OnFlags) {
+ assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
+ assert((OnFlags & IncrementNoWrapMask) == OnFlags &&
+ "Invalid flags value!");
- /// This class represents an assumption made on an AddRec expression. Given an
- /// affine AddRec expression {a,+,b}, we assume that it has the nssw or nusw
- /// flags (defined below) in the first X iterations of the loop, where X is a
- /// SCEV expression returned by getPredicatedBackedgeTakenCount).
- ///
- /// Note that this does not imply that X is equal to the backedge taken
- /// count. This means that if we have a nusw predicate for i32 {0,+,1} with a
- /// predicated backedge taken count of X, we only guarantee that {0,+,1} has
- /// nusw in the first X iterations. {0,+,1} may still wrap in the loop if we
- /// have more than X iterations.
- class SCEVWrapPredicate final : public SCEVPredicate {
- public:
- /// Similar to SCEV::NoWrapFlags, but with slightly different semantics
- /// for FlagNUSW. The increment is considered to be signed, and a + b
- /// (where b is the increment) is considered to wrap if:
- /// zext(a + b) != zext(a) + sext(b)
- ///
- /// If Signed is a function that takes an n-bit tuple and maps to the
- /// integer domain as the tuples value interpreted as twos complement,
- /// and Unsigned a function that takes an n-bit tuple and maps to the
- /// integer domain as as the base two value of input tuple, then a + b
- /// has IncrementNUSW iff:
- ///
- /// 0 <= Unsigned(a) + Signed(b) < 2^n
- ///
- /// The IncrementNSSW flag has identical semantics with SCEV::FlagNSW.
- ///
- /// Note that the IncrementNUSW flag is not commutative: if base + inc
- /// has IncrementNUSW, then inc + base doesn't neccessarily have this
- /// property. The reason for this is that this is used for sign/zero
- /// extending affine AddRec SCEV expressions when a SCEVWrapPredicate is
- /// assumed. A {base,+,inc} expression is already non-commutative with
- /// regards to base and inc, since it is interpreted as:
- /// (((base + inc) + inc) + inc) ...
- enum IncrementWrapFlags {
- IncrementAnyWrap = 0, // No guarantee.
- IncrementNUSW = (1 << 0), // No unsigned with signed increment wrap.
- IncrementNSSW = (1 << 1), // No signed with signed increment wrap
- // (equivalent with SCEV::NSW)
- IncrementNoWrapMask = (1 << 2) - 1
- };
-
- /// Convenient IncrementWrapFlags manipulation methods.
- static SCEVWrapPredicate::IncrementWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
- clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
- SCEVWrapPredicate::IncrementWrapFlags OffFlags) {
- assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
- assert((OffFlags & IncrementNoWrapMask) == OffFlags &&
- "Invalid flags value!");
- return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & ~OffFlags);
- }
+ return (SCEVWrapPredicate::IncrementWrapFlags)(Flags | OnFlags);
+ }
- static SCEVWrapPredicate::IncrementWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
- maskFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, int Mask) {
- assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
- assert((Mask & IncrementNoWrapMask) == Mask && "Invalid mask value!");
+ /// Returns the set of SCEVWrapPredicate no wrap flags implied by a
+ /// SCEVAddRecExpr.
+ LLVM_NODISCARD static SCEVWrapPredicate::IncrementWrapFlags
+ getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE);
+
+private:
+ const SCEVAddRecExpr *AR;
+ IncrementWrapFlags Flags;
+
+public:
+ explicit SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
+ const SCEVAddRecExpr *AR,
+ IncrementWrapFlags Flags);
+
+ /// Returns the set of assumed no-overflow flags.
+ IncrementWrapFlags getFlags() const { return Flags; }
+ /// Implementation of the SCEVPredicate interface
+ const SCEV *getExpr() const override;
+ bool implies(const SCEVPredicate *N) const override;
+ void print(raw_ostream &OS, unsigned Depth = 0) const override;
+ bool isAlwaysTrue() const override;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVPredicate *P) {
+ return P->getKind() == P_Wrap;
+ }
+};
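Since every concrete predicate supplies a classof hook, the usual LLVM casting utilities apply. A small sketch (function name invented):

static bool isWrapPredicateOn(const SCEVPredicate *P, const SCEV *Expr) {
  // dyn_cast keys off SCEVWrapPredicate::classof, i.e. getKind() == P_Wrap.
  if (const auto *WP = dyn_cast<SCEVWrapPredicate>(P))
    return WP->getExpr() == Expr;
  return false;
}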
+
+/// This class represents a composition of other SCEV predicates, and is the
+/// class that most clients will interact with. This is equivalent to a
+/// logical "AND" of all the predicates in the union.
+///
+/// NB! Unlike other SCEVPredicate sub-classes this class does not live in the
+/// ScalarEvolution::Preds folding set. This is why the \c add function is sound.
+class SCEVUnionPredicate final : public SCEVPredicate {
+private:
+ typedef DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>
+ PredicateMap;
+
+ /// Vector with references to all predicates in this union.
+ SmallVector<const SCEVPredicate *, 16> Preds;
+ /// Maps SCEVs to predicates for quick look-ups.
+ PredicateMap SCEVToPreds;
+
+public:
+ SCEVUnionPredicate();
+
+ const SmallVectorImpl<const SCEVPredicate *> &getPredicates() const {
+ return Preds;
+ }
- return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & Mask);
- }
+ /// Adds a predicate to this union.
+ void add(const SCEVPredicate *N);
- static SCEVWrapPredicate::IncrementWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
- setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
- SCEVWrapPredicate::IncrementWrapFlags OnFlags) {
- assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
- assert((OnFlags & IncrementNoWrapMask) == OnFlags &&
- "Invalid flags value!");
+ /// Returns a reference to a vector containing all predicates which apply to
+ /// \p Expr.
+ ArrayRef<const SCEVPredicate *> getPredicatesForExpr(const SCEV *Expr);
- return (SCEVWrapPredicate::IncrementWrapFlags)(Flags | OnFlags);
- }
+ /// Implementation of the SCEVPredicate interface
+ bool isAlwaysTrue() const override;
+ bool implies(const SCEVPredicate *N) const override;
+ void print(raw_ostream &OS, unsigned Depth) const override;
+ const SCEV *getExpr() const override;
- /// Returns the set of SCEVWrapPredicate no wrap flags implied by a
- /// SCEVAddRecExpr.
- static SCEVWrapPredicate::IncrementWrapFlags
- getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE);
+ /// We estimate the complexity of a union predicate as the number of
+ /// predicates in the union.
+ unsigned getComplexity() const override { return Preds.size(); }
- private:
- const SCEVAddRecExpr *AR;
- IncrementWrapFlags Flags;
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVPredicate *P) {
+ return P->getKind() == P_Union;
+ }
+};
+
+/// The main scalar evolution driver. Because client code (intentionally)
+/// can't do much with the SCEV objects directly, they must ask this class
+/// for services.
+class ScalarEvolution {
+public:
+ /// An enum describing the relationship between a SCEV and a loop.
+ enum LoopDisposition {
+ LoopVariant, ///< The SCEV is loop-variant (unknown).
+ LoopInvariant, ///< The SCEV is loop-invariant.
+ LoopComputable ///< The SCEV varies predictably with the loop.
+ };
- public:
- explicit SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
- const SCEVAddRecExpr *AR,
- IncrementWrapFlags Flags);
-
- /// Returns the set assumed no overflow flags.
- IncrementWrapFlags getFlags() const { return Flags; }
- /// Implementation of the SCEVPredicate interface
- const SCEV *getExpr() const override;
- bool implies(const SCEVPredicate *N) const override;
- void print(raw_ostream &OS, unsigned Depth = 0) const override;
- bool isAlwaysTrue() const override;
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVPredicate *P) {
- return P->getKind() == P_Wrap;
- }
+ /// An enum describing the relationship between a SCEV and a basic block.
+ enum BlockDisposition {
+ DoesNotDominateBlock, ///< The SCEV does not dominate the block.
+ DominatesBlock, ///< The SCEV dominates the block.
+ ProperlyDominatesBlock ///< The SCEV properly dominates the block.
};
- /// This class represents a composition of other SCEV predicates, and is the
- /// class that most clients will interact with. This is equivalent to a
- /// logical "AND" of all the predicates in the union.
- class SCEVUnionPredicate final : public SCEVPredicate {
- private:
- typedef DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>
- PredicateMap;
+ /// Convenient NoWrapFlags manipulation that hides enum casts and is
+ /// visible in the ScalarEvolution name space.
+ LLVM_NODISCARD static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags,
+ int Mask) {
+ return (SCEV::NoWrapFlags)(Flags & Mask);
+ }
+ LLVM_NODISCARD static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags,
+ SCEV::NoWrapFlags OnFlags) {
+ return (SCEV::NoWrapFlags)(Flags | OnFlags);
+ }
+ LLVM_NODISCARD static SCEV::NoWrapFlags
+ clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) {
+ return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
+ }
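A small sketch of these helpers in use (function name invented; such code would live outside the class, once ScalarEvolution is complete):

static SCEV::NoWrapFlags nswOnly(SCEV::NoWrapFlags In) {
  SCEV::NoWrapFlags F = ScalarEvolution::setFlags(In, SCEV::FlagNSW);
  F = ScalarEvolution::clearFlags(F, SCEV::FlagNUW);
  return ScalarEvolution::maskFlags(F, SCEV::NoWrapMask); // keep only defined bits
}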
- /// Vector with references to all predicates in this union.
- SmallVector<const SCEVPredicate *, 16> Preds;
- /// Maps SCEVs to predicates for quick look-ups.
- PredicateMap SCEVToPreds;
+private:
+ /// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
+ /// Value is deleted.
+ class SCEVCallbackVH final : public CallbackVH {
+ ScalarEvolution *SE;
+ void deleted() override;
+ void allUsesReplacedWith(Value *New) override;
public:
- SCEVUnionPredicate();
-
- const SmallVectorImpl<const SCEVPredicate *> &getPredicates() const {
- return Preds;
- }
-
- /// Adds a predicate to this union.
- void add(const SCEVPredicate *N);
-
- /// Returns a reference to a vector containing all predicates which apply to
- /// \p Expr.
- ArrayRef<const SCEVPredicate *> getPredicatesForExpr(const SCEV *Expr);
-
- /// Implementation of the SCEVPredicate interface
- bool isAlwaysTrue() const override;
- bool implies(const SCEVPredicate *N) const override;
- void print(raw_ostream &OS, unsigned Depth) const override;
- const SCEV *getExpr() const override;
-
- /// We estimate the complexity of a union predicate as the size number of
- /// predicates in the union.
- unsigned getComplexity() const override { return Preds.size(); }
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SCEVPredicate *P) {
- return P->getKind() == P_Union;
- }
+ SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
};
- /// The main scalar evolution driver. Because client code (intentionally)
- /// can't do much with the SCEV objects directly, they must ask this class
- /// for services.
- class ScalarEvolution {
- public:
- /// An enum describing the relationship between a SCEV and a loop.
- enum LoopDisposition {
- LoopVariant, ///< The SCEV is loop-variant (unknown).
- LoopInvariant, ///< The SCEV is loop-invariant.
- LoopComputable ///< The SCEV varies predictably with the loop.
- };
-
- /// An enum describing the relationship between a SCEV and a basic block.
- enum BlockDisposition {
- DoesNotDominateBlock, ///< The SCEV does not dominate the block.
- DominatesBlock, ///< The SCEV dominates the block.
- ProperlyDominatesBlock ///< The SCEV properly dominates the block.
- };
-
- /// Convenient NoWrapFlags manipulation that hides enum casts and is
- /// visible in the ScalarEvolution name space.
- static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
- maskFlags(SCEV::NoWrapFlags Flags, int Mask) {
- return (SCEV::NoWrapFlags)(Flags & Mask);
- }
- static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
- setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags) {
- return (SCEV::NoWrapFlags)(Flags | OnFlags);
- }
- static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
- clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) {
- return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
- }
+ friend class SCEVCallbackVH;
+ friend class SCEVExpander;
+ friend class SCEVUnknown;
- private:
- /// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
- /// Value is deleted.
- class SCEVCallbackVH final : public CallbackVH {
- ScalarEvolution *SE;
- void deleted() override;
- void allUsesReplacedWith(Value *New) override;
- public:
- SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
- };
-
- friend class SCEVCallbackVH;
- friend class SCEVExpander;
- friend class SCEVUnknown;
-
- /// The function we are analyzing.
- ///
- Function &F;
+ /// The function we are analyzing.
+ ///
+ Function &F;
- /// Does the module have any calls to the llvm.experimental.guard intrinsic
- /// at all? If this is false, we avoid doing work that will only help if
- /// thare are guards present in the IR.
- ///
- bool HasGuards;
+ /// Does the module have any calls to the llvm.experimental.guard intrinsic
+ /// at all? If this is false, we avoid doing work that will only help if
+ /// there are guards present in the IR.
+ ///
+ bool HasGuards;
- /// The target library information for the target we are targeting.
- ///
- TargetLibraryInfo &TLI;
+ /// The target library information for the target we are targeting.
+ ///
+ TargetLibraryInfo &TLI;
- /// The tracker for @llvm.assume intrinsics in this function.
- AssumptionCache &AC;
+ /// The tracker for @llvm.assume intrinsics in this function.
+ AssumptionCache &AC;
- /// The dominator tree.
- ///
- DominatorTree &DT;
+ /// The dominator tree.
+ ///
+ DominatorTree &DT;
- /// The loop information for the function we are currently analyzing.
- ///
- LoopInfo &LI;
+ /// The loop information for the function we are currently analyzing.
+ ///
+ LoopInfo &LI;
- /// This SCEV is used to represent unknown trip counts and things.
- std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;
+ /// This SCEV is used to represent unknown trip counts and things.
+ std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;
- /// The typedef for HasRecMap.
- ///
- typedef DenseMap<const SCEV *, bool> HasRecMapType;
+ /// The typedef for HasRecMap.
+ ///
+ typedef DenseMap<const SCEV *, bool> HasRecMapType;
- /// This is a cache to record whether a SCEV contains any scAddRecExpr.
- HasRecMapType HasRecMap;
+ /// This is a cache to record whether a SCEV contains any scAddRecExpr.
+ HasRecMapType HasRecMap;
- /// The typedef for ExprValueMap.
- ///
- typedef DenseMap<const SCEV *, SetVector<Value *>> ExprValueMapType;
+ /// The typedef for ExprValueMap.
+ ///
+ typedef std::pair<Value *, ConstantInt *> ValueOffsetPair;
+ typedef DenseMap<const SCEV *, SetVector<ValueOffsetPair>> ExprValueMapType;
- /// ExprValueMap -- This map records the original values from which
- /// the SCEV expr is generated from.
- ExprValueMapType ExprValueMap;
+ /// ExprValueMap -- This map records the original values from which
+ /// the SCEV expr is generated.
+ ///
+ /// We want to represent the mapping as SCEV -> ValueOffsetPair instead
+ /// of SCEV -> Value:
+ /// Suppose we know S1 expands to V1, and
+ /// S1 = S2 + C_a
+ /// S3 = S2 + C_b
+ /// where C_a and C_b are different SCEVConstants. Then we'd like to
+ /// expand S3 as V1 - C_a + C_b instead of expanding S2 literally.
+ /// It is helpful when S2 is a complex SCEV expr.
+ ///
+ /// In order to do that, we represent ExprValueMap as a mapping from
+ /// SCEV to ValueOffsetPair. We will save both S1->{V1, 0} and
+ /// S2->{V1, C_a} into the map when we create SCEV for V1. When S3
+ /// is expanded, it will first expand S2 to V1 - C_a because of
+ /// S2->{V1, C_a} in the map, then expand S3 to V1 - C_a + C_b.
+ ///
+ /// Note: S->{V, Offset} in the ExprValueMap means S can be expanded
+ /// to V - Offset.
+ ExprValueMapType ExprValueMap;
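Concretely (IR and SCEV names invented for illustration): if %iv.next materializes S1 and S1 = S2 + 1, the map gains both S1 -> {%iv.next, 0} and S2 -> {%iv.next, 1}; a later request to expand S3 = S2 + 3 can then be emitted as (%iv.next - 1) + 3 instead of re-expanding the possibly complicated S2 from scratch.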
- /// The typedef for ValueExprMap.
- ///
- typedef DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *> >
+ /// The typedef for ValueExprMap.
+ ///
+ typedef DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *>>
ValueExprMapType;
- /// This is a cache of the values we have analyzed so far.
- ///
- ValueExprMapType ValueExprMap;
-
- /// Mark predicate values currently being processed by isImpliedCond.
- DenseSet<Value*> PendingLoopPredicates;
-
- /// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
- /// conditions dominating the backedge of a loop.
- bool WalkingBEDominatingConds;
-
- /// Set to true by isKnownPredicateViaSplitting when we're trying to prove a
- /// predicate by splitting it into a set of independent predicates.
- bool ProvingSplitPredicate;
-
- /// Information about the number of loop iterations for which a loop exit's
- /// branch condition evaluates to the not-taken path. This is a temporary
- /// pair of exact and max expressions that are eventually summarized in
- /// ExitNotTakenInfo and BackedgeTakenInfo.
- struct ExitLimit {
- const SCEV *Exact;
- const SCEV *Max;
-
- /// A predicate union guard for this ExitLimit. The result is only
- /// valid if this predicate evaluates to 'true' at run-time.
- SCEVUnionPredicate Pred;
-
- /*implicit*/ ExitLimit(const SCEV *E) : Exact(E), Max(E) {}
-
- ExitLimit(const SCEV *E, const SCEV *M, SCEVUnionPredicate &P)
- : Exact(E), Max(M), Pred(P) {
- assert((isa<SCEVCouldNotCompute>(Exact) ||
- !isa<SCEVCouldNotCompute>(Max)) &&
- "Exact is not allowed to be less precise than Max");
- }
-
- /// Test whether this ExitLimit contains any computed information, or
- /// whether it's all SCEVCouldNotCompute values.
- bool hasAnyInfo() const {
- return !isa<SCEVCouldNotCompute>(Exact) ||
- !isa<SCEVCouldNotCompute>(Max);
- }
-
- /// Test whether this ExitLimit contains all information.
- bool hasFullInfo() const { return !isa<SCEVCouldNotCompute>(Exact); }
- };
-
- /// Forward declaration of ExitNotTakenExtras
- struct ExitNotTakenExtras;
-
- /// Information about the number of times a particular loop exit may be
- /// reached before exiting the loop.
- struct ExitNotTakenInfo {
- AssertingVH<BasicBlock> ExitingBlock;
- const SCEV *ExactNotTaken;
-
- ExitNotTakenExtras *ExtraInfo;
- bool Complete;
-
- ExitNotTakenInfo()
- : ExitingBlock(nullptr), ExactNotTaken(nullptr), ExtraInfo(nullptr),
- Complete(true) {}
-
- ExitNotTakenInfo(BasicBlock *ExitBlock, const SCEV *Expr,
- ExitNotTakenExtras *Ptr)
- : ExitingBlock(ExitBlock), ExactNotTaken(Expr), ExtraInfo(Ptr),
- Complete(true) {}
-
- /// Return true if all loop exits are computable.
- bool isCompleteList() const { return Complete; }
-
- /// Sets the incomplete property, indicating that one of the loop exits
- /// doesn't have a corresponding ExitNotTakenInfo entry.
- void setIncomplete() { Complete = false; }
-
- /// Returns a pointer to the predicate associated with this information,
- /// or nullptr if this doesn't exist (meaning always true).
- SCEVUnionPredicate *getPred() const {
- if (ExtraInfo)
- return &ExtraInfo->Pred;
-
- return nullptr;
- }
-
- /// Return true if the SCEV predicate associated with this information
- /// is always true.
- bool hasAlwaysTruePred() const {
- return !getPred() || getPred()->isAlwaysTrue();
- }
-
- /// Defines a simple forward iterator for ExitNotTakenInfo.
- class ExitNotTakenInfoIterator
- : public std::iterator<std::forward_iterator_tag, ExitNotTakenInfo> {
- const ExitNotTakenInfo *Start;
- unsigned Position;
-
- public:
- ExitNotTakenInfoIterator(const ExitNotTakenInfo *Start,
- unsigned Position)
- : Start(Start), Position(Position) {}
-
- const ExitNotTakenInfo &operator*() const {
- if (Position == 0)
- return *Start;
-
- return Start->ExtraInfo->Exits[Position - 1];
- }
-
- const ExitNotTakenInfo *operator->() const {
- if (Position == 0)
- return Start;
-
- return &Start->ExtraInfo->Exits[Position - 1];
- }
-
- bool operator==(const ExitNotTakenInfoIterator &RHS) const {
- return Start == RHS.Start && Position == RHS.Position;
- }
-
- bool operator!=(const ExitNotTakenInfoIterator &RHS) const {
- return Start != RHS.Start || Position != RHS.Position;
- }
-
- ExitNotTakenInfoIterator &operator++() { // Preincrement
- if (!Start)
- return *this;
-
- unsigned Elements =
- Start->ExtraInfo ? Start->ExtraInfo->Exits.size() + 1 : 1;
-
- ++Position;
-
- // We've run out of elements.
- if (Position == Elements) {
- Start = nullptr;
- Position = 0;
- }
-
- return *this;
- }
- ExitNotTakenInfoIterator operator++(int) { // Postincrement
- ExitNotTakenInfoIterator Tmp = *this;
- ++*this;
- return Tmp;
- }
- };
-
- /// Iterators
- ExitNotTakenInfoIterator begin() const {
- return ExitNotTakenInfoIterator(this, 0);
- }
- ExitNotTakenInfoIterator end() const {
- return ExitNotTakenInfoIterator(nullptr, 0);
- }
- };
-
- /// Describes the extra information that a ExitNotTakenInfo can have.
- struct ExitNotTakenExtras {
- /// The predicate associated with the ExitNotTakenInfo struct.
- SCEVUnionPredicate Pred;
-
- /// The extra exits in the loop. Only the ExitNotTakenExtras structure
- /// pointed to by the first ExitNotTakenInfo struct (associated with the
- /// first loop exit) will populate this vector to prevent having
- /// redundant information.
- SmallVector<ExitNotTakenInfo, 4> Exits;
- };
-
- /// A struct containing the information attached to a backedge.
- struct EdgeInfo {
- EdgeInfo(BasicBlock *Block, const SCEV *Taken, SCEVUnionPredicate &P) :
- ExitBlock(Block), Taken(Taken), Pred(std::move(P)) {}
-
- /// The exit basic block.
- BasicBlock *ExitBlock;
-
- /// The (exact) number of time we take the edge back.
- const SCEV *Taken;
-
- /// The SCEV predicated associated with Taken. If Pred doesn't evaluate
- /// to true, the information in Taken is not valid (or equivalent with
- /// a CouldNotCompute.
- SCEVUnionPredicate Pred;
- };
-
- /// Information about the backedge-taken count of a loop. This currently
- /// includes an exact count and a maximum count.
- ///
- class BackedgeTakenInfo {
- /// A list of computable exits and their not-taken counts. Loops almost
- /// never have more than one computable exit.
- ExitNotTakenInfo ExitNotTaken;
-
- /// An expression indicating the least maximum backedge-taken count of the
- /// loop that is known, or a SCEVCouldNotCompute. This expression is only
- /// valid if the predicates associated with all loop exits are true.
- const SCEV *Max;
-
- public:
- BackedgeTakenInfo() : Max(nullptr) {}
-
- /// Initialize BackedgeTakenInfo from a list of exact exit counts.
- BackedgeTakenInfo(SmallVectorImpl<EdgeInfo> &ExitCounts, bool Complete,
- const SCEV *MaxCount);
-
- /// Test whether this BackedgeTakenInfo contains any computed information,
- /// or whether it's all SCEVCouldNotCompute values.
- bool hasAnyInfo() const {
- return ExitNotTaken.ExitingBlock || !isa<SCEVCouldNotCompute>(Max);
- }
-
- /// Test whether this BackedgeTakenInfo contains complete information.
- bool hasFullInfo() const { return ExitNotTaken.isCompleteList(); }
-
- /// Return an expression indicating the exact backedge-taken count of the
- /// loop if it is known or SCEVCouldNotCompute otherwise. This is the
- /// number of times the loop header can be guaranteed to execute, minus
- /// one.
- ///
- /// If the SCEV predicate associated with the answer can be different
- /// from AlwaysTrue, we must add a (non null) Predicates argument.
- /// The SCEV predicate associated with the answer will be added to
- /// Predicates. A run-time check needs to be emitted for the SCEV
- /// predicate in order for the answer to be valid.
- ///
- /// Note that we should always know if we need to pass a predicate
- /// argument or not from the way the ExitCounts vector was computed.
- /// If we allowed SCEV predicates to be generated when populating this
- /// vector, this information can contain them and therefore a
- /// SCEVPredicate argument should be added to getExact.
- const SCEV *getExact(ScalarEvolution *SE,
- SCEVUnionPredicate *Predicates = nullptr) const;
-
- /// Return the number of times this loop exit may fall through to the back
- /// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
- /// this block before this number of iterations, but may exit via another
- /// block.
- const SCEV *getExact(BasicBlock *ExitingBlock, ScalarEvolution *SE) const;
-
- /// Get the max backedge taken count for the loop.
- const SCEV *getMax(ScalarEvolution *SE) const;
-
- /// Return true if any backedge taken count expressions refer to the given
- /// subexpression.
- bool hasOperand(const SCEV *S, ScalarEvolution *SE) const;
-
- /// Invalidate this result and free associated memory.
- void clear();
- };
-
- /// Cache the backedge-taken count of the loops for this function as they
- /// are computed.
- DenseMap<const Loop *, BackedgeTakenInfo> BackedgeTakenCounts;
-
- /// Cache the predicated backedge-taken count of the loops for this
- /// function as they are computed.
- DenseMap<const Loop *, BackedgeTakenInfo> PredicatedBackedgeTakenCounts;
-
- /// This map contains entries for all of the PHI instructions that we
- /// attempt to compute constant evolutions for. This allows us to avoid
- /// potentially expensive recomputation of these properties. An instruction
- /// maps to null if we are unable to compute its exit value.
- DenseMap<PHINode*, Constant*> ConstantEvolutionLoopExitValue;
-
- /// This map contains entries for all the expressions that we attempt to
- /// compute getSCEVAtScope information for, which can be expensive in
- /// extreme cases.
- DenseMap<const SCEV *,
- SmallVector<std::pair<const Loop *, const SCEV *>, 2> > ValuesAtScopes;
-
- /// Memoized computeLoopDisposition results.
- DenseMap<const SCEV *,
- SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
- LoopDispositions;
-
- /// Cache for \c loopHasNoAbnormalExits.
- DenseMap<const Loop *, bool> LoopHasNoAbnormalExits;
-
- /// Returns true if \p L contains no instruction that can abnormally exit
- /// the loop (i.e. via throwing an exception, by terminating the thread
- /// cleanly or by infinite looping in a called function). Strictly
- /// speaking, the last one is not leaving the loop, but is identical to
- /// leaving the loop for reasoning about undefined behavior.
- bool loopHasNoAbnormalExits(const Loop *L);
-
- /// Compute a LoopDisposition value.
- LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
-
- /// Memoized computeBlockDisposition results.
- DenseMap<
- const SCEV *,
- SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
- BlockDispositions;
-
- /// Compute a BlockDisposition value.
- BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
-
- /// Memoized results from getRange
- DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
-
- /// Memoized results from getRange
- DenseMap<const SCEV *, ConstantRange> SignedRanges;
-
- /// Used to parameterize getRange
- enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };
-
- /// Set the memoized range for the given SCEV.
- const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
- const ConstantRange &CR) {
- DenseMap<const SCEV *, ConstantRange> &Cache =
- Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
-
- auto Pair = Cache.insert({S, CR});
- if (!Pair.second)
- Pair.first->second = CR;
- return Pair.first->second;
+ /// This is a cache of the values we have analyzed so far.
+ ///
+ ValueExprMapType ValueExprMap;
+
+ /// Mark predicate values currently being processed by isImpliedCond.
+ SmallPtrSet<Value *, 6> PendingLoopPredicates;
+
+ /// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
+ /// conditions dominating the backedge of a loop.
+ bool WalkingBEDominatingConds;
+
+ /// Set to true by isKnownPredicateViaSplitting when we're trying to prove a
+ /// predicate by splitting it into a set of independent predicates.
+ bool ProvingSplitPredicate;
+
+ /// Information about the number of loop iterations for which a loop exit's
+ /// branch condition evaluates to the not-taken path. This is a temporary
+ /// pair of exact and max expressions that are eventually summarized in
+ /// ExitNotTakenInfo and BackedgeTakenInfo.
+ struct ExitLimit {
+ const SCEV *ExactNotTaken; // The exit is not taken exactly this many times
+ const SCEV *MaxNotTaken; // The exit is not taken at most this many times
+ bool MaxOrZero; // Not taken either exactly MaxNotTaken or zero times
+
+ /// A set of predicate guards for this ExitLimit. The result is only valid
+ /// if all of the predicates in \c Predicates evaluate to 'true' at
+ /// run-time.
+ SmallPtrSet<const SCEVPredicate *, 4> Predicates;
+
+ void addPredicate(const SCEVPredicate *P) {
+ assert(!isa<SCEVUnionPredicate>(P) && "Only add leaf predicates here!");
+ Predicates.insert(P);
}
- /// Determine the range for a particular SCEV.
- ConstantRange getRange(const SCEV *S, RangeSignHint Hint);
-
- /// Determines the range for the affine SCEVAddRecExpr {\p Start,+,\p Stop}.
- /// Helper for \c getRange.
- ConstantRange getRangeForAffineAR(const SCEV *Start, const SCEV *Stop,
- const SCEV *MaxBECount,
- unsigned BitWidth);
-
- /// Try to compute a range for the affine SCEVAddRecExpr {\p Start,+,\p
- /// Stop} by "factoring out" a ternary expression from the add recurrence.
- /// Helper called by \c getRange.
- ConstantRange getRangeViaFactoring(const SCEV *Start, const SCEV *Stop,
- const SCEV *MaxBECount,
- unsigned BitWidth);
+ /*implicit*/ ExitLimit(const SCEV *E)
+ : ExactNotTaken(E), MaxNotTaken(E), MaxOrZero(false) {}
+
+ ExitLimit(
+ const SCEV *E, const SCEV *M, bool MaxOrZero,
+ ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
+ : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
+ assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
+ !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
+ "Exact is not allowed to be less precise than Max");
+ for (auto *PredSet : PredSetList)
+ for (auto *P : *PredSet)
+ addPredicate(P);
+ }
- /// We know that there is no SCEV for the specified value. Analyze the
- /// expression.
- const SCEV *createSCEV(Value *V);
+ ExitLimit(const SCEV *E, const SCEV *M, bool MaxOrZero,
+ const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
+ : ExitLimit(E, M, MaxOrZero, {&PredSet}) {}
- /// Provide the special handling we need to analyze PHI SCEVs.
- const SCEV *createNodeForPHI(PHINode *PN);
+ ExitLimit(const SCEV *E, const SCEV *M, bool MaxOrZero)
+ : ExitLimit(E, M, MaxOrZero, None) {}
- /// Helper function called from createNodeForPHI.
- const SCEV *createAddRecFromPHI(PHINode *PN);
+ /// Test whether this ExitLimit contains any computed information, or
+ /// whether it's all SCEVCouldNotCompute values.
+ bool hasAnyInfo() const {
+ return !isa<SCEVCouldNotCompute>(ExactNotTaken) ||
+ !isa<SCEVCouldNotCompute>(MaxNotTaken);
+ }
- /// Helper function called from createNodeForPHI.
- const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
+ /// Test whether this ExitLimit contains all information.
+ bool hasFullInfo() const {
+ return !isa<SCEVCouldNotCompute>(ExactNotTaken);
+ }
+ };
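Because ExitLimit is a private helper, it is only ever built inside ScalarEvolution's own implementation; conceptually (a fragment with invented operands, not a standalone function):

  ExitLimit EL(ExactCount, MaxCount, /*MaxOrZero=*/false);
  EL.addPredicate(WrapPred);       // leaf predicates only; unions are asserted against
  bool Exact = EL.hasFullInfo();   // true iff ExactNotTaken is not SCEVCouldNotCompute
  (void)Exact;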
- /// Provide special handling for a select-like instruction (currently this
- /// is either a select instruction or a phi node). \p I is the instruction
- /// being processed, and it is assumed equivalent to "Cond ? TrueVal :
- /// FalseVal".
- const SCEV *createNodeForSelectOrPHI(Instruction *I, Value *Cond,
- Value *TrueVal, Value *FalseVal);
+ /// Information about the number of times a particular loop exit may be
+ /// reached before exiting the loop.
+ struct ExitNotTakenInfo {
+ AssertingVH<BasicBlock> ExitingBlock;
+ const SCEV *ExactNotTaken;
+ std::unique_ptr<SCEVUnionPredicate> Predicate;
+ bool hasAlwaysTruePredicate() const {
+ return !Predicate || Predicate->isAlwaysTrue();
+ }
- /// Provide the special handling we need to analyze GEP SCEVs.
- const SCEV *createNodeForGEP(GEPOperator *GEP);
+ explicit ExitNotTakenInfo(AssertingVH<BasicBlock> ExitingBlock,
+ const SCEV *ExactNotTaken,
+ std::unique_ptr<SCEVUnionPredicate> Predicate)
+ : ExitingBlock(ExitingBlock), ExactNotTaken(ExactNotTaken),
+ Predicate(std::move(Predicate)) {}
+ };
- /// Implementation code for getSCEVAtScope; called at most once for each
- /// SCEV+Loop pair.
- ///
- const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
-
- /// This looks up computed SCEV values for all instructions that depend on
- /// the given instruction and removes them from the ValueExprMap map if they
- /// reference SymName. This is used during PHI resolution.
- void forgetSymbolicName(Instruction *I, const SCEV *SymName);
-
- /// Return the BackedgeTakenInfo for the given loop, lazily computing new
- /// values if the loop hasn't been analyzed yet. The returned result is
- /// guaranteed not to be predicated.
- const BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
-
- /// Similar to getBackedgeTakenInfo, but will add predicates as required
- /// with the purpose of returning complete information.
- const BackedgeTakenInfo &getPredicatedBackedgeTakenInfo(const Loop *L);
-
- /// Compute the number of times the specified loop will iterate.
- /// If AllowPredicates is set, we will create new SCEV predicates as
- /// necessary in order to return an exact answer.
- BackedgeTakenInfo computeBackedgeTakenCount(const Loop *L,
- bool AllowPredicates = false);
-
- /// Compute the number of times the backedge of the specified loop will
- /// execute if it exits via the specified block. If AllowPredicates is set,
- /// this call will try to use a minimal set of SCEV predicates in order to
- /// return an exact answer.
- ExitLimit computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
- bool AllowPredicates = false);
-
- /// Compute the number of times the backedge of the specified loop will
- /// execute if its exit condition were a conditional branch of ExitCond,
- /// TBB, and FBB.
- ///
- /// \p ControlsExit is true if ExitCond directly controls the exit
- /// branch. In this case, we can assume that the loop exits only if the
- /// condition is true and can infer that failing to meet the condition prior
- /// to integer wraparound results in undefined behavior.
- ///
- /// If \p AllowPredicates is set, this call will try to use a minimal set of
- /// SCEV predicates in order to return an exact answer.
- ExitLimit computeExitLimitFromCond(const Loop *L,
- Value *ExitCond,
- BasicBlock *TBB,
- BasicBlock *FBB,
- bool ControlsExit,
- bool AllowPredicates = false);
-
- /// Compute the number of times the backedge of the specified loop will
- /// execute if its exit condition were a conditional branch of the ICmpInst
- /// ExitCond, TBB, and FBB. If AllowPredicates is set, this call will try
- /// to use a minimal set of SCEV predicates in order to return an exact
- /// answer.
- ExitLimit computeExitLimitFromICmp(const Loop *L,
- ICmpInst *ExitCond,
- BasicBlock *TBB,
- BasicBlock *FBB,
- bool IsSubExpr,
- bool AllowPredicates = false);
-
- /// Compute the number of times the backedge of the specified loop will
- /// execute if its exit condition were a switch with a single exiting case
- /// to ExitingBB.
- ExitLimit
- computeExitLimitFromSingleExitSwitch(const Loop *L, SwitchInst *Switch,
- BasicBlock *ExitingBB, bool IsSubExpr);
-
- /// Given an exit condition of 'icmp op load X, cst', try to see if we can
- /// compute the backedge-taken count.
- ExitLimit computeLoadConstantCompareExitLimit(LoadInst *LI,
- Constant *RHS,
- const Loop *L,
- ICmpInst::Predicate p);
-
- /// Compute the exit limit of a loop that is controlled by a
- /// "(IV >> 1) != 0" type comparison. We cannot compute the exact trip
- /// count in these cases (since SCEV has no way of expressing them), but we
- /// can still sometimes compute an upper bound.
- ///
- /// Return an ExitLimit for a loop whose backedge is guarded by `LHS Pred
- /// RHS`.
- ExitLimit computeShiftCompareExitLimit(Value *LHS, Value *RHS,
- const Loop *L,
- ICmpInst::Predicate Pred);
-
- /// If the loop is known to execute a constant number of times (the
- /// condition evolves only from constants), try to evaluate a few iterations
- /// of the loop until the exit condition takes the value ExitWhen
- /// (true or false). If we cannot evaluate the exit count of the loop,
- /// return CouldNotCompute.
- const SCEV *computeExitCountExhaustively(const Loop *L,
- Value *Cond,
- bool ExitWhen);
-
- /// Return the number of times an exit condition comparing the specified
- /// value to zero will execute. If not computable, return CouldNotCompute.
- /// If AllowPredicates is set, this call will try to use a minimal set of
- /// SCEV predicates in order to return an exact answer.
- ExitLimit howFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr,
- bool AllowPredicates = false);
-
- /// Return the number of times an exit condition checking the specified
- /// value for nonzero will execute. If not computable, return
- /// CouldNotCompute.
- ExitLimit howFarToNonZero(const SCEV *V, const Loop *L);
-
- /// Return the number of times an exit condition containing the specified
- /// less-than comparison will execute. If not computable, return
- /// CouldNotCompute.
- ///
- /// \p isSigned specifies whether the less-than is signed.
- ///
- /// \p ControlsExit is true when the LHS < RHS condition directly controls
- /// the branch (the loop exits only if the condition is true). In this
- /// case, we can use NoWrapFlags to skip overflow checks.
- ///
- /// If \p AllowPredicates is set, this call will try to use a minimal set of
- /// SCEV predicates in order to return an exact answer.
- ExitLimit howManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
- bool isSigned, bool ControlsExit,
- bool AllowPredicates = false);
-
- ExitLimit howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
- const Loop *L, bool isSigned, bool IsSubExpr,
- bool AllowPredicates = false);
-
- /// Return a predecessor of BB (which may not be an immediate predecessor)
- /// which has exactly one successor from which BB is reachable, or null if
- /// no such block is found.
- std::pair<BasicBlock *, BasicBlock *>
- getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the given FoundCondValue value evaluates to true.
- bool isImpliedCond(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- Value *FoundCondValue,
- bool Inverse);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
- /// true.
- bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, ICmpInst::Predicate FoundPred,
- const SCEV *FoundLHS, const SCEV *FoundRHS);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
- /// true.
- bool isImpliedCondOperands(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS, const SCEV *FoundRHS);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
- /// true.
- bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
- /// true. Utility function used by isImpliedCondOperands. Tries to get
- /// cases like "X `sgt` 0 => X - 1 `sgt` -1".
- bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS);
-
- /// Return true if the condition denoted by \p LHS \p Pred \p RHS is implied
- /// by a call to \c @llvm.experimental.guard in \p BB.
- bool isImpliedViaGuard(BasicBlock *BB, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
- /// true.
- ///
- /// This routine tries to rule out certain kinds of integer overflow, and
- /// then tries to reason about arithmetic properties of the predicates.
- bool isImpliedCondOperandsViaNoOverflow(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS);
-
- /// If we know that the specified Phi is in the header of its containing
- /// loop, we know the loop executes a constant number of times, and the PHI
- /// node is just a recurrence involving constants, fold it.
- Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs,
- const Loop *L);
-
- /// Test if the given expression is known to satisfy the condition described
- /// by Pred and the known constant ranges of LHS and RHS.
- ///
- bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ /// Information about the backedge-taken count of a loop. This currently
+ /// includes an exact count and a maximum count.
+ ///
+ class BackedgeTakenInfo {
+ /// A list of computable exits and their not-taken counts. Loops almost
+ /// never have more than one computable exit.
+ SmallVector<ExitNotTakenInfo, 1> ExitNotTaken;
- /// Try to prove the condition described by "LHS Pred RHS" by ruling out
- /// integer overflow.
- ///
- /// For instance, this will return true for "A s< (A + C)<nsw>" if C is
- /// positive.
- bool isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
-
- /// Try to split Pred LHS RHS into logical conjunctions (and's) and try to
- /// prove them individually.
- bool isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS);
-
- /// Try to match the Expr as "(L + R)<Flags>".
- bool splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R,
- SCEV::NoWrapFlags &Flags);
-
- /// Return true if More == (Less + C), where C is a constant. This is
- /// intended to be used as a cheaper substitute for full SCEV subtraction.
- bool computeConstantDifference(const SCEV *Less, const SCEV *More,
- APInt &C);
-
- /// Drop memoized information computed for S.
- void forgetMemoizedResults(const SCEV *S);
-
- /// Return an existing SCEV for V if there is one, otherwise return nullptr.
- const SCEV *getExistingSCEV(Value *V);
-
- /// Return false iff the given SCEV contains a SCEVUnknown with a null
- /// value pointer.
- bool checkValidity(const SCEV *S) const;
-
- /// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be
- /// equal to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}. This is
- /// equivalent to proving no signed (resp. unsigned) wrap in
- /// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
- /// (resp. `SCEVZeroExtendExpr`).
+ /// The pointer part of \c MaxAndComplete is an expression indicating the
+ /// least maximum backedge-taken count of the loop that is known, or a
+ /// SCEVCouldNotCompute. This expression is only valid if the predicates
+ /// associated with all loop exits are true.
///
- template<typename ExtendOpTy>
- bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
- const Loop *L);
+ /// The integer part of \c MaxAndComplete is a boolean indicating if \c
+ /// ExitNotTaken has an element for every exiting block in the loop.
+ PointerIntPair<const SCEV *, 1> MaxAndComplete;
- /// Try to prove NSW or NUW on \p AR relying on ConstantRange manipulation.
- SCEV::NoWrapFlags proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR);
+ /// True iff the backedge is taken either exactly Max or zero times.
+ bool MaxOrZero;
- bool isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
- ICmpInst::Predicate Pred, bool &Increasing);
-
- /// Return true if, for all loop invariant X, the predicate "LHS `Pred` X"
- /// is monotonically increasing or decreasing. In the former case set
- /// `Increasing` to true and in the latter case set `Increasing` to false.
- ///
- /// A predicate is said to be monotonically increasing if it may go from
- /// being false to being true as the loop iterates, but never the other way
- /// around. A predicate is said to be monotonically decreasing if it may go
- /// from being true to being false as the loop iterates, but never the other
- /// way around.
- bool isMonotonicPredicate(const SCEVAddRecExpr *LHS,
- ICmpInst::Predicate Pred, bool &Increasing);
-
- /// Return SCEV no-wrap flags that can be proven based on reasoning about
- /// how poison produced from no-wrap flags on this value (e.g. a nuw add)
- /// would trigger undefined behavior on overflow.
- SCEV::NoWrapFlags getNoWrapFlagsFromUB(const Value *V);
-
- /// Return true if the SCEV corresponding to \p I is never poison. Proving
- /// this is more complex than proving that just \p I is never poison, since
- /// SCEV commons expressions across control flow, and you can have cases
- /// like:
- ///
- /// idx0 = a + b;
- /// ptr[idx0] = 100;
- /// if (<condition>) {
- /// idx1 = a +nsw b;
- /// ptr[idx1] = 200;
- /// }
- ///
- /// where the SCEV expression (+ a b) is guaranteed to not be poison (and
- /// hence not sign-overflow) only if "<condition>" is true. Since both
- /// `idx0` and `idx1` will be mapped to the same SCEV expression, (+ a b),
- /// it is not okay to annotate (+ a b) with <nsw> in the above example.
- bool isSCEVExprNeverPoison(const Instruction *I);
-
- /// This is like \c isSCEVExprNeverPoison but it specifically works for
- /// instructions that will get mapped to SCEV add recurrences. Return true
- /// if \p I will never generate poison under the assumption that \p I is an
- /// add recurrence on the loop \p L.
- bool isAddRecNeverPoison(const Instruction *I, const Loop *L);
+ /// \name Helper projection functions on \c MaxAndComplete.
+ /// @{
+ bool isComplete() const { return MaxAndComplete.getInt(); }
+ const SCEV *getMax() const { return MaxAndComplete.getPointer(); }
+ /// @}
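+
+ /// Illustrative reading of the packed pair (no new API; this just
+ /// restates how the two halves are combined):
+ /// \code
+ ///   // The pointer half is the best known maximum backedge-taken count
+ ///   // (or SCEVCouldNotCompute); the integer half records whether
+ ///   // ExitNotTaken describes every exiting block.
+ ///   bool FullyAnalyzed =
+ ///       isComplete() && !isa<SCEVCouldNotCompute>(getMax());
+ /// \endcode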
public:
- ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
- DominatorTree &DT, LoopInfo &LI);
- ~ScalarEvolution();
- ScalarEvolution(ScalarEvolution &&Arg);
-
- LLVMContext &getContext() const { return F.getContext(); }
-
- /// Test if values of the given type are analyzable within the SCEV
- /// framework. This primarily includes integer types, and it can optionally
- /// include pointer types if the ScalarEvolution class has access to
- /// target-specific information.
- bool isSCEVable(Type *Ty) const;
-
- /// Return the size in bits of the specified type, for which isSCEVable must
- /// return true.
- uint64_t getTypeSizeInBits(Type *Ty) const;
-
- /// Return a type with the same bitwidth as the given type and which
- /// represents how SCEV will treat the given type, for which isSCEVable must
- /// return true. For pointer types, this is the pointer-sized integer type.
- Type *getEffectiveSCEVType(Type *Ty) const;
-
- /// Return true if the SCEV is a scAddRecExpr or it contains
- /// scAddRecExpr. The result will be cached in HasRecMap.
- ///
- bool containsAddRecurrence(const SCEV *S);
-
- /// Return the Value set from which the SCEV expr is generated.
- SetVector<Value *> *getSCEVValues(const SCEV *S);
-
- /// Erase Value from ValueExprMap and ExprValueMap.
- void eraseValueFromMap(Value *V);
-
- /// Return a SCEV expression for the full generality of the specified
- /// expression.
- const SCEV *getSCEV(Value *V);
-
- const SCEV *getConstant(ConstantInt *V);
- const SCEV *getConstant(const APInt& Val);
- const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
- const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
- const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty);
- const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty);
- const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
- const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
- const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
- return getAddExpr(Ops, Flags);
- }
- const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
- SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
- return getAddExpr(Ops, Flags);
- }
- const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
- const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
- SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
- return getMulExpr(Ops, Flags);
- }
- const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
- SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
- return getMulExpr(Ops, Flags);
- }
- const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
- const Loop *L, SCEV::NoWrapFlags Flags);
- const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
- const Loop *L, SCEV::NoWrapFlags Flags);
- const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
- const Loop *L, SCEV::NoWrapFlags Flags) {
- SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
- return getAddRecExpr(NewOp, L, Flags);
- }
- /// Returns an expression for a GEP
- ///
- /// \p PointeeType The type used as the basis for the pointer arithmetics
- /// \p BaseExpr The expression for the pointer operand.
- /// \p IndexExprs The expressions for the indices.
- /// \p InBounds Whether the GEP is in bounds.
- const SCEV *getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
- const SmallVectorImpl<const SCEV *> &IndexExprs,
- bool InBounds = false);
- const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getUnknown(Value *V);
- const SCEV *getCouldNotCompute();
-
- /// Return a SCEV for the constant 0 of a specific type.
- const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }
-
- /// Return a SCEV for the constant 1 of a specific type.
- const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
-
- /// Return an expression for sizeof AllocTy that is type IntTy
- ///
- const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
-
- /// Return an expression for offsetof on the given field with type IntTy
- ///
- const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
+ BackedgeTakenInfo() : MaxAndComplete(nullptr, 0) {}
- /// Return the SCEV object corresponding to -V.
- ///
- const SCEV *getNegativeSCEV(const SCEV *V,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
+ BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;
- /// Return the SCEV object corresponding to ~V.
- ///
- const SCEV *getNotSCEV(const SCEV *V);
-
- /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
- const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
-
- /// Return a SCEV corresponding to a conversion of the input value to the
- /// specified type. If the type must be extended, it is zero extended.
- const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty);
-
- /// Return a SCEV corresponding to a conversion of the input value to the
- /// specified type. If the type must be extended, it is sign extended.
- const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty);
-
- /// Return a SCEV corresponding to a conversion of the input value to the
- /// specified type. If the type must be extended, it is zero extended. The
- /// conversion must not be narrowing.
- const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
-
- /// Return a SCEV corresponding to a conversion of the input value to the
- /// specified type. If the type must be extended, it is sign extended. The
- /// conversion must not be narrowing.
- const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
-
- /// Return a SCEV corresponding to a conversion of the input value to the
- /// specified type. If the type must be extended, it is extended with
- /// unspecified bits. The conversion must not be narrowing.
- const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
-
- /// Return a SCEV corresponding to a conversion of the input value to the
- /// specified type. The conversion must not be widening.
- const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
-
- /// Promote the operands to the wider of the types using zero-extension, and
- /// then perform a umax operation with them.
- const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS,
- const SCEV *RHS);
-
- /// Promote the operands to the wider of the types using zero-extension, and
- /// then perform a umin operation with them.
- const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS,
- const SCEV *RHS);
-
- /// Transitively follow the chain of pointer-type operands until reaching a
- /// SCEV that does not have a single pointer operand. This returns a
- /// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
- /// cases do exist.
- const SCEV *getPointerBase(const SCEV *V);
-
- /// Return a SCEV expression for the specified value at the specified scope
- /// in the program. The L value specifies a loop nest to evaluate the
- /// expression at, where null is the top-level or a specified loop is
- /// immediately inside of the loop.
- ///
- /// This method can be used to compute the exit value for a variable defined
- /// in a loop by querying what the value will hold in the parent loop.
- ///
- /// In the case that a relevant loop exit value cannot be computed, the
- /// original value V is returned.
- const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
-
- /// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
- const SCEV *getSCEVAtScope(Value *V, const Loop *L);
-
- /// Test whether entry to the loop is protected by a conditional between LHS
- /// and RHS. This is used to help avoid max expressions in loop trip
- /// counts, and to eliminate casts.
- bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
-
- /// Test whether the backedge of the loop is protected by a conditional
- /// between LHS and RHS. This is used to eliminate casts.
- bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
-
- /// Returns the maximum trip count of the loop if it is a single-exit
- /// loop and we can compute a small maximum for that loop.
- ///
- /// Implemented in terms of the \c getSmallConstantTripCount overload with
- /// the single exiting block passed to it. See that routine for details.
- unsigned getSmallConstantTripCount(Loop *L);
-
- /// Returns the maximum trip count of this loop as a normal unsigned
- /// value. Returns 0 if the trip count is unknown or not constant. This
- /// "trip count" assumes that control exits via ExitingBlock. More
- /// precisely, it is the number of times that control may reach ExitingBlock
- /// before taking the branch. For loops with multiple exits, it may not be
- /// the number of times that the loop header executes if the loop exits
- /// prematurely via another branch.
- unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock);
-
- /// Returns the largest constant divisor of the trip count of the
- /// loop if it is a single-exit loop and we can compute a small maximum for
- /// that loop.
- ///
- /// Implemented in terms of the \c getSmallConstantTripMultiple overload with
- /// the single exiting block passed to it. See that routine for details.
- unsigned getSmallConstantTripMultiple(Loop *L);
-
- /// Returns the largest constant divisor of the trip count of this loop as a
- /// normal unsigned value, if possible. This means that the actual trip
- /// count is always a multiple of the returned value (don't forget the trip
- /// count could very well be zero as well!). As explained in the comments
- /// for getSmallConstantTripCount, this assumes that control exits the loop
- /// via ExitingBlock.
- unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock);
-
- /// Get the expression for the number of loop iterations for which this loop
- /// is guaranteed not to exit via ExitingBlock. Otherwise return
- /// SCEVCouldNotCompute.
- const SCEV *getExitCount(Loop *L, BasicBlock *ExitingBlock);
-
- /// If the specified loop has a predictable backedge-taken count, return it,
- /// otherwise return a SCEVCouldNotCompute object. The backedge-taken count
- /// is the number of times the loop header will be branched to from within
- /// the loop. This is one less than the trip count of the loop, since it
- /// doesn't count the first iteration, when the header is branched to from
- /// outside the loop.
- ///
- /// Note that it is not valid to call this method on a loop without a
- /// loop-invariant backedge-taken count (see
- /// hasLoopInvariantBackedgeTakenCount).
- ///
- const SCEV *getBackedgeTakenCount(const Loop *L);
-
- /// Similar to getBackedgeTakenCount, except it will add a set of
- /// SCEV predicates to Predicates that are required to be true in order for
- /// the answer to be correct. Predicates can be checked with run-time
- /// checks and can be used to perform loop versioning.
- const SCEV *getPredicatedBackedgeTakenCount(const Loop *L,
- SCEVUnionPredicate &Predicates);
-
- /// Similar to getBackedgeTakenCount, except return the least SCEV value
- /// that is known never to be less than the actual backedge taken count.
- const SCEV *getMaxBackedgeTakenCount(const Loop *L);
-
- /// Return true if the specified loop has an analyzable loop-invariant
- /// backedge-taken count.
- bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
-
- /// This method should be called by the client when it has changed a loop in
- /// a way that may affect ScalarEvolution's ability to compute a trip count,
- /// or if the loop is deleted. This call is potentially expensive for large
- /// loop bodies.
- void forgetLoop(const Loop *L);
-
- /// This method should be called by the client when it has changed a value
- /// in a way that may affect its value, or which may disconnect it from a
- /// def-use chain linking it to a loop.
- void forgetValue(Value *V);
-
- /// Called when the client has changed the disposition of values in
- /// this loop.
- ///
- /// We don't have a way to invalidate per-loop dispositions. Clear and
- /// recompute is simpler.
- void forgetLoopDispositions(const Loop *L) { LoopDispositions.clear(); }
+ typedef std::pair<BasicBlock *, ExitLimit> EdgeExitInfo;
- /// Determine the minimum number of zero bits that S is guaranteed to end in
- /// (at every loop iteration). It is, at the same time, the minimum number
- /// of times S is divisible by 2. For example, given {4,+,8} it returns 2.
- /// If S is guaranteed to be 0, it returns the bitwidth of S.
- uint32_t GetMinTrailingZeros(const SCEV *S);
+ /// Initialize BackedgeTakenInfo from a list of exact exit counts.
+ BackedgeTakenInfo(SmallVectorImpl<EdgeExitInfo> &&ExitCounts, bool Complete,
+ const SCEV *MaxCount, bool MaxOrZero);
- /// Determine the unsigned range for a particular SCEV.
- ///
- ConstantRange getUnsignedRange(const SCEV *S) {
- return getRange(S, HINT_RANGE_UNSIGNED);
+ /// Test whether this BackedgeTakenInfo contains any computed information,
+ /// or whether it's all SCEVCouldNotCompute values.
+ bool hasAnyInfo() const {
+ return !ExitNotTaken.empty() || !isa<SCEVCouldNotCompute>(getMax());
}
- /// Determine the signed range for a particular SCEV.
- ///
- ConstantRange getSignedRange(const SCEV *S) {
- return getRange(S, HINT_RANGE_SIGNED);
- }
+ /// Test whether this BackedgeTakenInfo contains complete information.
+ bool hasFullInfo() const { return isComplete(); }
+
+ /// Return an expression indicating the exact backedge-taken count of the
+ /// loop if it is known or SCEVCouldNotCompute otherwise. This is the
+ /// number of times the loop header can be guaranteed to execute, minus
+ /// one.
+ ///
+ /// If the SCEV predicate associated with the answer can be different
+ /// from AlwaysTrue, a (non-null) Predicates argument must be passed in.
+ /// The SCEV predicate associated with the answer will be added to
+ /// Predicates; a run-time check needs to be emitted for that SCEV
+ /// predicate in order for the answer to be valid.
+ ///
+ /// Note that whether a predicate argument is needed is always known from
+ /// the way the ExitCounts vector was computed: if SCEV predicates were
+ /// allowed to be generated when populating the vector, the result can
+ /// contain them, and a SCEVUnionPredicate argument should then be passed
+ /// to getExact.
+ const SCEV *getExact(ScalarEvolution *SE,
+ SCEVUnionPredicate *Predicates = nullptr) const;
+
+ /// Return the number of times this loop exit may fall through to the back
+ /// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
+ /// this block before this number of iterations, but may exit via another
+ /// block.
+ const SCEV *getExact(BasicBlock *ExitingBlock, ScalarEvolution *SE) const;
- /// Test if the given expression is known to be negative.
- ///
- bool isKnownNegative(const SCEV *S);
+ /// Get the max backedge taken count for the loop.
+ const SCEV *getMax(ScalarEvolution *SE) const;
- /// Test if the given expression is known to be positive.
- ///
- bool isKnownPositive(const SCEV *S);
+ /// Return true if the number of times this backedge is taken is either the
+ /// value returned by getMax or zero.
+ bool isMaxOrZero(ScalarEvolution *SE) const;
- /// Test if the given expression is known to be non-negative.
- ///
- bool isKnownNonNegative(const SCEV *S);
+ /// Return true if any backedge taken count expressions refer to the given
+ /// subexpression.
+ bool hasOperand(const SCEV *S, ScalarEvolution *SE) const;
- /// Test if the given expression is known to be non-positive.
- ///
- bool isKnownNonPositive(const SCEV *S);
+ /// Invalidate this result and free associated memory.
+ void clear();
+ };
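+
+ /// A minimal sketch of how a cached BackedgeTakenInfo is typically
+ /// consumed from inside ScalarEvolution (illustrative only; `L` is an
+ /// assumed Loop*, and getBackedgeTakenInfo is declared further below):
+ /// \code
+ ///   const BackedgeTakenInfo &BTI = getBackedgeTakenInfo(L);
+ ///   if (BTI.hasAnyInfo()) {
+ ///     SCEVUnionPredicate Preds;
+ ///     // Exact is valid only under the predicates collected in Preds;
+ ///     // without a Predicates argument, a result that depends on
+ ///     // predicates may instead be reported as SCEVCouldNotCompute.
+ ///     const SCEV *Exact = BTI.getExact(this, &Preds);
+ ///     const SCEV *Max = BTI.getMax(this);
+ ///   }
+ /// \endcode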
- /// Test if the given expression is known to be non-zero.
- ///
- bool isKnownNonZero(const SCEV *S);
+ /// Cache the backedge-taken count of the loops for this function as they
+ /// are computed.
+ DenseMap<const Loop *, BackedgeTakenInfo> BackedgeTakenCounts;
+
+ /// Cache the predicated backedge-taken count of the loops for this
+ /// function as they are computed.
+ DenseMap<const Loop *, BackedgeTakenInfo> PredicatedBackedgeTakenCounts;
+
+ /// This map contains entries for all of the PHI instructions that we
+ /// attempt to compute constant evolutions for. This allows us to avoid
+ /// potentially expensive recomputation of these properties. An instruction
+ /// maps to null if we are unable to compute its exit value.
+ DenseMap<PHINode *, Constant *> ConstantEvolutionLoopExitValue;
+
+ /// This map contains entries for all the expressions that we attempt to
+ /// compute getSCEVAtScope information for, which can be expensive in
+ /// extreme cases.
+ DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
+ ValuesAtScopes;
+
+ /// Memoized computeLoopDisposition results.
+ DenseMap<const SCEV *,
+ SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
+ LoopDispositions;
+
+ struct LoopProperties {
+ /// Set to true if the loop contains no instruction that can abnormally
+ /// exit the loop (i.e. via throwing an exception, by terminating the
+ /// thread cleanly or by infinite looping in a called function). Strictly
+ /// speaking, the last one is not leaving the loop, but is identical to
+ /// leaving the loop for reasoning about undefined behavior.
+ bool HasNoAbnormalExits;
+
+ /// Set to true if the loop contains no instruction that can have side
+ /// effects (i.e. via throwing an exception, volatile or atomic access).
+ bool HasNoSideEffects;
+ };
- /// Test if the given expression is known to satisfy the condition described
- /// by Pred, LHS, and RHS.
- ///
- bool isKnownPredicate(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
-
- /// Return true if the result of the predicate LHS `Pred` RHS is loop
- /// invariant with respect to L. Set InvariantPred, InvariantLHS and
- /// InvariantLHS so that InvariantLHS `InvariantPred` InvariantRHS is the
- /// loop invariant form of LHS `Pred` RHS.
- bool isLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const Loop *L,
- ICmpInst::Predicate &InvariantPred,
- const SCEV *&InvariantLHS,
- const SCEV *&InvariantRHS);
-
- /// Simplify LHS and RHS in a comparison with predicate Pred. Return true
- /// iff any changes were made. If the operands are provably equal or
- /// unequal, LHS and RHS are set to the same value and Pred is set to either
- /// ICMP_EQ or ICMP_NE.
- ///
- bool SimplifyICmpOperands(ICmpInst::Predicate &Pred,
- const SCEV *&LHS,
- const SCEV *&RHS,
- unsigned Depth = 0);
-
- /// Return the "disposition" of the given SCEV with respect to the given
- /// loop.
- LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
-
- /// Return true if the value of the given SCEV is unchanging in the
- /// specified loop.
- bool isLoopInvariant(const SCEV *S, const Loop *L);
-
- /// Return true if the given SCEV changes value in a known way in the
- /// specified loop. This property being true implies that the value is
- /// variant in the loop AND that we can emit an expression to compute the
- /// value of the expression at any particular loop iteration.
- bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
-
- /// Return the "disposition" of the given SCEV with respect to the given
- /// block.
- BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+ /// Cache for \c getLoopProperties.
+ DenseMap<const Loop *, LoopProperties> LoopPropertiesCache;
- /// Return true if elements that make up the given SCEV dominate the
- /// specified basic block.
- bool dominates(const SCEV *S, const BasicBlock *BB);
+ /// Return a \c LoopProperties instance for \p L, creating one if necessary.
+ LoopProperties getLoopProperties(const Loop *L);
- /// Return true if elements that make up the given SCEV properly dominate
- /// the specified basic block.
- bool properlyDominates(const SCEV *S, const BasicBlock *BB);
+ bool loopHasNoSideEffects(const Loop *L) {
+ return getLoopProperties(L).HasNoSideEffects;
+ }
- /// Test whether the given SCEV has Op as a direct or indirect operand.
- bool hasOperand(const SCEV *S, const SCEV *Op) const;
+ bool loopHasNoAbnormalExits(const Loop *L) {
+ return getLoopProperties(L).HasNoAbnormalExits;
+ }
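+
+ /// Illustrative gate built from the two cached properties (`L` is an
+ /// assumed Loop*; the concrete clients of these predicates live in the
+ /// implementation):
+ /// \code
+ ///   if (loopHasNoAbnormalExits(L) && loopHasNoSideEffects(L)) {
+ ///     // Every iteration that is entered is known to run to completion,
+ ///     // the kind of guarantee that poison/undefined-behavior based
+ ///     // reasoning such as isAddRecNeverPoison builds on.
+ ///   }
+ /// \endcode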
- /// Return the size of an element read or written by Inst.
- const SCEV *getElementSize(Instruction *Inst);
+ /// Compute a LoopDisposition value.
+ LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
- /// Compute the array dimensions Sizes from the set of Terms extracted from
- /// the memory access function of this SCEVAddRecExpr (second step of
- /// delinearization).
- void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
- SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize) const;
+ /// Memoized computeBlockDisposition results.
+ DenseMap<
+ const SCEV *,
+ SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
+ BlockDispositions;
- void print(raw_ostream &OS) const;
- void verify() const;
+ /// Compute a BlockDisposition value.
+ BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
- /// Collect parametric terms occurring in step expressions (first step of
- /// delinearization).
- void collectParametricTerms(const SCEV *Expr,
- SmallVectorImpl<const SCEV *> &Terms);
+ /// Memoized results from getRange
+ DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
+ /// Memoized results from getRange
+ DenseMap<const SCEV *, ConstantRange> SignedRanges;
+ /// Used to parameterize getRange
+ enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };
- /// Return in Subscripts the access functions for each dimension in Sizes
- /// (third step of delinearization).
- void computeAccessFunctions(const SCEV *Expr,
- SmallVectorImpl<const SCEV *> &Subscripts,
- SmallVectorImpl<const SCEV *> &Sizes);
+ /// Set the memoized range for the given SCEV.
+ const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
+ const ConstantRange &CR) {
+ DenseMap<const SCEV *, ConstantRange> &Cache =
+ Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
- /// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
- /// subscripts and sizes of an array access.
- ///
- /// The delinearization is a 3 step process: the first two steps compute the
- /// sizes of each subscript and the third step computes the access functions
- /// for the delinearized array:
- ///
- /// 1. Find the terms in the step functions
- /// 2. Compute the array size
- /// 3. Compute the access function: divide the SCEV by the array size
- /// starting with the innermost dimensions found in step 2. The Quotient
- /// is the SCEV to be divided in the next step of the recursion. The
- /// Remainder is the subscript of the innermost dimension. Loop over all
- /// array dimensions computed in step 2.
- ///
- /// To compute a uniform array size for several memory accesses to the same
- /// object, one can collect in step 1 all the step terms for all the memory
- /// accesses, and compute in step 2 a unique array shape. This guarantees
- /// that the array shape will be the same across all memory accesses.
- ///
- /// FIXME: We could derive the result of steps 1 and 2 from a description of
- /// the array shape given in metadata.
- ///
- /// Example:
- ///
- /// A[][n][m]
- ///
- /// for i
- /// for j
- /// for k
- /// A[j+k][2i][5i] =
- ///
- /// The initial SCEV:
- ///
- /// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
- ///
- /// 1. Find the different terms in the step functions:
- /// -> [2*m, 5, n*m, n*m]
- ///
- /// 2. Compute the array size: sort and unique them
- /// -> [n*m, 2*m, 5]
- /// find the GCD of all the terms = 1
- /// divide by the GCD and erase constant terms
- /// -> [n*m, 2*m]
- /// GCD = m
- /// divide by GCD -> [n, 2]
- /// remove constant terms
- /// -> [n]
- /// size of the array is A[unknown][n][m]
- ///
- /// 3. Compute the access function
- /// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
- /// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
- /// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
- /// The remainder is the subscript of the innermost array dimension: [5i].
- ///
- /// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
- /// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
- /// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
- /// The Remainder is the subscript of the next array dimension: [2i].
- ///
- /// The subscript of the outermost dimension is the Quotient: [j+k].
- ///
- /// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
- void delinearize(const SCEV *Expr,
- SmallVectorImpl<const SCEV *> &Subscripts,
- SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize);
-
- /// Return the DataLayout associated with the module this SCEV instance is
- /// operating on.
- const DataLayout &getDataLayout() const {
- return F.getParent()->getDataLayout();
- }
+ auto Pair = Cache.insert({S, CR});
+ if (!Pair.second)
+ Pair.first->second = CR;
+ return Pair.first->second;
+ }
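+
+ /// Sketch of the memoization pattern (illustrative; `S` is an arbitrary
+ /// SCEV in scope):
+ /// \code
+ ///   // The first query computes a range and stores it via setRange;
+ ///   // later queries for the same SCEV and hint hit the DenseMap cache.
+ ///   ConstantRange R1 = getRange(S, HINT_RANGE_UNSIGNED);
+ ///   ConstantRange R2 = getRange(S, HINT_RANGE_UNSIGNED); // cached
+ /// \endcode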
- const SCEVPredicate *getEqualPredicate(const SCEVUnknown *LHS,
- const SCEVConstant *RHS);
-
- const SCEVPredicate *
- getWrapPredicate(const SCEVAddRecExpr *AR,
- SCEVWrapPredicate::IncrementWrapFlags AddedFlags);
-
- /// Re-writes the SCEV according to the Predicates in \p A.
- const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
- SCEVUnionPredicate &A);
- /// Tries to convert the \p S expression to an AddRec expression,
- /// adding additional predicates to \p Preds as required.
- const SCEVAddRecExpr *
- convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L,
- SCEVUnionPredicate &Preds);
-
- private:
- /// Compute the backedge taken count knowing the interval difference, the
- /// stride and presence of the equality in the comparison.
- const SCEV *computeBECount(const SCEV *Delta, const SCEV *Stride,
- bool Equality);
-
- /// Verify if a linear IV with positive stride can overflow when in a
- /// less-than comparison, knowing the invariant term of the comparison,
- /// the stride and the knowledge of NSW/NUW flags on the recurrence.
- bool doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
- bool IsSigned, bool NoWrap);
-
- /// Verify if a linear IV with negative stride can overflow when in a
- /// greater-than comparison, knowing the invariant term of the comparison,
- /// the stride and the knowledge of NSW/NUW flags on the recurrence.
- bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
- bool IsSigned, bool NoWrap);
-
- private:
- FoldingSet<SCEV> UniqueSCEVs;
- FoldingSet<SCEVPredicate> UniquePreds;
- BumpPtrAllocator SCEVAllocator;
-
- /// The head of a linked list of all SCEVUnknown values that have been
- /// allocated. This is used by releaseMemory to locate them all and call
- /// their destructors.
- SCEVUnknown *FirstUnknown;
- };
+ /// Determine the range for a particular SCEV.
+ ConstantRange getRange(const SCEV *S, RangeSignHint Hint);
- /// Analysis pass that exposes the \c ScalarEvolution for a function.
- class ScalarEvolutionAnalysis
- : public AnalysisInfoMixin<ScalarEvolutionAnalysis> {
- friend AnalysisInfoMixin<ScalarEvolutionAnalysis>;
- static char PassID;
+ /// Determines the range for the affine SCEVAddRecExpr {\p Start,+,\p Stop}.
+ /// Helper for \c getRange.
+ ConstantRange getRangeForAffineAR(const SCEV *Start, const SCEV *Stop,
+ const SCEV *MaxBECount, unsigned BitWidth);
- public:
- typedef ScalarEvolution Result;
+ /// Try to compute a range for the affine SCEVAddRecExpr {\p Start,+,\p
+ /// Stop} by "factoring out" a ternary expression from the add recurrence.
+ /// Helper called by \c getRange.
+ ConstantRange getRangeViaFactoring(const SCEV *Start, const SCEV *Stop,
+ const SCEV *MaxBECount, unsigned BitWidth);
- ScalarEvolution run(Function &F, AnalysisManager<Function> &AM);
- };
+ /// We know that there is no SCEV for the specified value. Analyze the
+ /// expression.
+ const SCEV *createSCEV(Value *V);
- /// Printer pass for the \c ScalarEvolutionAnalysis results.
- class ScalarEvolutionPrinterPass
- : public PassInfoMixin<ScalarEvolutionPrinterPass> {
- raw_ostream &OS;
+ /// Provide the special handling we need to analyze PHI SCEVs.
+ const SCEV *createNodeForPHI(PHINode *PN);
- public:
- explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
- };
+ /// Helper function called from createNodeForPHI.
+ const SCEV *createAddRecFromPHI(PHINode *PN);
- class ScalarEvolutionWrapperPass : public FunctionPass {
- std::unique_ptr<ScalarEvolution> SE;
+ /// Helper function called from createNodeForPHI.
+ const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
- public:
- static char ID;
-
- ScalarEvolutionWrapperPass();
+ /// Provide special handling for a select-like instruction (currently this
+ /// is either a select instruction or a phi node). \p I is the instruction
+ /// being processed, and it is assumed equivalent to "Cond ? TrueVal :
+ /// FalseVal".
+ const SCEV *createNodeForSelectOrPHI(Instruction *I, Value *Cond,
+ Value *TrueVal, Value *FalseVal);
- ScalarEvolution &getSE() { return *SE; }
- const ScalarEvolution &getSE() const { return *SE; }
+ /// Provide the special handling we need to analyze GEP SCEVs.
+ const SCEV *createNodeForGEP(GEPOperator *GEP);
- bool runOnFunction(Function &F) override;
- void releaseMemory() override;
- void getAnalysisUsage(AnalysisUsage &AU) const override;
- void print(raw_ostream &OS, const Module * = nullptr) const override;
- void verifyAnalysis() const override;
- };
+ /// Implementation code for getSCEVAtScope; called at most once for each
+ /// SCEV+Loop pair.
+ ///
+ const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
+
+ /// This looks up computed SCEV values for all instructions that depend on
+ /// the given instruction and removes them from the ValueExprMap map if they
+ /// reference SymName. This is used during PHI resolution.
+ void forgetSymbolicName(Instruction *I, const SCEV *SymName);
+
+ /// Return the BackedgeTakenInfo for the given loop, lazily computing new
+ /// values if the loop hasn't been analyzed yet. The returned result is
+ /// guaranteed not to be predicated.
+ const BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
+
+ /// Similar to getBackedgeTakenInfo, but will add predicates as required
+ /// with the purpose of returning complete information.
+ const BackedgeTakenInfo &getPredicatedBackedgeTakenInfo(const Loop *L);
+
+ /// Compute the number of times the specified loop will iterate.
+ /// If AllowPredicates is set, we will create new SCEV predicates as
+ /// necessary in order to return an exact answer.
+ BackedgeTakenInfo computeBackedgeTakenCount(const Loop *L,
+ bool AllowPredicates = false);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if it exits via the specified block. If AllowPredicates is set,
+ /// this call will try to use a minimal set of SCEV predicates in order to
+ /// return an exact answer.
+ ExitLimit computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
+ bool AllowPredicates = false);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if its exit condition were a conditional branch of ExitCond,
+ /// TBB, and FBB.
+ ///
+ /// \p ControlsExit is true if ExitCond directly controls the exit
+ /// branch. In this case, we can assume that the loop exits only if the
+ /// condition is true and can infer that failing to meet the condition prior
+ /// to integer wraparound results in undefined behavior.
+ ///
+ /// If \p AllowPredicates is set, this call will try to use a minimal set of
+ /// SCEV predicates in order to return an exact answer.
+ ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond,
+ BasicBlock *TBB, BasicBlock *FBB,
+ bool ControlsExit,
+ bool AllowPredicates = false);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if its exit condition were a conditional branch of the ICmpInst
+ /// ExitCond, TBB, and FBB. If AllowPredicates is set, this call will try
+ /// to use a minimal set of SCEV predicates in order to return an exact
+ /// answer.
+ ExitLimit computeExitLimitFromICmp(const Loop *L, ICmpInst *ExitCond,
+ BasicBlock *TBB, BasicBlock *FBB,
+ bool IsSubExpr,
+ bool AllowPredicates = false);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if its exit condition were a switch with a single exiting case
+ /// to ExitingBB.
+ ExitLimit computeExitLimitFromSingleExitSwitch(const Loop *L,
+ SwitchInst *Switch,
+ BasicBlock *ExitingBB,
+ bool IsSubExpr);
+
+ /// Given an exit condition of 'icmp op load X, cst', try to see if we can
+ /// compute the backedge-taken count.
+ ExitLimit computeLoadConstantCompareExitLimit(LoadInst *LI, Constant *RHS,
+ const Loop *L,
+ ICmpInst::Predicate p);
+
+ /// Compute the exit limit of a loop that is controlled by a
+ /// "(IV >> 1) != 0" type comparison. We cannot compute the exact trip
+ /// count in these cases (since SCEV has no way of expressing them), but we
+ /// can still sometimes compute an upper bound.
+ ///
+ /// Return an ExitLimit for a loop whose backedge is guarded by `LHS Pred
+ /// RHS`.
+ ExitLimit computeShiftCompareExitLimit(Value *LHS, Value *RHS, const Loop *L,
+ ICmpInst::Predicate Pred);
+
+ /// If the loop is known to execute a constant number of times (the
+ /// condition evolves only from constants), try to evaluate a few iterations
+ /// of the loop until the exit condition takes the value ExitWhen
+ /// (true or false). If we cannot evaluate the exit count of the loop,
+ /// return CouldNotCompute.
+ const SCEV *computeExitCountExhaustively(const Loop *L, Value *Cond,
+ bool ExitWhen);
+
+ /// Return the number of times an exit condition comparing the specified
+ /// value to zero will execute. If not computable, return CouldNotCompute.
+ /// If AllowPredicates is set, this call will try to use a minimal set of
+ /// SCEV predicates in order to return an exact answer.
+ ExitLimit howFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr,
+ bool AllowPredicates = false);
+
+ /// Return the number of times an exit condition checking the specified
+ /// value for nonzero will execute. If not computable, return
+ /// CouldNotCompute.
+ ExitLimit howFarToNonZero(const SCEV *V, const Loop *L);
+
+ /// Return the number of times an exit condition containing the specified
+ /// less-than comparison will execute. If not computable, return
+ /// CouldNotCompute.
+ ///
+ /// \p isSigned specifies whether the less-than is signed.
+ ///
+ /// \p ControlsExit is true when the LHS < RHS condition directly controls
+ /// the branch (the loop exits only if the condition is true). In this
+ /// case, we can use NoWrapFlags to skip overflow checks.
+ ///
+ /// If \p AllowPredicates is set, this call will try to use a minimal set of
+ /// SCEV predicates in order to return an exact answer.
+ ExitLimit howManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
+ bool isSigned, bool ControlsExit,
+ bool AllowPredicates = false);
+
+ ExitLimit howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
+ bool isSigned, bool IsSubExpr,
+ bool AllowPredicates = false);
+
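+ /// Worked example for the two helpers above (illustrative): for a loop
+ /// such as
+ /// \code
+ ///   for (int i = 0; i < n; ++i) { /* ... */ }
+ /// \endcode
+ /// the induction variable is the add recurrence {0,+,1} and, once the
+ /// loop is known to be entered (n > 0), the backedge is taken n - 1
+ /// times; howManyLessThans therefore reports an exact not-taken count
+ /// equivalent to n - 1, possibly guarded by SCEV predicates when
+ /// AllowPredicates is set.
+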
+ /// Return a predecessor of BB (which may not be an immediate predecessor)
+ /// which has exactly one successor from which BB is reachable, or null if
+ /// no such block is found.
+ std::pair<BasicBlock *, BasicBlock *>
+ getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the given FoundCondValue value evaluates to true.
+ bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
+ Value *FoundCondValue, bool Inverse);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
+ /// true.
+ bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
+ ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ bool isImpliedCondOperands(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true. Utility function used by isImpliedCondOperands. Tries to get
+ /// cases like "X `sgt` 0 => X - 1 `sgt` -1".
+ bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// Return true if the condition denoted by \p LHS \p Pred \p RHS is implied
+ /// by a call to \c @llvm.experimental.guard in \p BB.
+ bool isImpliedViaGuard(BasicBlock *BB, ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ ///
+ /// This routine tries to rule out certain kinds of integer overflow, and
+ /// then tries to reason about arithmetic properties of the predicates.
+ bool isImpliedCondOperandsViaNoOverflow(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// If we know that the specified Phi is in the header of its containing
+ /// loop, we know the loop executes a constant number of times, and the PHI
+ /// node is just a recurrence involving constants, fold it.
+ Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt &BEs,
+ const Loop *L);
+
+ /// Test if the given expression is known to satisfy the condition described
+ /// by Pred and the known constant ranges of LHS and RHS.
+ ///
+ bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
- /// An interface layer with SCEV used to manage how we see SCEV expressions
- /// for values in the context of existing predicates. We can add new
- /// predicates, but we cannot remove them.
- ///
- /// This layer has multiple purposes:
- /// - provides a simple interface for SCEV versioning.
- /// - guarantees that the order of transformations applied on a SCEV
- /// expression for a single Value is consistent across two different
- /// getSCEV calls. This means that, for example, once we've obtained
- /// an AddRec expression for a certain value through expression
- /// rewriting, we will continue to get an AddRec expression for that
- /// Value.
- /// - lowers the number of expression rewrites.
- class PredicatedScalarEvolution {
- public:
- PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);
- const SCEVUnionPredicate &getUnionPredicate() const;
+ /// Try to prove the condition described by "LHS Pred RHS" by ruling out
+ /// integer overflow.
+ ///
+ /// For instance, this will return true for "A s< (A + C)<nsw>" if C is
+ /// positive.
+ bool isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS);
+
+ /// Try to split Pred LHS RHS into logical conjunctions (and's) and try to
+ /// prove them individually.
+ bool isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS);
+
+ /// Try to match the Expr as "(L + R)<Flags>".
+ bool splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R,
+ SCEV::NoWrapFlags &Flags);
+
+ /// Compute \p LHS - \p RHS and return the result as an APInt if it is a
+ /// constant, or None if it isn't.
+ ///
+ /// This is intended to be a cheaper version of getMinusSCEV. We can be
+ /// frugal here since we just bail out of actually constructing and
+ /// canonicalizing an expression in the cases where the result isn't going
+ /// to be a constant.
+ Optional<APInt> computeConstantDifference(const SCEV *LHS, const SCEV *RHS);
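+
+ /// Illustrative sketch (hypothetical values `A` and `B`, assumed to be
+ /// in scope with A == X + 20 and B == X + 6 for some common X):
+ /// \code
+ ///   Optional<APInt> D = computeConstantDifference(getSCEV(A), getSCEV(B));
+ ///   // Here D would hold the constant 14; when the difference cannot be
+ ///   // shown to be constant cheaply, the result is None.
+ /// \endcode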
+
+ /// Drop memoized information computed for S.
+ void forgetMemoizedResults(const SCEV *S);
+
+ /// Return an existing SCEV for V if there is one, otherwise return nullptr.
+ const SCEV *getExistingSCEV(Value *V);
+
+ /// Return false iff the given SCEV contains a SCEVUnknown with a null
+ /// value pointer.
+ bool checkValidity(const SCEV *S) const;
+
+ /// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be
+ /// equal to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}. This is
+ /// equivalent to proving no signed (resp. unsigned) wrap in
+ /// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
+ /// (resp. `SCEVZeroExtendExpr`).
+ ///
+ template <typename ExtendOpTy>
+ bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
+ const Loop *L);
- /// Returns the SCEV expression of V, in the context of the current SCEV
- /// predicate. The order of transformations applied on the expression of V
- /// returned by ScalarEvolution is guaranteed to be preserved, even when
- /// adding new predicates.
- const SCEV *getSCEV(Value *V);
+ /// Try to prove NSW or NUW on \p AR relying on ConstantRange manipulation.
+ SCEV::NoWrapFlags proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR);
- /// Get the (predicated) backedge count for the analyzed loop.
- const SCEV *getBackedgeTakenCount();
+ bool isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
+ ICmpInst::Predicate Pred, bool &Increasing);
- /// Adds a new predicate.
- void addPredicate(const SCEVPredicate &Pred);
+ /// Return true if, for all loop invariant X, the predicate "LHS `Pred` X"
+ /// is monotonically increasing or decreasing. In the former case set
+ /// `Increasing` to true and in the latter case set `Increasing` to false.
+ ///
+ /// A predicate is said to be monotonically increasing if it may go from
+ /// being false to being true as the loop iterates, but never the other way
+ /// around. A predicate is said to be monotonically decreasing if it may go
+ /// from being true to being false as the loop iterates, but never the other
+ /// way around.
+ bool isMonotonicPredicate(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred,
+ bool &Increasing);
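+
+ /// Illustrative example (assumes `AR` is the SCEVAddRecExpr {0,+,1}<nsw>
+ /// for the loop, i.e. a canonical induction variable known not to
+ /// sign-wrap, and X is any loop-invariant value):
+ /// \code
+ ///   // "{0,+,1} s>= X" can only flip from false to true as the loop
+ ///   // iterates, so the predicate is monotonically increasing.
+ ///   bool Increasing;
+ ///   if (isMonotonicPredicate(AR, ICmpInst::ICMP_SGE, Increasing))
+ ///     assert(Increasing && "flips at most once, from false to true");
+ /// \endcode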
+
+ /// Return SCEV no-wrap flags that can be proven based on reasoning about
+ /// how poison produced from no-wrap flags on this value (e.g. a nuw add)
+ /// would trigger undefined behavior on overflow.
+ SCEV::NoWrapFlags getNoWrapFlagsFromUB(const Value *V);
+
+ /// Return true if the SCEV corresponding to \p I is never poison. Proving
+ /// this is more complex than proving that just \p I is never poison, since
+ /// SCEV commons expressions across control flow, and you can have cases
+ /// like:
+ ///
+ /// idx0 = a + b;
+ /// ptr[idx0] = 100;
+ /// if (<condition>) {
+ /// idx1 = a +nsw b;
+ /// ptr[idx1] = 200;
+ /// }
+ ///
+ /// where the SCEV expression (+ a b) is guaranteed to not be poison (and
+ /// hence not sign-overflow) only if "<condition>" is true. Since both
+ /// `idx0` and `idx1` will be mapped to the same SCEV expression, (+ a b),
+ /// it is not okay to annotate (+ a b) with <nsw> in the above example.
+ bool isSCEVExprNeverPoison(const Instruction *I);
+
+ /// This is like \c isSCEVExprNeverPoison but it specifically works for
+ /// instructions that will get mapped to SCEV add recurrences. Return true
+ /// if \p I will never generate poison under the assumption that \p I is an
+ /// add recurrence on the loop \p L.
+ bool isAddRecNeverPoison(const Instruction *I, const Loop *L);
+
+public:
+ ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
+ DominatorTree &DT, LoopInfo &LI);
+ ~ScalarEvolution();
+ ScalarEvolution(ScalarEvolution &&Arg);
+
+ LLVMContext &getContext() const { return F.getContext(); }
+
+ /// Test if values of the given type are analyzable within the SCEV
+ /// framework. This primarily includes integer types, and it can optionally
+ /// include pointer types if the ScalarEvolution class has access to
+ /// target-specific information.
+ bool isSCEVable(Type *Ty) const;
+
+ /// Return the size in bits of the specified type, for which isSCEVable must
+ /// return true.
+ uint64_t getTypeSizeInBits(Type *Ty) const;
+
+ /// Return a type with the same bitwidth as the given type and which
+ /// represents how SCEV will treat the given type, for which isSCEVable must
+ /// return true. For pointer types, this is the pointer-sized integer type.
+ Type *getEffectiveSCEVType(Type *Ty) const;
+
+ /// Return true if the SCEV is a scAddRecExpr or it contains a
+ /// scAddRecExpr. The result will be cached in HasRecMap.
+ ///
+ bool containsAddRecurrence(const SCEV *S);
+
+ /// Return the Value set from which the SCEV expr is generated.
+ SetVector<ValueOffsetPair> *getSCEVValues(const SCEV *S);
+
+ /// Erase Value from ValueExprMap and ExprValueMap.
+ void eraseValueFromMap(Value *V);
+
+ /// Return a SCEV expression for the full generality of the specified
+ /// expression.
+ const SCEV *getSCEV(Value *V);
+
+ const SCEV *getConstant(ConstantInt *V);
+ const SCEV *getConstant(const APInt &Val);
+ const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
+ const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ return getAddExpr(Ops, Flags);
+ }
+ const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ return getAddExpr(Ops, Flags);
+ }
+ const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ return getMulExpr(Ops, Flags);
+ }
+ const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ return getMulExpr(Ops, Flags);
+ }
+ const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L,
+ SCEV::NoWrapFlags Flags);
+ const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
+ const Loop *L, SCEV::NoWrapFlags Flags);
+ const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
+ const Loop *L, SCEV::NoWrapFlags Flags) {
+ SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
+ return getAddRecExpr(NewOp, L, Flags);
+ }
+ /// Returns an expression for a GEP
+ ///
+ /// \p GEP The GEP. The indices contained in the GEP itself are ignored;
+ /// instead we use IndexExprs.
+ /// \p IndexExprs The expressions for the indices.
+ const SCEV *getGEPExpr(GEPOperator *GEP,
+ const SmallVectorImpl<const SCEV *> &IndexExprs);
+ const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+ const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+ const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUnknown(Value *V);
+ const SCEV *getCouldNotCompute();
+
+ /// Return a SCEV for the constant 0 of a specific type.
+ const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }
+
+ /// Return a SCEV for the constant 1 of a specific type.
+ const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
+
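Editorial sketch (not part of the patch) of how these factory methods compose; SE, L, and Ty are assumed to be an existing ScalarEvolution, a Loop *, and an integer Type * for which isSCEVable(Ty) holds:

  const SCEV *Zero = SE.getZero(Ty);                                 // constant 0 of type Ty
  const SCEV *One  = SE.getOne(Ty);                                  // constant 1 of type Ty
  const SCEV *IV   = SE.getAddRecExpr(Zero, One, L, SCEV::FlagNUW);  // {0,+,1}<L>
  const SCEV *Next = SE.getAddExpr(IV, One);                         // IV + 1, folded and uniqued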
+ /// Return an expression for sizeof AllocTy that is type IntTy
+ ///
+ const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
- /// Attempts to produce an AddRecExpr for V by adding additional SCEV
- /// predicates. If we can't transform the expression into an AddRecExpr we
- /// return nullptr and not add additional SCEV predicates to the current
- /// context.
- const SCEVAddRecExpr *getAsAddRec(Value *V);
+ /// Return an expression for offsetof on the given field with type IntTy
+ ///
+ const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
- /// Proves that V doesn't overflow by adding SCEV predicate.
- void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+ /// Return the SCEV object corresponding to -V.
+ ///
+ const SCEV *getNegativeSCEV(const SCEV *V,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
- /// Returns true if we've proved that V doesn't wrap by means of a SCEV
- /// predicate.
- bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+ /// Return the SCEV object corresponding to ~V.
+ ///
+ const SCEV *getNotSCEV(const SCEV *V);
- /// Returns the ScalarEvolution analysis used.
- ScalarEvolution *getSE() const { return &SE; }
+ /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
+ const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
- /// We need to explicitly define the copy constructor because of FlagsMap.
- PredicatedScalarEvolution(const PredicatedScalarEvolution&);
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is zero extended.
+ const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is sign extended.
+ const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is zero extended. The
+ /// conversion must not be narrowing.
+ const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is sign extended. The
+ /// conversion must not be narrowing.
+ const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is extended with
+ /// unspecified bits. The conversion must not be narrowing.
+ const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. The conversion must not be widening.
+ const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
+
+ /// Promote the operands to the wider of the types using zero-extension, and
+ /// then perform a umax operation with them.
+ const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+
+ /// Promote the operands to the wider of the types using zero-extension, and
+ /// then perform a umin operation with them.
+ const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+
+ /// Transitively follow the chain of pointer-type operands until reaching a
+ /// SCEV that does not have a single pointer operand. This returns a
+ /// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
+ /// cases do exist.
+ const SCEV *getPointerBase(const SCEV *V);
+
+ /// Return a SCEV expression for the specified value at the specified scope
+ /// in the program. The L value specifies the loop nest in which to evaluate
+ /// the expression: null means the top-level scope, and a specified loop means
+ /// immediately inside that loop.
+ ///
+ /// This method can be used to compute the exit value for a variable defined
+ /// in a loop by querying what the value will hold in the parent loop.
+ ///
+ /// In the case that a relevant loop exit value cannot be computed, the
+ /// original value V is returned.
+ const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
+
+ /// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
+ const SCEV *getSCEVAtScope(Value *V, const Loop *L);
+
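A hedged sketch of the scope-based query above (hypothetical caller code; V and L are an assumed IR value and the loop defining it):

  // Evaluate V in the parent scope of L to obtain its value after L exits.
  const SCEV *ExitValue = SE.getSCEVAtScope(V, L->getParentLoop());
  if (SE.isLoopInvariant(ExitValue, L)) {
    // A usable exit value was found; it can be materialized after the loop,
    // e.g. with SCEVExpander.
  }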
+ /// Test whether entry to the loop is protected by a conditional between LHS
+ /// and RHS. This is used to help avoid max expressions in loop trip
+ /// counts, and to eliminate casts.
+ bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
+ /// Test whether the backedge of the loop is protected by a conditional
+ /// between LHS and RHS. This is used to eliminate casts.
+ bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
+ /// Returns the maximum trip count of the loop if it is a single-exit
+ /// loop and we can compute a small maximum for that loop.
+ ///
+ /// Implemented in terms of the \c getSmallConstantTripCount overload with
+ /// the single exiting block passed to it. See that routine for details.
+ unsigned getSmallConstantTripCount(Loop *L);
+
+ /// Returns the maximum trip count of this loop as a normal unsigned
+ /// value. Returns 0 if the trip count is unknown or not constant. This
+ /// "trip count" assumes that control exits via ExitingBlock. More
+ /// precisely, it is the number of times that control may reach ExitingBlock
+ /// before taking the branch. For loops with multiple exits, it may not be
+ /// the number of times that the loop header executes if the loop exits
+ /// prematurely via another branch.
+ unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock);
+
+ /// Returns the upper bound of the loop trip count as a normal unsigned
+ /// value.
+ /// Returns 0 if the trip count is unknown or not constant.
+ unsigned getSmallConstantMaxTripCount(Loop *L);
+
+ /// Returns the largest constant divisor of the trip count of the
+ /// loop if it is a single-exit loop and we can compute a small maximum for
+ /// that loop.
+ ///
+ /// Implemented in terms of the \c getSmallConstantTripMultiple overload with
+ /// the single exiting block passed to it. See that routine for details.
+ unsigned getSmallConstantTripMultiple(Loop *L);
+
+ /// Returns the largest constant divisor of the trip count of this loop as a
+ /// normal unsigned value, if possible. This means that the actual trip
+ /// count is always a multiple of the returned value (don't forget the trip
+ /// count could very well be zero as well!). As explained in the comments
+ /// for getSmallConstantTripCount, this assumes that control exits the loop
+ /// via ExitingBlock.
+ unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock);
+
+ /// Get the expression for the number of loop iterations for which this loop
+ /// is guaranteed not to exit via ExitingBlock. Otherwise return
+ /// SCEVCouldNotCompute.
+ const SCEV *getExitCount(Loop *L, BasicBlock *ExitingBlock);
+
+ /// If the specified loop has a predictable backedge-taken count, return it,
+ /// otherwise return a SCEVCouldNotCompute object. The backedge-taken count
+ /// is the number of times the loop header will be branched to from within
+ /// the loop. This is one less than the trip count of the loop, since it
+ /// doesn't count the first iteration, when the header is branched to from
+ /// outside the loop.
+ ///
+ /// Note that it is not valid to call this method on a loop without a
+ /// loop-invariant backedge-taken count (see
+ /// hasLoopInvariantBackedgeTakenCount).
+ ///
+ const SCEV *getBackedgeTakenCount(const Loop *L);
+
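A minimal sketch of the intended use (hypothetical caller code): because the backedge-taken count is one less than the trip count, a trip-count expression is obtained by adding one.

  if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
    const SCEV *BTC = SE.getBackedgeTakenCount(L);
    // Trip count = backedge-taken count + 1, in the same type.
    const SCEV *TripCount = SE.getAddExpr(BTC, SE.getOne(BTC->getType()));
    (void)TripCount;
  }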
+ /// Similar to getBackedgeTakenCount, except it will add a set of
+ /// SCEV predicates to Predicates that are required to be true in order for
+ /// the answer to be correct. Predicates can be checked with run-time
+ /// checks and can be used to perform loop versioning.
+ const SCEV *getPredicatedBackedgeTakenCount(const Loop *L,
+ SCEVUnionPredicate &Predicates);
+
+ /// Similar to getBackedgeTakenCount, except return the least SCEV value
+ /// that is known never to be less than the actual backedge taken count.
+ const SCEV *getMaxBackedgeTakenCount(const Loop *L);
+
+ /// Return true if the backedge taken count is either the value returned by
+ /// getMaxBackedgeTakenCount or zero.
+ bool isBackedgeTakenCountMaxOrZero(const Loop *L);
+
+ /// Return true if the specified loop has an analyzable loop-invariant
+ /// backedge-taken count.
+ bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
+
+ /// This method should be called by the client when it has changed a loop in
+ /// a way that may affect ScalarEvolution's ability to compute a trip count,
+ /// or if the loop is deleted. This call is potentially expensive for large
+ /// loop bodies.
+ void forgetLoop(const Loop *L);
+
+ /// This method should be called by the client when it has changed a value
+ /// in a way that may affect its value, or which may disconnect it from a
+ /// def-use chain linking it to a loop.
+ void forgetValue(Value *V);
+
+ /// Called when the client has changed the disposition of values in
+ /// this loop.
+ ///
+ /// We don't have a way to invalidate per-loop dispositions. Clear and
+ /// recompute is simpler.
+ void forgetLoopDispositions(const Loop *L) { LoopDispositions.clear(); }
- /// Print the SCEV mappings done by the Predicated Scalar Evolution.
- /// The printed text is indented by \p Depth.
- void print(raw_ostream &OS, unsigned Depth) const;
+ /// Determine the minimum number of zero bits that S is guaranteed to end in
+ /// (at every loop iteration). It is, at the same time, the minimum number
+ /// of times S is divisible by 2. For example, given {4,+,8} it returns 2.
+ /// If S is guaranteed to be 0, it returns the bitwidth of S.
+ uint32_t GetMinTrailingZeros(const SCEV *S);
- private:
- /// Increments the version number of the predicate. This needs to be called
- /// every time the SCEV predicate changes.
- void updateGeneration();
+ /// Determine the unsigned range for a particular SCEV.
+ ///
+ ConstantRange getUnsignedRange(const SCEV *S) {
+ return getRange(S, HINT_RANGE_UNSIGNED);
+ }
- /// Holds a SCEV and the version number of the SCEV predicate used to
- /// perform the rewrite of the expression.
- typedef std::pair<unsigned, const SCEV *> RewriteEntry;
+ /// Determine the signed range for a particular SCEV.
+ ///
+ ConstantRange getSignedRange(const SCEV *S) {
+ return getRange(S, HINT_RANGE_SIGNED);
+ }
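A short sketch of how the range queries are typically consumed (hypothetical caller code; S is an assumed SCEV):

  ConstantRange CR = SE.getUnsignedRange(S);
  if (!CR.isFullSet()) {
    // Every runtime value of S, interpreted as unsigned, is bounded by this.
    APInt UMax = CR.getUnsignedMax();
    (void)UMax;
  }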
- /// Maps a SCEV to the rewrite result of that SCEV at a certain version
- /// number. If this number doesn't match the current Generation, we will
- /// need to do a rewrite. To preserve the transformation order of previous
- /// rewrites, we will rewrite the previous result instead of the original
- /// SCEV.
- DenseMap<const SCEV *, RewriteEntry> RewriteMap;
+ /// Test if the given expression is known to be negative.
+ ///
+ bool isKnownNegative(const SCEV *S);
- /// Records what NoWrap flags we've added to a Value *.
- ValueMap<Value *, SCEVWrapPredicate::IncrementWrapFlags> FlagsMap;
+ /// Test if the given expression is known to be positive.
+ ///
+ bool isKnownPositive(const SCEV *S);
- /// The ScalarEvolution analysis.
- ScalarEvolution &SE;
+ /// Test if the given expression is known to be non-negative.
+ ///
+ bool isKnownNonNegative(const SCEV *S);
- /// The analyzed Loop.
- const Loop &L;
+ /// Test if the given expression is known to be non-positive.
+ ///
+ bool isKnownNonPositive(const SCEV *S);
- /// The SCEVPredicate that forms our context. We will rewrite all
- /// expressions assuming that this predicate true.
- SCEVUnionPredicate Preds;
+ /// Test if the given expression is known to be non-zero.
+ ///
+ bool isKnownNonZero(const SCEV *S);
- /// Marks the version of the SCEV predicate used. When rewriting a SCEV
- /// expression we mark it with the version of the predicate. We use this to
- /// figure out if the predicate has changed from the last rewrite of the
- /// SCEV. If so, we need to perform a new rewrite.
- unsigned Generation;
+ /// Test if the given expression is known to satisfy the condition described
+ /// by Pred, LHS, and RHS.
+ ///
+ bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS);
+
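A hedged sketch of the predicate query (hypothetical caller code; A and B are assumed IR values of the same integer type):

  if (SE.isKnownPredicate(ICmpInst::ICMP_SLT, SE.getSCEV(A), SE.getSCEV(B))) {
    // "A <s B" holds on every execution that reaches this comparison, so a
    // caller could fold such an icmp to true.
  }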
+ /// Return true if the result of the predicate LHS `Pred` RHS is loop
+ /// invariant with respect to L. Set InvariantPred, InvariantLHS and
+ /// InvariantRHS so that InvariantLHS `InvariantPred` InvariantRHS is the
+ /// loop invariant form of LHS `Pred` RHS.
+ bool isLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const Loop *L,
+ ICmpInst::Predicate &InvariantPred,
+ const SCEV *&InvariantLHS,
+ const SCEV *&InvariantRHS);
+
+ /// Simplify LHS and RHS in a comparison with predicate Pred. Return true
+ /// iff any changes were made. If the operands are provably equal or
+ /// unequal, LHS and RHS are set to the same value and Pred is set to either
+ /// ICMP_EQ or ICMP_NE.
+ ///
+ bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS,
+ const SCEV *&RHS, unsigned Depth = 0);
+
+ /// Return the "disposition" of the given SCEV with respect to the given
+ /// loop.
+ LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
+
+ /// Return true if the value of the given SCEV is unchanging in the
+ /// specified loop.
+ bool isLoopInvariant(const SCEV *S, const Loop *L);
+
+ /// Return true if the given SCEV changes value in a known way in the
+ /// specified loop. This property being true implies that the value is
+ /// variant in the loop AND that we can emit an expression to compute the
+ /// value of the expression at any particular loop iteration.
+ bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
+
+ /// Return the "disposition" of the given SCEV with respect to the given
+ /// block.
+ BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+
+ /// Return true if the elements that make up the given SCEV dominate the
+ /// specified basic block.
+ bool dominates(const SCEV *S, const BasicBlock *BB);
+
+ /// Return true if the elements that make up the given SCEV properly dominate
+ /// the specified basic block.
+ bool properlyDominates(const SCEV *S, const BasicBlock *BB);
+
+ /// Test whether the given SCEV has Op as a direct or indirect operand.
+ bool hasOperand(const SCEV *S, const SCEV *Op) const;
+
+ /// Return the size of an element read or written by Inst.
+ const SCEV *getElementSize(Instruction *Inst);
+
+ /// Compute the array dimensions Sizes from the set of Terms extracted from
+ /// the memory access function of this SCEVAddRecExpr (second step of
+ /// delinearization).
+ void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize) const;
+
+ void print(raw_ostream &OS) const;
+ void verify() const;
+
+ /// Collect parametric terms occurring in step expressions (first step of
+ /// delinearization).
+ void collectParametricTerms(const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Terms);
+
+ /// Return in Subscripts the access functions for each dimension in Sizes
+ /// (third step of delinearization).
+ void computeAccessFunctions(const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes);
+
+ /// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
+ /// subscripts and sizes of an array access.
+ ///
+ /// The delinearization is a 3 step process: the first two steps compute the
+ /// sizes of each subscript and the third step computes the access functions
+ /// for the delinearized array:
+ ///
+ /// 1. Find the terms in the step functions
+ /// 2. Compute the array size
+ /// 3. Compute the access function: divide the SCEV by the array size
+ /// starting with the innermost dimensions found in step 2. The Quotient
+ /// is the SCEV to be divided in the next step of the recursion. The
+ /// Remainder is the subscript of the innermost dimension. Loop over all
+ /// array dimensions computed in step 2.
+ ///
+ /// To compute a uniform array size for several memory accesses to the same
+ /// object, one can collect in step 1 all the step terms for all the memory
+ /// accesses, and compute in step 2 a unique array shape. This guarantees
+ /// that the array shape will be the same across all memory accesses.
+ ///
+ /// FIXME: We could derive the result of steps 1 and 2 from a description of
+ /// the array shape given in metadata.
+ ///
+ /// Example:
+ ///
+ /// A[][n][m]
+ ///
+ /// for i
+ /// for j
+ /// for k
+ /// A[j+k][2i][5i] =
+ ///
+ /// The initial SCEV:
+ ///
+ /// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
+ ///
+ /// 1. Find the different terms in the step functions:
+ /// -> [2*m, 5, n*m, n*m]
+ ///
+ /// 2. Compute the array size: sort and unique them
+ /// -> [n*m, 2*m, 5]
+ /// find the GCD of all the terms = 1
+ /// divide by the GCD and erase constant terms
+ /// -> [n*m, 2*m]
+ /// GCD = m
+ /// divide by GCD -> [n, 2]
+ /// remove constant terms
+ /// -> [n]
+ /// size of the array is A[unknown][n][m]
+ ///
+ /// 3. Compute the access function
+ /// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
+ /// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
+ /// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
+ /// The remainder is the subscript of the innermost array dimension: [5i].
+ ///
+ /// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
+ /// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
+ /// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
+ /// The Remainder is the subscript of the next array dimension: [2i].
+ ///
+ /// The subscript of the outermost dimension is the Quotient: [j+k].
+ ///
+ /// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
+ void delinearize(const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize);
+
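A hedged sketch of the three-step interface described above (hypothetical caller code; Inst is an assumed memory access instruction and Expr the SCEV of its address):

  SmallVector<const SCEV *, 4> Subscripts, Sizes;
  SE.delinearize(Expr, Subscripts, Sizes, SE.getElementSize(Inst));
  // On success, Sizes holds the recovered dimension sizes and Subscripts the
  // per-dimension access functions, e.g. [j+k][2i][5i] in the example above.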
+ /// Return the DataLayout associated with the module this SCEV instance is
+ /// operating on.
+ const DataLayout &getDataLayout() const {
+ return F.getParent()->getDataLayout();
+ }
- /// The backedge taken count.
- const SCEV *BackedgeCount;
- };
+ const SCEVPredicate *getEqualPredicate(const SCEVUnknown *LHS,
+ const SCEVConstant *RHS);
+
+ const SCEVPredicate *
+ getWrapPredicate(const SCEVAddRecExpr *AR,
+ SCEVWrapPredicate::IncrementWrapFlags AddedFlags);
+
+ /// Rewrites the SCEV according to the predicates in \p A.
+ const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
+ SCEVUnionPredicate &A);
+ /// Tries to convert the \p S expression to an AddRec expression,
+ /// adding additional predicates to \p Preds as required.
+ const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
+ const SCEV *S, const Loop *L,
+ SmallPtrSetImpl<const SCEVPredicate *> &Preds);
+
+private:
+ /// Compute the backedge taken count knowing the interval difference, the
+ /// stride and presence of the equality in the comparison.
+ const SCEV *computeBECount(const SCEV *Delta, const SCEV *Stride,
+ bool Equality);
+
+ /// Verify if a linear IV with positive stride can overflow when in a
+ /// less-than comparison, knowing the invariant term of the comparison,
+ /// the stride, and whether the recurrence carries NSW/NUW flags.
+ bool doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
+ bool NoWrap);
+
+ /// Verify if a linear IV with negative stride can overflow when in a
+ /// greater-than comparison, knowing the invariant term of the comparison,
+ /// the stride, and whether the recurrence carries NSW/NUW flags.
+ bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
+ bool NoWrap);
+
+private:
+ FoldingSet<SCEV> UniqueSCEVs;
+ FoldingSet<SCEVPredicate> UniquePreds;
+ BumpPtrAllocator SCEVAllocator;
+
+ /// The head of a linked list of all SCEVUnknown values that have been
+ /// allocated. This is used by releaseMemory to locate them all and call
+ /// their destructors.
+ SCEVUnknown *FirstUnknown;
+};
+
+/// Analysis pass that exposes the \c ScalarEvolution for a function.
+class ScalarEvolutionAnalysis
+ : public AnalysisInfoMixin<ScalarEvolutionAnalysis> {
+ friend AnalysisInfoMixin<ScalarEvolutionAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ typedef ScalarEvolution Result;
+
+ ScalarEvolution run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Printer pass for the \c ScalarEvolutionAnalysis results.
+class ScalarEvolutionPrinterPass
+ : public PassInfoMixin<ScalarEvolutionPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class ScalarEvolutionWrapperPass : public FunctionPass {
+ std::unique_ptr<ScalarEvolution> SE;
+
+public:
+ static char ID;
+
+ ScalarEvolutionWrapperPass();
+
+ ScalarEvolution &getSE() { return *SE; }
+ const ScalarEvolution &getSE() const { return *SE; }
+
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void print(raw_ostream &OS, const Module * = nullptr) const override;
+ void verifyAnalysis() const override;
+};
+
+/// An interface layer with SCEV used to manage how we see SCEV expressions
+/// for values in the context of existing predicates. We can add new
+/// predicates, but we cannot remove them.
+///
+/// This layer has multiple purposes:
+/// - provides a simple interface for SCEV versioning.
+/// - guarantees that the order of transformations applied on a SCEV
+/// expression for a single Value is consistent across two different
+/// getSCEV calls. This means that, for example, once we've obtained
+/// an AddRec expression for a certain value through expression
+/// rewriting, we will continue to get an AddRec expression for that
+/// Value.
+/// - lowers the number of expression rewrites.
+class PredicatedScalarEvolution {
+public:
+ PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);
+ const SCEVUnionPredicate &getUnionPredicate() const;
+
+ /// Returns the SCEV expression of V, in the context of the current SCEV
+ /// predicate. The order of transformations applied on the expression of V
+ /// returned by ScalarEvolution is guaranteed to be preserved, even when
+ /// adding new predicates.
+ const SCEV *getSCEV(Value *V);
+
+ /// Get the (predicated) backedge count for the analyzed loop.
+ const SCEV *getBackedgeTakenCount();
+
+ /// Adds a new predicate.
+ void addPredicate(const SCEVPredicate &Pred);
+
+ /// Attempts to produce an AddRecExpr for V by adding additional SCEV
+ /// predicates. If we can't transform the expression into an AddRecExpr we
+ /// return nullptr and do not add additional SCEV predicates to the current
+ /// context.
+ const SCEVAddRecExpr *getAsAddRec(Value *V);
+
+ /// Proves that V doesn't overflow by adding a SCEV predicate.
+ void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+
+ /// Returns true if we've proved that V doesn't wrap by means of a SCEV
+ /// predicate.
+ bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+
+ /// Returns the ScalarEvolution analysis used.
+ ScalarEvolution *getSE() const { return &SE; }
+
+ /// We need to explicitly define the copy constructor because of FlagsMap.
+ PredicatedScalarEvolution(const PredicatedScalarEvolution &);
+
+ /// Print the SCEV mappings done by the Predicated Scalar Evolution.
+ /// The printed text is indented by \p Depth.
+ void print(raw_ostream &OS, unsigned Depth) const;
+
+private:
+ /// Increments the version number of the predicate. This needs to be called
+ /// every time the SCEV predicate changes.
+ void updateGeneration();
+
+ /// Holds a SCEV and the version number of the SCEV predicate used to
+ /// perform the rewrite of the expression.
+ typedef std::pair<unsigned, const SCEV *> RewriteEntry;
+
+ /// Maps a SCEV to the rewrite result of that SCEV at a certain version
+ /// number. If this number doesn't match the current Generation, we will
+ /// need to do a rewrite. To preserve the transformation order of previous
+ /// rewrites, we will rewrite the previous result instead of the original
+ /// SCEV.
+ DenseMap<const SCEV *, RewriteEntry> RewriteMap;
+
+ /// Records what NoWrap flags we've added to a Value *.
+ ValueMap<Value *, SCEVWrapPredicate::IncrementWrapFlags> FlagsMap;
+
+ /// The ScalarEvolution analysis.
+ ScalarEvolution &SE;
+
+ /// The analyzed Loop.
+ const Loop &L;
+
+ /// The SCEVPredicate that forms our context. We will rewrite all
+ /// expressions assuming that this predicate is true.
+ SCEVUnionPredicate Preds;
+
+ /// Marks the version of the SCEV predicate used. When rewriting a SCEV
+ /// expression we mark it with the version of the predicate. We use this to
+ /// figure out if the predicate has changed from the last rewrite of the
+ /// SCEV. If so, we need to perform a new rewrite.
+ unsigned Generation;
+
+ /// The backedge taken count.
+ const SCEV *BackedgeCount;
+};
}
#endif
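A hedged sketch of how a client (for example, a loop vectorizer) might drive the predicated interface declared above; SE, L, and Ptr are assumed to exist:

  PredicatedScalarEvolution PSE(SE, *L);
  // A versioned view of Ptr; repeated calls return consistently rewritten
  // expressions even as predicates are added.
  const SCEV *S = PSE.getSCEV(Ptr);
  (void)S;
  if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
    // Predicates accumulated so far; they can later be emitted as run-time
    // checks, e.g. with SCEVExpander::expandUnionPredicate.
    const SCEVUnionPredicate &Preds = PSE.getUnionPredicate();
    (void)AR; (void)Preds;
  }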
diff --git a/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h b/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
index ac10370b4131..329be51e5eac 100644
--- a/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
+++ b/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
@@ -40,12 +40,12 @@ private:
/// Analysis pass providing a never-invalidated alias analysis result.
class SCEVAA : public AnalysisInfoMixin<SCEVAA> {
friend AnalysisInfoMixin<SCEVAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef SCEVAAResult Result;
- SCEVAAResult run(Function &F, AnalysisManager<Function> &AM);
+ SCEVAAResult run(Function &F, FunctionAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the SCEVAAResult object.
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index 1acf952ab70c..517592a3d049 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -14,12 +14,14 @@
#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/ValueHandle.h"
-#include <set>
namespace llvm {
class TargetTransformInfo;
@@ -28,8 +30,8 @@ namespace llvm {
/// all materialized values are safe to speculate.
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE);
- /// This class uses information about analyze scalars to
- /// rewrite expressions in canonical form.
+ /// This class uses information about analyzed scalars to rewrite expressions
+ /// in canonical form.
///
/// Clients should create an instance of this class when rewriting is needed,
/// and destroy it when finished to allow the release of the associated
@@ -42,42 +44,41 @@ namespace llvm {
const char* IVName;
// InsertedExpressions caches Values for reuse, so must track RAUW.
- std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >
- InsertedExpressions;
+ DenseMap<std::pair<const SCEV *, Instruction *>, TrackingVH<Value>>
+ InsertedExpressions;
+
// InsertedValues only flags inserted instructions so needs no RAUW.
- std::set<AssertingVH<Value> > InsertedValues;
- std::set<AssertingVH<Value> > InsertedPostIncValues;
+ DenseSet<AssertingVH<Value>> InsertedValues;
+ DenseSet<AssertingVH<Value>> InsertedPostIncValues;
/// A memoization of the "relevant" loop for a given SCEV.
DenseMap<const SCEV *, const Loop *> RelevantLoops;
- /// \brief Addrecs referring to any of the given loops are expanded
- /// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
- /// returns the add instruction that adds one to the phi for {0,+,1}<L>,
- /// as opposed to a new phi starting at 1. This is only supported in
- /// non-canonical mode.
+ /// Addrecs referring to any of the given loops are expanded in post-inc
+ /// mode. For example, expanding {1,+,1}<L> in post-inc mode returns the add
+ /// instruction that adds one to the phi for {0,+,1}<L>, as opposed to a new
+ /// phi starting at 1. This is only supported in non-canonical mode.
PostIncLoopSet PostIncLoops;
- /// \brief When this is non-null, addrecs expanded in the loop it indicates
- /// should be inserted with increments at IVIncInsertPos.
+ /// When this is non-null, addrecs expanded in the loop it indicates should
+ /// be inserted with increments at IVIncInsertPos.
const Loop *IVIncInsertLoop;
- /// \brief When expanding addrecs in the IVIncInsertLoop loop, insert the IV
+ /// When expanding addrecs in the IVIncInsertLoop loop, insert the IV
/// increment at this position.
Instruction *IVIncInsertPos;
- /// \brief Phis that complete an IV chain. Reuse
- std::set<AssertingVH<PHINode> > ChainedPhis;
+ /// Phis that complete an IV chain. Reuse them when possible.
+ DenseSet<AssertingVH<PHINode>> ChainedPhis;
- /// \brief When true, expressions are expanded in "canonical" form. In
- /// particular, addrecs are expanded as arithmetic based on a canonical
- /// induction variable. When false, expression are expanded in a more
- /// literal form.
+ /// When true, expressions are expanded in "canonical" form. In particular,
+ /// addrecs are expanded as arithmetic based on a canonical induction
+ /// variable. When false, expressions are expanded in a more literal form.
bool CanonicalMode;
- /// \brief When invoked from LSR, the expander is in "strength reduction"
- /// mode. The only difference is that phi's are only reused if they are
- /// already in "expanded" form.
+ /// When invoked from LSR, the expander is in "strength reduction" mode. The
+ /// only difference is that phi's are only reused if they are already in
+ /// "expanded" form.
bool LSRMode;
typedef IRBuilder<TargetFolder> BuilderType;
@@ -130,7 +131,7 @@ namespace llvm {
friend struct SCEVVisitor<SCEVExpander, Value*>;
public:
- /// \brief Construct a SCEVExpander in "canonical" mode.
+ /// Construct a SCEVExpander in "canonical" mode.
explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
const char *name)
: SE(se), DL(DL), IVName(name), IVIncInsertLoop(nullptr),
@@ -150,9 +151,9 @@ namespace llvm {
void setDebugType(const char* s) { DebugType = s; }
#endif
- /// \brief Erase the contents of the InsertedExpressions map so that users
- /// trying to expand the same expression into multiple BasicBlocks or
- /// different places within the same BasicBlock can do so.
+ /// Erase the contents of the InsertedExpressions map so that users trying
+ /// to expand the same expression into multiple BasicBlocks or different
+ /// places within the same BasicBlock can do so.
void clear() {
InsertedExpressions.clear();
InsertedValues.clear();
@@ -160,8 +161,8 @@ namespace llvm {
ChainedPhis.clear();
}
- /// \brief Return true for expressions that may incur non-trivial cost to
- /// evaluate at runtime.
+ /// Return true for expressions that may incur non-trivial cost to evaluate
+ /// at runtime.
///
/// At is an optional parameter which specifies the point in code where the user is
/// going to expand this expression. Sometimes this knowledge can lead to a
@@ -172,63 +173,60 @@ namespace llvm {
return isHighCostExpansionHelper(Expr, L, At, Processed);
}
- /// \brief This method returns the canonical induction variable of the
- /// specified type for the specified loop (inserting one if there is none).
- /// A canonical induction variable starts at zero and steps by one on each
+ /// This method returns the canonical induction variable of the specified
+ /// type for the specified loop (inserting one if there is none). A
+ /// canonical induction variable starts at zero and steps by one on each
/// iteration.
PHINode *getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty);
- /// \brief Return the induction variable increment's IV operand.
+ /// Return the induction variable increment's IV operand.
Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
bool allowScale);
- /// \brief Utility for hoisting an IV increment.
+ /// Utility for hoisting an IV increment.
bool hoistIVInc(Instruction *IncV, Instruction *InsertPos);
- /// \brief replace congruent phis with their most canonical
- /// representative. Return the number of phis eliminated.
+ /// Replace congruent phis with their most canonical representative. Return
+ /// the number of phis eliminated.
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
SmallVectorImpl<WeakVH> &DeadInsts,
const TargetTransformInfo *TTI = nullptr);
- /// \brief Insert code to directly compute the specified SCEV expression
- /// into the program. The inserted code is inserted into the specified
- /// block.
+ /// Insert code to directly compute the specified SCEV expression into the
+ /// program. The inserted code is inserted into the specified block.
Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I);
- /// \brief Insert code to directly compute the specified SCEV expression
- /// into the program. The inserted code is inserted into the SCEVExpander's
- /// current insertion point. If a type is specified, the result will be
- /// expanded to have that type, with a cast if necessary.
+ /// Insert code to directly compute the specified SCEV expression into the
+ /// program. The inserted code is inserted into the SCEVExpander's current
+ /// insertion point. If a type is specified, the result will be expanded to
+ /// have that type, with a cast if necessary.
Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr);
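A hedged sketch of the expansion entry point (hypothetical caller code; SE, DL, S, and InsertPt are an assumed ScalarEvolution, DataLayout, SCEV, and insertion instruction):

  SCEVExpander Expander(SE, DL, "scev.expand");
  // Emit IR that computes S immediately before InsertPt and return the result.
  Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
  (void)V;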
- /// \brief Generates a code sequence that evaluates this predicate.
- /// The inserted instructions will be at position \p Loc.
- /// The result will be of type i1 and will have a value of 0 when the
- /// predicate is false and 1 otherwise.
+ /// Generates a code sequence that evaluates this predicate. The inserted
+ /// instructions will be at position \p Loc. The result will be of type i1
+ /// and will have a value of 0 when the predicate is false and 1 otherwise.
Value *expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc);
- /// \brief A specialized variant of expandCodeForPredicate, handling the
- /// case when we are expanding code for a SCEVEqualPredicate.
+ /// A specialized variant of expandCodeForPredicate, handling the case when
+ /// we are expanding code for a SCEVEqualPredicate.
Value *expandEqualPredicate(const SCEVEqualPredicate *Pred,
Instruction *Loc);
- /// \brief Generates code that evaluates if the \p AR expression will
- /// overflow.
+ /// Generates code that evaluates if the \p AR expression will overflow.
Value *generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc,
bool Signed);
- /// \brief A specialized variant of expandCodeForPredicate, handling the
- /// case when we are expanding code for a SCEVWrapPredicate.
+ /// A specialized variant of expandCodeForPredicate, handling the case when
+ /// we are expanding code for a SCEVWrapPredicate.
Value *expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc);
- /// \brief A specialized variant of expandCodeForPredicate, handling the
- /// case when we are expanding code for a SCEVUnionPredicate.
+ /// A specialized variant of expandCodeForPredicate, handling the case when
+ /// we are expanding code for a SCEVUnionPredicate.
Value *expandUnionPredicate(const SCEVUnionPredicate *Pred,
Instruction *Loc);
- /// \brief Set the current IV increment loop and position.
+ /// Set the current IV increment loop and position.
void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
assert(!CanonicalMode &&
"IV increment positions are not supported in CanonicalMode");
@@ -236,7 +234,7 @@ namespace llvm {
IVIncInsertPos = Pos;
}
- /// \brief Enable post-inc expansion for addrecs referring to the given
+ /// Enable post-inc expansion for addrecs referring to the given
/// loops. Post-inc expansion is only supported in non-canonical mode.
void setPostInc(const PostIncLoopSet &L) {
assert(!CanonicalMode &&
@@ -244,7 +242,7 @@ namespace llvm {
PostIncLoops = L;
}
- /// \brief Disable all post-inc expansion.
+ /// Disable all post-inc expansion.
void clearPostInc() {
PostIncLoops.clear();
@@ -253,30 +251,29 @@ namespace llvm {
InsertedPostIncValues.clear();
}
- /// \brief Disable the behavior of expanding expressions in canonical form
- /// rather than in a more literal form. Non-canonical mode is useful for
- /// late optimization passes.
+ /// Disable the behavior of expanding expressions in canonical form rather
+ /// than in a more literal form. Non-canonical mode is useful for late
+ /// optimization passes.
void disableCanonicalMode() { CanonicalMode = false; }
void enableLSRMode() { LSRMode = true; }
- /// \brief Set the current insertion point. This is useful if multiple calls
- /// to expandCodeFor() are going to be made with the same insert point and
- /// the insert point may be moved during one of the expansions (e.g. if the
+ /// Set the current insertion point. This is useful if multiple calls to
+ /// expandCodeFor() are going to be made with the same insert point and the
+ /// insert point may be moved during one of the expansions (e.g. if the
/// insert point is not a block terminator).
void setInsertPoint(Instruction *IP) {
assert(IP);
Builder.SetInsertPoint(IP);
}
- /// \brief Clear the current insertion point. This is useful if the
- /// instruction that had been serving as the insertion point may have been
- /// deleted.
+ /// Clear the current insertion point. This is useful if the instruction
+ /// that had been serving as the insertion point may have been deleted.
void clearInsertPoint() {
Builder.ClearInsertionPoint();
}
- /// \brief Return true if the specified instruction was inserted by the code
+ /// Return true if the specified instruction was inserted by the code
/// rewriter. If so, the client should not modify the instruction.
bool isInsertedInstruction(Instruction *I) const {
return InsertedValues.count(I) || InsertedPostIncValues.count(I);
@@ -284,7 +281,14 @@ namespace llvm {
void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }
- /// \brief Try to find LLVM IR value for S available at the point At.
+ /// Try to find an existing LLVM IR value for S available at the point At.
+ Value *getExactExistingExpansion(const SCEV *S, const Instruction *At,
+ Loop *L);
+
+ /// Try to find the ValueOffsetPair for S. The function is mainly used to
+ /// check whether S can be expanded cheaply. If this returns a non-None
+ /// value, we know we can codegen the `ValueOffsetPair` into an expansion
+ /// identical to S, so S can be expanded cheaply.
///
/// L is a hint which tells in which loop to look for the suitable value.
/// On success, return a value which is equivalent to the expanded S at point
@@ -292,44 +296,46 @@ namespace llvm {
///
/// Note that this function does not perform an exhaustive search. I.e if it
/// didn't find any value it does not mean that there is no such value.
- Value *findExistingExpansion(const SCEV *S, const Instruction *At, Loop *L);
+ ///
+ Optional<ScalarEvolution::ValueOffsetPair>
+ getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L);
private:
LLVMContext &getContext() const { return SE.getContext(); }
- /// \brief Recursive helper function for isHighCostExpansion.
+ /// Recursive helper function for isHighCostExpansion.
bool isHighCostExpansionHelper(const SCEV *S, Loop *L,
const Instruction *At,
SmallPtrSetImpl<const SCEV *> &Processed);
- /// \brief Insert the specified binary operator, doing a small amount
- /// of work to avoid inserting an obviously redundant operation.
+ /// Insert the specified binary operator, doing a small amount of work to
+ /// avoid inserting an obviously redundant operation.
Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
- /// \brief Arrange for there to be a cast of V to Ty at IP, reusing an
- /// existing cast if a suitable one exists, moving an existing cast if a
- /// suitable one exists but isn't in the right place, or or creating a new
- /// one.
+ /// Arrange for there to be a cast of V to Ty at IP, reusing an existing
+ /// cast if a suitable one exists, moving an existing cast if a suitable one
+ /// exists but isn't in the right place, or creating a new one.
Value *ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP);
- /// \brief Insert a cast of V to the specified type, which must be possible
- /// with a noop cast, doing what we can to share the casts.
+ /// Insert a cast of V to the specified type, which must be possible with a
+ /// noop cast, doing what we can to share the casts.
Value *InsertNoopCastOfTo(Value *V, Type *Ty);
- /// \brief Expand a SCEVAddExpr with a pointer type into a GEP
- /// instead of using ptrtoint+arithmetic+inttoptr.
+ /// Expand a SCEVAddExpr with a pointer type into a GEP instead of using
+ /// ptrtoint+arithmetic+inttoptr.
Value *expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
PointerType *PTy, Type *Ty, Value *V);
- /// \brief Find a previous Value in ExprValueMap for expand.
- Value *FindValueInExprValueMap(const SCEV *S, const Instruction *InsertPt);
+ /// Find a previous Value in ExprValueMap for expand.
+ ScalarEvolution::ValueOffsetPair
+ FindValueInExprValueMap(const SCEV *S, const Instruction *InsertPt);
Value *expand(const SCEV *S);
- /// \brief Determine the most "relevant" loop for the given SCEV.
+ /// Determine the most "relevant" loop for the given SCEV.
const Loop *getRelevantLoop(const SCEV *);
Value *visitConstant(const SCEVConstant *S) {
diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h
index ff24cafbe680..fdcd8be00dde 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -537,14 +537,58 @@ namespace llvm {
T.visitAll(Root);
}
- /// Recursively visits a SCEV expression and re-writes it.
+ /// Return true if any node in \p Root satisfies the predicate \p Pred.
+ template <typename PredTy>
+ bool SCEVExprContains(const SCEV *Root, PredTy Pred) {
+ struct FindClosure {
+ bool Found = false;
+ PredTy Pred;
+
+ FindClosure(PredTy Pred) : Pred(Pred) {}
+
+ bool follow(const SCEV *S) {
+ if (!Pred(S))
+ return true;
+
+ Found = true;
+ return false;
+ }
+
+ bool isDone() const { return Found; }
+ };
+
+ FindClosure FC(Pred);
+ visitAll(Root, FC);
+ return FC.Found;
+ }
+
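A minimal sketch of the predicate-based search (hypothetical caller code; Root is an assumed SCEV):

  // True iff any node of Root is a SCEVUnknown, i.e. wraps an opaque IR value.
  bool HasUnknown =
      SCEVExprContains(Root, [](const SCEV *S) { return isa<SCEVUnknown>(S); });
  (void)HasUnknown;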
+ /// This visitor recursively visits a SCEV expression and re-writes it.
+ /// The result from each visit is cached, so it will return the same
+ /// SCEV for the same input.
template<typename SC>
class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
protected:
ScalarEvolution &SE;
+ // Memoize the result of each visit so that we only compute once for
+ // the same input SCEV. This is to avoid redundant computations when
+ // a SCEV is referenced by multiple SCEVs. Without memoization, this
+ // visit algorithm would have exponential time complexity in the worst
+ // case, causing the compiler to hang on certain tests.
+ DenseMap<const SCEV *, const SCEV *> RewriteResults;
+
public:
SCEVRewriteVisitor(ScalarEvolution &SE) : SE(SE) {}
+ const SCEV *visit(const SCEV *S) {
+ auto It = RewriteResults.find(S);
+ if (It != RewriteResults.end())
+ return It->second;
+ auto *Visited = SCEVVisitor<SC, const SCEV *>::visit(S);
+ auto Result = RewriteResults.try_emplace(S, Visited);
+ assert(Result.second && "Should insert a new entry");
+ return Result.first->second;
+ }
+
const SCEV *visitConstant(const SCEVConstant *Constant) {
return Constant;
}
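A hedged sketch (hypothetical, not part of the patch) of a SCEVRewriteVisitor subclass that benefits from the memoized visit() above; it maps one SCEVUnknown to a replacement and inherits the structural rewriting for every other node kind:

  struct ReplaceOneUnknown : SCEVRewriteVisitor<ReplaceOneUnknown> {
    const SCEV *Old, *New;
    ReplaceOneUnknown(ScalarEvolution &SE, const SCEV *Old, const SCEV *New)
        : SCEVRewriteVisitor(SE), Old(Old), New(New) {}
    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return Expr == Old ? New : Expr;
    }
  };
  // Usage: const SCEV *R = ReplaceOneUnknown(SE, OldS, NewS).visit(Root);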
diff --git a/include/llvm/Analysis/ScopedNoAliasAA.h b/include/llvm/Analysis/ScopedNoAliasAA.h
index 87b85d4e6635..a7b57310d2d0 100644
--- a/include/llvm/Analysis/ScopedNoAliasAA.h
+++ b/include/llvm/Analysis/ScopedNoAliasAA.h
@@ -27,14 +27,13 @@ class ScopedNoAliasAAResult : public AAResultBase<ScopedNoAliasAAResult> {
friend AAResultBase<ScopedNoAliasAAResult>;
public:
- explicit ScopedNoAliasAAResult() : AAResultBase() {}
- ScopedNoAliasAAResult(ScopedNoAliasAAResult &&Arg)
- : AAResultBase(std::move(Arg)) {}
-
/// Handle invalidation events from the new pass manager.
///
/// By definition, this result is stateless and so remains valid.
- bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &) {
+ return false;
+ }
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
@@ -42,19 +41,17 @@ public:
private:
bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
- void collectMDInDomain(const MDNode *List, const MDNode *Domain,
- SmallPtrSetImpl<const MDNode *> &Nodes) const;
};
/// Analysis pass providing a never-invalidated alias analysis result.
class ScopedNoAliasAA : public AnalysisInfoMixin<ScopedNoAliasAA> {
friend AnalysisInfoMixin<ScopedNoAliasAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef ScopedNoAliasAAResult Result;
- ScopedNoAliasAAResult run(Function &F, AnalysisManager<Function> &AM);
+ ScopedNoAliasAAResult run(Function &F, FunctionAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the ScopedNoAliasAAResult object.
diff --git a/include/llvm/Analysis/TargetFolder.h b/include/llvm/Analysis/TargetFolder.h
index 12bf9fe78a47..ae75d3773362 100644
--- a/include/llvm/Analysis/TargetFolder.h
+++ b/include/llvm/Analysis/TargetFolder.h
@@ -34,9 +34,8 @@ class TargetFolder {
/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *CF = ConstantFoldConstantExpression(CE, DL))
- return CF;
+ if (Constant *CF = ConstantFoldConstant(C, DL))
+ return CF;
return C;
}
diff --git a/include/llvm/Analysis/TargetLibraryInfo.def b/include/llvm/Analysis/TargetLibraryInfo.def
index b2a593d67dca..5d5e5b127e63 100644
--- a/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/include/llvm/Analysis/TargetLibraryInfo.def
@@ -734,6 +734,9 @@ TLI_DEFINE_STRING_INTERNAL("memcpy")
/// void *memmove(void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memmove)
TLI_DEFINE_STRING_INTERNAL("memmove")
+/// void *mempcpy(void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(mempcpy)
+TLI_DEFINE_STRING_INTERNAL("mempcpy")
// void *memrchr(const void *s, int c, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memrchr)
TLI_DEFINE_STRING_INTERNAL("memrchr")
diff --git a/include/llvm/Analysis/TargetLibraryInfo.h b/include/llvm/Analysis/TargetLibraryInfo.h
index 7efa6f059707..196fbc7faa8d 100644
--- a/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/include/llvm/Analysis/TargetLibraryInfo.h
@@ -25,8 +25,8 @@ template <typename T> class ArrayRef;
/// Function 'VectorFnName' is equivalent to 'ScalarFnName' vectorized
/// by a factor 'VectorizationFactor'.
struct VecDesc {
- const char *ScalarFnName;
- const char *VectorFnName;
+ StringRef ScalarFnName;
+ StringRef VectorFnName;
unsigned VectorizationFactor;
};
@@ -50,7 +50,8 @@ class TargetLibraryInfoImpl {
unsigned char AvailableArray[(LibFunc::NumLibFuncs+3)/4];
llvm::DenseMap<unsigned, std::string> CustomNames;
- static const char *const StandardNames[LibFunc::NumLibFuncs];
+ static StringRef const StandardNames[LibFunc::NumLibFuncs];
+ bool ShouldExtI32Param, ShouldExtI32Return, ShouldSignExtI32Param;
enum AvailabilityState {
StandardName = 3, // (memset to all ones)
@@ -85,8 +86,9 @@ public:
/// addVectorizableFunctionsFromVecLib for filling up the tables of
/// vectorizable functions.
enum VectorLibrary {
- NoLibrary, // Don't use any vector library.
- Accelerate // Use Accelerate framework.
+ NoLibrary, // Don't use any vector library.
+ Accelerate, // Use Accelerate framework.
+ SVML // Intel short vector math library.
};
TargetLibraryInfoImpl();
@@ -171,6 +173,26 @@ public:
///
/// Set VF to the vectorization factor.
StringRef getScalarizedFunction(StringRef F, unsigned &VF) const;
+
+ /// Set to true iff i32 parameters to library functions should have signext
+ /// or zeroext attributes if they correspond to C-level int or unsigned int,
+ /// respectively.
+ void setShouldExtI32Param(bool Val) {
+ ShouldExtI32Param = Val;
+ }
+
+ /// Set to true iff i32 results from library functions should have signext
+ /// or zeroext attributes if they correspond to C-level int or unsigned int,
+ /// respectively.
+ void setShouldExtI32Return(bool Val) {
+ ShouldExtI32Return = Val;
+ }
+
+ /// Set to true iff i32 parameters to library functions should have signext
+ /// attribute if they correspond to C-level int or unsigned int.
+ void setShouldSignExtI32Param(bool Val) {
+ ShouldSignExtI32Param = Val;
+ }
};
/// Provides information about what library functions are available for
@@ -251,7 +273,7 @@ public:
case LibFunc::exp2: case LibFunc::exp2f: case LibFunc::exp2l:
case LibFunc::memcmp: case LibFunc::strcmp: case LibFunc::strcpy:
case LibFunc::stpcpy: case LibFunc::strlen: case LibFunc::strnlen:
- case LibFunc::memchr:
+ case LibFunc::memchr: case LibFunc::mempcpy:
return true;
}
return false;
@@ -267,11 +289,38 @@ public:
return Impl->CustomNames.find(F)->second;
}
+ /// Returns the extension attribute kind to be used for i32 parameters
+ /// corresponding to C-level int or unsigned int. May be zeroext, signext,
+ /// or none.
+ Attribute::AttrKind getExtAttrForI32Param(bool Signed = true) const {
+ if (Impl->ShouldExtI32Param)
+ return Signed ? Attribute::SExt : Attribute::ZExt;
+ if (Impl->ShouldSignExtI32Param)
+ return Attribute::SExt;
+ return Attribute::None;
+ }
+
+ /// Returns the extension attribute kind to be used for i32 return values
+ /// corresponding to C-level int or unsigned int. May be zeroext, signext,
+ /// or none.
+ Attribute::AttrKind getExtAttrForI32Return(bool Signed = true) const {
+ if (Impl->ShouldExtI32Return)
+ return Signed ? Attribute::SExt : Attribute::ZExt;
+ return Attribute::None;
+ }
+
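A hedged sketch of how a libcall emitter might consume these queries (hypothetical code; TLI, CI, and ArgIdx are an assumed TargetLibraryInfo, CallInst, and zero-based index of an i32 argument corresponding to a C int):

  Attribute::AttrKind Ext = TLI.getExtAttrForI32Param(/*Signed=*/true);
  if (Ext != Attribute::None)
    CI->addAttribute(ArgIdx + 1, Ext); // attribute slot 0 is the return value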
/// Handle invalidation from the pass manager.
///
/// If we try to invalidate this info, just return false. It cannot become
- /// invalid even if the module changes.
- bool invalidate(Module &, const PreservedAnalyses &) { return false; }
+ /// invalid even if the module or function changes.
+ bool invalidate(Module &, const PreservedAnalyses &,
+ ModuleAnalysisManager::Invalidator &) {
+ return false;
+ }
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &) {
+ return false;
+ }
};
/// Analysis pass providing the \c TargetLibraryInfo.
@@ -295,21 +344,12 @@ public:
TargetLibraryAnalysis(TargetLibraryInfoImpl PresetInfoImpl)
: PresetInfoImpl(std::move(PresetInfoImpl)) {}
- // Move semantics. We spell out the constructors for MSVC.
- TargetLibraryAnalysis(TargetLibraryAnalysis &&Arg)
- : PresetInfoImpl(std::move(Arg.PresetInfoImpl)), Impls(std::move(Arg.Impls)) {}
- TargetLibraryAnalysis &operator=(TargetLibraryAnalysis &&RHS) {
- PresetInfoImpl = std::move(RHS.PresetInfoImpl);
- Impls = std::move(RHS.Impls);
- return *this;
- }
-
TargetLibraryInfo run(Module &M, ModuleAnalysisManager &);
TargetLibraryInfo run(Function &F, FunctionAnalysisManager &);
private:
friend AnalysisInfoMixin<TargetLibraryAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
Optional<TargetLibraryInfoImpl> PresetInfoImpl;
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 7570d22a803c..d583614284ff 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -87,7 +87,8 @@ public:
/// When used as a result of \c TargetIRAnalysis this method will be called
/// when the function this was computed for changes. When it returns false,
/// the information is preserved across those changes.
- bool invalidate(Function &, const PreservedAnalyses &) {
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &) {
// FIXME: We should probably in some way ensure that the subtarget
// information for a function hasn't changed.
return false;
@@ -242,13 +243,17 @@ public:
/// profitable. Set this to UINT_MAX to disable the loop body cost
/// restriction.
unsigned Threshold;
- /// If complete unrolling will reduce the cost of the loop below its
- /// expected dynamic cost while rolled by this percentage, apply a discount
- /// (below) to its unrolled cost.
- unsigned PercentDynamicCostSavedThreshold;
- /// The discount applied to the unrolled cost when the *dynamic* cost
- /// savings of unrolling exceed the \c PercentDynamicCostSavedThreshold.
- unsigned DynamicCostSavingsDiscount;
+ /// If complete unrolling will reduce the cost of the loop, we will boost
+ /// the Threshold by a certain percent to allow more aggressive complete
+ /// unrolling. This value provides the maximum boost percentage that we
+ /// can apply to Threshold (The value should be no less than 100).
+ /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
+ /// MaxPercentThresholdBoost / 100)
+ /// E.g. if complete unrolling reduces the loop execution time by 50%
+ /// then we boost the threshold by the factor of 2x. If unrolling is not
+ /// expected to reduce the running time, then we do not increase the
+ /// threshold.
+ unsigned MaxPercentThresholdBoost;
/// The cost threshold for the unrolled loop when optimizing for size (set
/// to UINT_MAX to disable).
unsigned OptSizeThreshold;
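A worked sketch of the boosted-threshold rule documented for MaxPercentThresholdBoost; the names are illustrative, UnrolledCost is assumed non-zero, the usual <algorithm>/<cstdint> includes are assumed, and the in-tree unroller may compute the ratio at finer (percent) granularity:

  unsigned boostedThreshold(unsigned Threshold, uint64_t RolledCost,
                            uint64_t UnrolledCost,
                            unsigned MaxPercentThresholdBoost) {
    uint64_t Ratio = std::max<uint64_t>(1, RolledCost / UnrolledCost);
    uint64_t Cap   = MaxPercentThresholdBoost / 100;  // e.g. 400 -> at most 4x
    // E.g. unrolling that halves the cost gives Ratio == 2, so the threshold
    // doubles; Ratio <= 1 leaves it unchanged.
    return static_cast<unsigned>(Threshold * std::min(Ratio, Cap));
  }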
@@ -264,6 +269,13 @@ public:
/// transformation will select an unrolling factor based on the current cost
/// threshold and other factors.
unsigned Count;
+ /// A forced peeling factor (the number of bodies of the original loop
+ /// that should be peeled off before the loop body). When set to 0, the
+ /// unrolling transformation will select a peeling factor based on profile
+ /// information and other factors.
+ unsigned PeelCount;
+ /// Default unroll count for loops with run-time trip count.
+ unsigned DefaultUnrollRuntimeCount;
// Set the maximum unrolling factor. The unrolling factor may be selected
// using the appropriate cost threshold, but may not exceed this number
// (set to UINT_MAX to disable). This does not apply in cases where the
@@ -273,6 +285,11 @@ public:
/// applies even if full unrolling is selected. This allows a target to fall
/// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
unsigned FullUnrollMaxCount;
+ // Represents the number of instructions optimized when a "back edge"
+ // becomes a "fall through" in the unrolled loop.
+ // For now we count a conditional branch on a backedge and a comparison
+ // feeding it.
+ unsigned BEInsns;
/// Allow partial unrolling (unrolling of loops to expand the size of the
/// loop body, not only to eliminate small constant-trip-count loops).
bool Partial;
@@ -288,6 +305,10 @@ public:
/// Apply loop unroll on any kind of loop
/// (mainly to loops that fail runtime unrolling).
bool Force;
+ /// Allow using trip count upper bound to unroll loops.
+ bool UpperBound;
+ /// Allow peeling off loop iterations for loops with low dynamic tripcount.
+ bool AllowPeeling;
};
/// \brief Get target-customized preferences for the generic loop unrolling
@@ -351,6 +372,12 @@ public:
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace = 0) const;
+ /// \brief Return true if the target supports the load / store
+ /// instruction with the given Offset on an address of the form reg + Offset. It
+ /// may be that Offset is too big for a certain type (register
+ /// class).
+ bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const;
+
/// \brief Return true if it's free to truncate a value of type Ty1 to type
/// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
/// by referencing its sub-register AX.
@@ -373,6 +400,10 @@ public:
/// target.
bool shouldBuildLookupTables() const;
+ /// \brief Return true if switches should be turned into lookup tables
+ /// containing this constant value for the target.
+ bool shouldBuildLookupTablesForConstant(Constant *C) const;
+
/// \brief Don't restrict interleaved unrolling to small loops.
bool enableAggressiveInterleaving(bool LoopHasReductions) const;
@@ -389,7 +420,8 @@ public:
bool isFPVectorizationPotentiallyUnsafe() const;
/// \brief Determine if the target supports unaligned memory accesses.
- bool allowsMisalignedMemoryAccesses(unsigned BitWidth, unsigned AddressSpace = 0,
+ bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+ unsigned BitWidth, unsigned AddressSpace = 0,
unsigned Alignment = 1,
bool *Fast = nullptr) const;
@@ -435,7 +467,11 @@ public:
SK_Reverse, ///< Reverse the order of the vector.
SK_Alternate, ///< Choose alternate elements from vector.
SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
- SK_ExtractSubvector ///< ExtractSubvector Index indicates start offset.
+ SK_ExtractSubvector,///< ExtractSubvector Index indicates start offset.
+ SK_PermuteTwoSrc, ///< Merge elements from two source vectors into one
+ ///< with any shuffle mask.
+ SK_PermuteSingleSrc ///< Shuffle elements of single source vector with any
+ ///< shuffle mask.
};
/// \brief Additional information about an operand's possible values.
@@ -457,10 +493,6 @@ public:
/// \return The width of the largest scalar or vector register type.
unsigned getRegisterBitWidth(bool Vector) const;
- /// \return The bitwidth of the largest vector type that should be used to
- /// load/store in the given address space.
- unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
-
/// \return The size of a cache line in bytes.
unsigned getCacheLineSize() const;
@@ -611,6 +643,38 @@ public:
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
+ /// \returns The bitwidth of the largest vector type that should be used to
+ /// load/store in the given address space.
+ unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
+
+ /// \returns True if the load instruction is legal to vectorize.
+ bool isLegalToVectorizeLoad(LoadInst *LI) const;
+
+ /// \returns True if the store instruction is legal to vectorize.
+ bool isLegalToVectorizeStore(StoreInst *SI) const;
+
+ /// \returns True if it is legal to vectorize the given load chain.
+ bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const;
+
+ /// \returns True if it is legal to vectorize the given store chain.
+ bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const;
+
+ /// \returns The new vector factor value if the target doesn't support \p
+ /// SizeInBytes loads or has a better vector factor.
+ unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const;
+
+ /// \returns The new vector factor value if the target doesn't support \p
+ /// SizeInBytes stores or has a better vector factor.
+ unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const;
+
/// @}
private:
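A hedged sketch of how a load/store vectorizer might combine the new hooks before merging a chain of loads; the wrapper function is hypothetical:

  bool chainFitsTarget(const TargetTransformInfo &TTI,
                       unsigned ChainSizeInBytes, unsigned Alignment,
                       unsigned AddrSpace) {
    // Reject chains wider than the widest vector register the target will
    // load/store in this address space, then defer to the legality hook.
    if (ChainSizeInBytes * 8 > TTI.getLoadStoreVecRegBitWidth(AddrSpace))
      return false;
    return TTI.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                           AddrSpace);
  }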
@@ -659,16 +723,19 @@ public:
virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace) = 0;
+ virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) = 0;
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
virtual bool isProfitableToHoist(Instruction *I) = 0;
virtual bool isTypeLegal(Type *Ty) = 0;
virtual unsigned getJumpBufAlignment() = 0;
virtual unsigned getJumpBufSize() = 0;
virtual bool shouldBuildLookupTables() = 0;
+ virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
virtual bool enableInterleavedAccessVectorization() = 0;
virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
- virtual bool allowsMisalignedMemoryAccesses(unsigned BitWidth,
+ virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+ unsigned BitWidth,
unsigned AddressSpace,
unsigned Alignment,
bool *Fast) = 0;
@@ -684,7 +751,6 @@ public:
Type *Ty) = 0;
virtual unsigned getNumberOfRegisters(bool Vector) = 0;
virtual unsigned getRegisterBitWidth(bool Vector) = 0;
- virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) = 0;
virtual unsigned getCacheLineSize() = 0;
virtual unsigned getPrefetchDistance() = 0;
virtual unsigned getMinPrefetchStride() = 0;
@@ -737,6 +803,21 @@ public:
Type *ExpectedType) = 0;
virtual bool areInlineCompatible(const Function *Caller,
const Function *Callee) const = 0;
+ virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
+ virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
+ virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
+ virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const = 0;
+ virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const = 0;
+ virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const = 0;
+ virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const = 0;
};
template <typename T>
@@ -820,6 +901,9 @@ public:
return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace);
}
+ bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) override {
+ return Impl.isFoldableMemAccessOffset(I, Offset);
+ }
bool isTruncateFree(Type *Ty1, Type *Ty2) override {
return Impl.isTruncateFree(Ty1, Ty2);
}
@@ -832,6 +916,9 @@ public:
bool shouldBuildLookupTables() override {
return Impl.shouldBuildLookupTables();
}
+ bool shouldBuildLookupTablesForConstant(Constant *C) override {
+ return Impl.shouldBuildLookupTablesForConstant(C);
+ }
bool enableAggressiveInterleaving(bool LoopHasReductions) override {
return Impl.enableAggressiveInterleaving(LoopHasReductions);
}
@@ -841,9 +928,10 @@ public:
bool isFPVectorizationPotentiallyUnsafe() override {
return Impl.isFPVectorizationPotentiallyUnsafe();
}
- bool allowsMisalignedMemoryAccesses(unsigned BitWidth, unsigned AddressSpace,
+ bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+ unsigned BitWidth, unsigned AddressSpace,
unsigned Alignment, bool *Fast) override {
- return Impl.allowsMisalignedMemoryAccesses(BitWidth, AddressSpace,
+ return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
Alignment, Fast);
}
PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
@@ -875,10 +963,6 @@ public:
return Impl.getRegisterBitWidth(Vector);
}
- unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) override {
- return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
- }
-
unsigned getCacheLineSize() override {
return Impl.getCacheLineSize();
}
@@ -978,6 +1062,37 @@ public:
const Function *Callee) const override {
return Impl.areInlineCompatible(Caller, Callee);
}
+ unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
+ return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
+ }
+ bool isLegalToVectorizeLoad(LoadInst *LI) const override {
+ return Impl.isLegalToVectorizeLoad(LI);
+ }
+ bool isLegalToVectorizeStore(StoreInst *SI) const override {
+ return Impl.isLegalToVectorizeStore(SI);
+ }
+ bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const override {
+ return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
+ AddrSpace);
+ }
+ bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const override {
+ return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
+ AddrSpace);
+ }
+ unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const override {
+ return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
+ }
+ unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const override {
+ return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
+ }
};
template <typename T>
@@ -1025,11 +1140,11 @@ public:
return *this;
}
- Result run(const Function &F, AnalysisManager<Function> &);
+ Result run(const Function &F, FunctionAnalysisManager &);
private:
friend AnalysisInfoMixin<TargetIRAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
/// \brief The callback used to produce a result.
///
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index a97624bc2ab0..68b38a7fa538 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -152,6 +152,15 @@ public:
case Intrinsic::var_annotation:
case Intrinsic::experimental_gc_result:
case Intrinsic::experimental_gc_relocate:
+ case Intrinsic::coro_alloc:
+ case Intrinsic::coro_begin:
+ case Intrinsic::coro_free:
+ case Intrinsic::coro_end:
+ case Intrinsic::coro_frame:
+ case Intrinsic::coro_size:
+ case Intrinsic::coro_suspend:
+ case Intrinsic::coro_param:
+ case Intrinsic::coro_subfn_addr:
// These intrinsics don't actually represent code after lowering.
return TTI::TCC_Free;
}
@@ -226,6 +235,8 @@ public:
return -1;
}
+ bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) { return true; }
+
bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }
bool isProfitableToHoist(Instruction *I) { return true; }
@@ -237,6 +248,7 @@ public:
unsigned getJumpBufSize() { return 0; }
bool shouldBuildLookupTables() { return true; }
+ bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }
bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }
@@ -244,7 +256,8 @@ public:
bool isFPVectorizationPotentiallyUnsafe() { return false; }
- bool allowsMisalignedMemoryAccesses(unsigned BitWidth,
+ bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+ unsigned BitWidth,
unsigned AddressSpace,
unsigned Alignment,
bool *Fast) { return false; }
@@ -278,8 +291,6 @@ public:
unsigned getRegisterBitWidth(bool Vector) { return 32; }
- unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) { return 128; }
-
unsigned getCacheLineSize() { return 0; }
unsigned getPrefetchDistance() { return 0; }
@@ -381,6 +392,36 @@ public:
(Caller->getFnAttribute("target-features") ==
Callee->getFnAttribute("target-features"));
}
+
+ unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }
+
+ bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }
+
+ bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }
+
+ bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const {
+ return true;
+ }
+
+ bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+ unsigned Alignment,
+ unsigned AddrSpace) const {
+ return true;
+ }
+
+ unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const {
+ return VF;
+ }
+
+ unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+ unsigned ChainSizeInBytes,
+ VectorType *VecTy) const {
+ return VF;
+ }
};
/// \brief CRTP base class for use as a mix-in that aids implementing
@@ -394,12 +435,6 @@ protected:
explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}
public:
- // Provide value semantics. MSVC requires that we spell all of these out.
- TargetTransformInfoImplCRTPBase(const TargetTransformInfoImplCRTPBase &Arg)
- : BaseT(static_cast<const BaseT &>(Arg)) {}
- TargetTransformInfoImplCRTPBase(TargetTransformInfoImplCRTPBase &&Arg)
- : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
-
using BaseT::getCallCost;
unsigned getCallCost(const Function *F, int NumArgs) {
@@ -447,18 +482,22 @@ public:
int64_t BaseOffset = 0;
int64_t Scale = 0;
- // Assumes the address space is 0 when Ptr is nullptr.
- unsigned AS =
- (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
- auto GTI = gep_type_begin(PointeeType, AS, Operands);
+ auto GTI = gep_type_begin(PointeeType, Operands);
+ Type *TargetType;
for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
+ TargetType = GTI.getIndexedType();
// We assume that the cost of Scalar GEP with constant index and the
// cost of Vector GEP with splat constant index are the same.
const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
if (!ConstIdx)
if (auto Splat = getSplatValue(*I))
ConstIdx = dyn_cast<ConstantInt>(Splat);
- if (isa<SequentialType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
+ // For structures, the index is always a splat or scalar constant
+ assert(ConstIdx && "Unexpected GEP index");
+ uint64_t Field = ConstIdx->getZExtValue();
+ BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+ } else {
int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
if (ConstIdx)
BaseOffset += ConstIdx->getSExtValue() * ElementSize;
@@ -469,20 +508,16 @@ public:
return TTI::TCC_Basic;
Scale = ElementSize;
}
- } else {
- StructType *STy = cast<StructType>(*GTI);
- // For structures the index is always splat or scalar constant
- assert(ConstIdx && "Unexpected GEP index");
- uint64_t Field = ConstIdx->getZExtValue();
- BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
}
}
+ // Assumes the address space is 0 when Ptr is nullptr.
+ unsigned AS =
+ (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
if (static_cast<T *>(this)->isLegalAddressingMode(
- PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
- BaseOffset, HasBaseReg, Scale, AS)) {
+ TargetType, const_cast<GlobalValue *>(BaseGV), BaseOffset,
+ HasBaseReg, Scale, AS))
return TTI::TCC_Free;
- }
return TTI::TCC_Basic;
}
diff --git a/include/llvm/Analysis/TypeBasedAliasAnalysis.h b/include/llvm/Analysis/TypeBasedAliasAnalysis.h
index 229b0f97b983..fd726e6cd37f 100644
--- a/include/llvm/Analysis/TypeBasedAliasAnalysis.h
+++ b/include/llvm/Analysis/TypeBasedAliasAnalysis.h
@@ -27,13 +27,13 @@ class TypeBasedAAResult : public AAResultBase<TypeBasedAAResult> {
friend AAResultBase<TypeBasedAAResult>;
public:
- explicit TypeBasedAAResult() {}
- TypeBasedAAResult(TypeBasedAAResult &&Arg) : AAResultBase(std::move(Arg)) {}
-
/// Handle invalidation events from the new pass manager.
///
/// By definition, this result is stateless and so remains valid.
- bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+ bool invalidate(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &) {
+ return false;
+ }
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
@@ -50,12 +50,12 @@ private:
/// Analysis pass providing a never-invalidated alias analysis result.
class TypeBasedAA : public AnalysisInfoMixin<TypeBasedAA> {
friend AnalysisInfoMixin<TypeBasedAA>;
- static char PassID;
+ static AnalysisKey Key;
public:
typedef TypeBasedAAResult Result;
- TypeBasedAAResult run(Function &F, AnalysisManager<Function> &AM);
+ TypeBasedAAResult run(Function &F, FunctionAnalysisManager &AM);
};
/// Legacy wrapper pass to provide the TypeBasedAAResult object.
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 2c6221d4933f..dd767217345a 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -16,7 +16,6 @@
#define LLVM_ANALYSIS_VALUETRACKING_H
#include "llvm/IR/CallSite.h"
-#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/DataTypes.h"
@@ -49,7 +48,7 @@ template <typename T> class ArrayRef;
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
- void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -60,14 +59,15 @@ template <typename T> class ArrayRef;
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
APInt &KnownZero, APInt &KnownOne);
/// Return true if LHS and RHS have no common bits set.
- bool haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
+ bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Determine whether the sign bit is known to be zero or one. Convenience
/// wrapper around computeKnownBits.
- void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+ void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -78,7 +78,7 @@ template <typename T> class ArrayRef;
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
- bool isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL,
+ bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
bool OrZero = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -88,34 +88,35 @@ template <typename T> class ArrayRef;
/// vectors, return true if every element is known to be non-zero when
/// defined. Supports values with integer or pointer type and vectors of
/// integers.
- bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Returns true if the given value is known to be non-negative.
- bool isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownNonNegative(const Value *V, const DataLayout &DL,
+ unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
- bool isKnownPositive(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
- bool isKnownNegative(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
- bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
+ bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -129,7 +130,8 @@ template <typename T> class ArrayRef;
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
- bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
+ bool MaskedValueIsZero(const Value *V, const APInt &Mask,
+ const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -141,7 +143,7 @@ template <typename T> class ArrayRef;
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
- unsigned ComputeNumSignBits(Value *Op, const DataLayout &DL,
+ unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -213,7 +215,7 @@ template <typename T> class ArrayRef;
/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
- uint64_t GetStringLength(Value *V);
+ uint64_t GetStringLength(const Value *V);
/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
@@ -306,12 +308,12 @@ template <typename T> class ArrayRef;
bool isKnownNonNull(const Value *V);
/// Return true if this pointer couldn't possibly be null. If the context
- /// instruction is specified, perform context-sensitive analysis and return
- /// true if the pointer couldn't possibly be null at the specified
- /// instruction.
+ /// instruction and dominator tree are specified, perform context-sensitive
+ /// analysis and return true if the pointer couldn't possibly be null at the
+ /// specified instruction.
bool isKnownNonNullAt(const Value *V,
const Instruction *CtxI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr);
/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
@@ -320,23 +322,25 @@ template <typename T> class ArrayRef;
const DominatorTree *DT = nullptr);
enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
- OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+ OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
+ const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
- OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+ OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
+ const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
- OverflowResult computeOverflowForSignedAdd(Value *LHS, Value *RHS,
+ OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
- OverflowResult computeOverflowForSignedAdd(AddOperator *Add,
+ OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -345,7 +349,8 @@ template <typename T> class ArrayRef;
/// Returns true if the arithmetic part of the \p II 's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p II being an <op>.with.overflow intrinsic.
- bool isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT);
+ bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+ const DominatorTree &DT);
/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
@@ -445,11 +450,16 @@ template <typename T> class ArrayRef;
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
Instruction::CastOps *CastOp = nullptr);
-
- /// Parse out a conservative ConstantRange from !range metadata.
- ///
- /// E.g. if RangeMD is !{i32 0, i32 10, i32 15, i32 20} then return [0, 20).
- ConstantRange getConstantRangeFromMetadata(MDNode &RangeMD);
+ static inline SelectPatternResult
+ matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS,
+ Instruction::CastOps *CastOp = nullptr) {
+ Value *L = const_cast<Value*>(LHS);
+ Value *R = const_cast<Value*>(RHS);
+ auto Result = matchSelectPattern(const_cast<Value*>(V), L, R);
+ LHS = L;
+ RHS = R;
+ return Result;
+ }
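A small usage sketch of the const overload added above (the helper itself is hypothetical); it lets const-only analyses classify min/max selects without writing their own const_casts:

  bool isMinOrMaxSelect(const Value *V) {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternResult Res = matchSelectPattern(V, LHS, RHS);
    return SelectPatternResult::isMinOrMax(Res.Flavor);
  }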
/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return None if no
@@ -461,10 +471,13 @@ template <typename T> class ArrayRef;
/// T | T | F
/// F | T | T
/// (A)
- Optional<bool> isImpliedCondition(
- Value *LHS, Value *RHS, const DataLayout &DL, bool InvertAPred = false,
- unsigned Depth = 0, AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr);
+ Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
+ bool InvertAPred = false,
+ unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
} // end namespace llvm
#endif
diff --git a/include/llvm/Bitcode/BitCodes.h b/include/llvm/Bitcode/BitCodes.h
index 66400b697c5c..cfc7a1d7d6bd 100644
--- a/include/llvm/Bitcode/BitCodes.h
+++ b/include/llvm/Bitcode/BitCodes.h
@@ -25,6 +25,14 @@
#include <cassert>
namespace llvm {
+/// Offsets of the 32-bit fields of bitcode wrapper header.
+static const unsigned BWH_MagicField = 0 * 4;
+static const unsigned BWH_VersionField = 1 * 4;
+static const unsigned BWH_OffsetField = 2 * 4;
+static const unsigned BWH_SizeField = 3 * 4;
+static const unsigned BWH_CPUTypeField = 4 * 4;
+static const unsigned BWH_HeaderSize = 5 * 4;
+
namespace bitc {
enum StandardWidths {
BlockIDWidth = 8, // We use VBR-8 for block IDs.
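For illustration, a hedged sketch of decoding a bitcode wrapper header with the BWH_* offsets above; the struct and helpers are hypothetical and assume Buf points at at least BWH_HeaderSize bytes of little-endian wrapper data:

  struct WrapperHeader {
    uint32_t Magic, Version, Offset, Size, CPUType;
  };

  static uint32_t readLE32(const unsigned char *P) {
    return uint32_t(P[0]) | (uint32_t(P[1]) << 8) | (uint32_t(P[2]) << 16) |
           (uint32_t(P[3]) << 24);
  }

  static WrapperHeader readWrapperHeader(const unsigned char *Buf) {
    WrapperHeader H;
    H.Magic   = readLE32(Buf + BWH_MagicField);
    H.Version = readLE32(Buf + BWH_VersionField);
    H.Offset  = readLE32(Buf + BWH_OffsetField);  // start of the wrapped bitcode
    H.Size    = readLE32(Buf + BWH_SizeField);    // size of the wrapped bitcode
    H.CPUType = readLE32(Buf + BWH_CPUTypeField);
    return H;
  }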
diff --git a/include/llvm/Bitcode/ReaderWriter.h b/include/llvm/Bitcode/BitcodeReader.h
index 76a60a0b8d25..9e042b17241f 100644
--- a/include/llvm/Bitcode/ReaderWriter.h
+++ b/include/llvm/Bitcode/BitcodeReader.h
@@ -1,4 +1,4 @@
-//===-- llvm/Bitcode/ReaderWriter.h - Bitcode reader/writers ----*- C++ -*-===//
+//===-- llvm/Bitcode/BitcodeReader.h - Bitcode reader ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,108 +7,135 @@
//
//===----------------------------------------------------------------------===//
//
-// This header defines interfaces to read and write LLVM bitcode files/streams.
+// This header defines interfaces to read LLVM bitcode files/streams.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_BITCODE_READERWRITER_H
-#define LLVM_BITCODE_READERWRITER_H
+#ifndef LLVM_BITCODE_BITCODEREADER_H
+#define LLVM_BITCODE_BITCODEREADER_H
+#include "llvm/Bitcode/BitCodes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
-#include <string>
namespace llvm {
- class BitstreamWriter;
- class DataStreamer;
class LLVMContext;
class Module;
- class ModulePass;
- class raw_ostream;
- /// Offsets of the 32-bit fields of bitcode wrapper header.
- static const unsigned BWH_MagicField = 0*4;
- static const unsigned BWH_VersionField = 1*4;
- static const unsigned BWH_OffsetField = 2*4;
- static const unsigned BWH_SizeField = 3*4;
- static const unsigned BWH_CPUTypeField = 4*4;
- static const unsigned BWH_HeaderSize = 5*4;
+ // These functions are for converting Expected/Error values to
+ // ErrorOr/std::error_code for compatibility with legacy clients. FIXME:
+ // Remove these functions once no longer needed by the C and libLTO APIs.
+
+ std::error_code errorToErrorCodeAndEmitErrors(LLVMContext &Ctx, Error Err);
+
+ template <typename T>
+ ErrorOr<T> expectedToErrorOrAndEmitErrors(LLVMContext &Ctx, Expected<T> Val) {
+ if (!Val)
+ return errorToErrorCodeAndEmitErrors(Ctx, Val.takeError());
+ return std::move(*Val);
+ }
+
+ /// Represents a module in a bitcode file.
+ class BitcodeModule {
+ // This covers the identification (if present) and module blocks.
+ ArrayRef<uint8_t> Buffer;
+ StringRef ModuleIdentifier;
+
+ // The bitstream location of the IDENTIFICATION_BLOCK.
+ uint64_t IdentificationBit;
+
+ // The bitstream location of this module's MODULE_BLOCK.
+ uint64_t ModuleBit;
+
+ BitcodeModule(ArrayRef<uint8_t> Buffer, StringRef ModuleIdentifier,
+ uint64_t IdentificationBit, uint64_t ModuleBit)
+ : Buffer(Buffer), ModuleIdentifier(ModuleIdentifier),
+ IdentificationBit(IdentificationBit), ModuleBit(ModuleBit) {}
+
+ // Calls the ctor.
+ friend Expected<std::vector<BitcodeModule>>
+ getBitcodeModuleList(MemoryBufferRef Buffer);
+
+ Expected<std::unique_ptr<Module>> getModuleImpl(LLVMContext &Context,
+ bool MaterializeAll,
+ bool ShouldLazyLoadMetadata,
+ bool IsImporting);
+
+ public:
+ StringRef getBuffer() const {
+ return StringRef((const char *)Buffer.begin(), Buffer.size());
+ }
+
+ StringRef getModuleIdentifier() const { return ModuleIdentifier; }
+
+ /// Read the bitcode module and prepare for lazy deserialization of function
+ /// bodies. If ShouldLazyLoadMetadata is true, lazily load metadata as well.
+ /// If IsImporting is true, this module is being parsed for ThinLTO
+ /// importing into another module.
+ Expected<std::unique_ptr<Module>> getLazyModule(LLVMContext &Context,
+ bool ShouldLazyLoadMetadata,
+ bool IsImporting);
+
+ /// Read the entire bitcode module and return it.
+ Expected<std::unique_ptr<Module>> parseModule(LLVMContext &Context);
+
+ /// Check if the given bitcode buffer contains a summary block.
+ Expected<bool> hasSummary();
+
+ /// Parse the specified bitcode buffer, returning the module summary index.
+ Expected<std::unique_ptr<ModuleSummaryIndex>> getSummary();
+ };
+
+ /// Returns a list of modules in the specified bitcode buffer.
+ Expected<std::vector<BitcodeModule>>
+ getBitcodeModuleList(MemoryBufferRef Buffer);
/// Read the header of the specified bitcode buffer and prepare for lazy
/// deserialization of function bodies. If ShouldLazyLoadMetadata is true,
- /// lazily load metadata as well. If successful, this moves Buffer. On
- /// error, this *does not* move Buffer.
- ErrorOr<std::unique_ptr<Module>>
- getLazyBitcodeModule(std::unique_ptr<MemoryBuffer> &&Buffer,
- LLVMContext &Context,
- bool ShouldLazyLoadMetadata = false);
-
- /// Read the header of the specified stream and prepare for lazy
- /// deserialization and streaming of function bodies.
- ErrorOr<std::unique_ptr<Module>>
- getStreamedBitcodeModule(StringRef Name,
- std::unique_ptr<DataStreamer> Streamer,
- LLVMContext &Context);
+ /// lazily load metadata as well. If IsImporting is true, this module is
+ /// being parsed for ThinLTO importing into another module.
+ Expected<std::unique_ptr<Module>>
+ getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context,
+ bool ShouldLazyLoadMetadata = false,
+ bool IsImporting = false);
+
+ /// Like getLazyBitcodeModule, except that the module takes ownership of
+ /// the memory buffer if successful. If successful, this moves Buffer. On
+ /// error, this *does not* move Buffer. If IsImporting is true, this module is
+ /// being parsed for ThinLTO importing into another module.
+ Expected<std::unique_ptr<Module>> getOwningLazyBitcodeModule(
+ std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context,
+ bool ShouldLazyLoadMetadata = false, bool IsImporting = false);
/// Read the header of the specified bitcode buffer and extract just the
/// triple information. If successful, this returns a string. On error, this
/// returns "".
- std::string getBitcodeTargetTriple(MemoryBufferRef Buffer,
- LLVMContext &Context);
+ Expected<std::string> getBitcodeTargetTriple(MemoryBufferRef Buffer);
/// Return true if \p Buffer contains a bitcode file with ObjC code (category
/// or class) in it.
- bool isBitcodeContainingObjCCategory(MemoryBufferRef Buffer,
- LLVMContext &Context);
+ Expected<bool> isBitcodeContainingObjCCategory(MemoryBufferRef Buffer);
/// Read the header of the specified bitcode buffer and extract just the
/// producer string information. If successful, this returns a string. On
/// error, this returns "".
- std::string getBitcodeProducerString(MemoryBufferRef Buffer,
- LLVMContext &Context);
+ Expected<std::string> getBitcodeProducerString(MemoryBufferRef Buffer);
/// Read the specified bitcode file, returning the module.
- ErrorOr<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer,
- LLVMContext &Context);
+ Expected<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer,
+ LLVMContext &Context);
/// Check if the given bitcode buffer contains a summary block.
- bool
- hasGlobalValueSummary(MemoryBufferRef Buffer,
- const DiagnosticHandlerFunction &DiagnosticHandler);
+ Expected<bool> hasGlobalValueSummary(MemoryBufferRef Buffer);
/// Parse the specified bitcode buffer, returning the module summary index.
- ErrorOr<std::unique_ptr<ModuleSummaryIndex>>
- getModuleSummaryIndex(MemoryBufferRef Buffer,
- const DiagnosticHandlerFunction &DiagnosticHandler);
-
- /// \brief Write the specified module to the specified raw output stream.
- ///
- /// For streams where it matters, the given stream should be in "binary"
- /// mode.
- ///
- /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
- /// Value in \c M. These will be reconstructed exactly when \a M is
- /// deserialized.
- ///
- /// If \c EmitSummaryIndex, emit the module's summary index (currently
- /// for use in ThinLTO optimization).
- void WriteBitcodeToFile(const Module *M, raw_ostream &Out,
- bool ShouldPreserveUseListOrder = false,
- const ModuleSummaryIndex *Index = nullptr,
- bool GenerateHash = false);
-
- /// Write the specified module summary index to the given raw output stream,
- /// where it will be written in a new bitcode block. This is used when
- /// writing the combined index file for ThinLTO. When writing a subset of the
- /// index for a distributed backend, provide the \p ModuleToSummariesForIndex
- /// map.
- void WriteIndexToFile(const ModuleSummaryIndex &Index, raw_ostream &Out,
- std::map<std::string, GVSummaryMapTy>
- *ModuleToSummariesForIndex = nullptr);
+ Expected<std::unique_ptr<ModuleSummaryIndex>>
+ getModuleSummaryIndex(MemoryBufferRef Buffer);
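A hedged usage sketch of the Expected-based entry points declared above; the function name is illustrative and the error handling is deliberately minimal:

  std::unique_ptr<Module> tryLoadModule(MemoryBufferRef Buf, LLVMContext &Ctx) {
    Expected<std::unique_ptr<Module>> ModOrErr = parseBitcodeFile(Buf, Ctx);
    if (!ModOrErr) {
      // A real client would route this through its own diagnostics, or use
      // expectedToErrorOrAndEmitErrors for legacy std::error_code interfaces.
      logAllUnhandledErrors(ModOrErr.takeError(), errs(), "bitcode error: ");
      return nullptr;
    }
    return std::move(*ModOrErr);
  }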
/// isBitcodeWrapper - Return true if the given bytes are the magic bytes
/// for an LLVM IR bitcode wrapper.
@@ -183,26 +210,11 @@ namespace llvm {
}
const std::error_category &BitcodeErrorCategory();
- enum class BitcodeError { InvalidBitcodeSignature = 1, CorruptedBitcode };
+ enum class BitcodeError { CorruptedBitcode = 1 };
inline std::error_code make_error_code(BitcodeError E) {
return std::error_code(static_cast<int>(E), BitcodeErrorCategory());
}
- class BitcodeDiagnosticInfo : public DiagnosticInfo {
- const Twine &Msg;
- std::error_code EC;
-
- public:
- BitcodeDiagnosticInfo(std::error_code EC, DiagnosticSeverity Severity,
- const Twine &Msg);
- void print(DiagnosticPrinter &DP) const override;
- std::error_code getError() const { return EC; }
-
- static bool classof(const DiagnosticInfo *DI) {
- return DI->getKind() == DK_Bitcode;
- }
- };
-
} // End llvm namespace
namespace std {
diff --git a/include/llvm/Bitcode/BitcodeWriter.h b/include/llvm/Bitcode/BitcodeWriter.h
new file mode 100644
index 000000000000..4f72f98bbf9c
--- /dev/null
+++ b/include/llvm/Bitcode/BitcodeWriter.h
@@ -0,0 +1,80 @@
+//===-- llvm/Bitcode/BitcodeWriter.h - Bitcode writers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines interfaces to write LLVM bitcode files/streams.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODEWRITER_H
+#define LLVM_BITCODE_BITCODEWRITER_H
+
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include <string>
+
+namespace llvm {
+ class BitstreamWriter;
+ class Module;
+ class raw_ostream;
+
+ class BitcodeWriter {
+ SmallVectorImpl<char> &Buffer;
+ std::unique_ptr<BitstreamWriter> Stream;
+
+ public:
+ /// Create a BitcodeWriter that writes to Buffer.
+ BitcodeWriter(SmallVectorImpl<char> &Buffer);
+
+ ~BitcodeWriter();
+
+ /// Write the specified module to the buffer specified at construction time.
+ ///
+ /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
+ /// Value in \c M. These will be reconstructed exactly when \a M is
+ /// deserialized.
+ ///
+ /// If \c Index is supplied, the bitcode will contain the summary index
+ /// (currently for use in ThinLTO optimization).
+ ///
+ /// \p GenerateHash enables hashing the Module and including the hash in the
+ /// bitcode (currently for use in ThinLTO incremental build).
+ void writeModule(const Module *M, bool ShouldPreserveUseListOrder = false,
+ const ModuleSummaryIndex *Index = nullptr,
+ bool GenerateHash = false);
+ };
+
+ /// \brief Write the specified module to the specified raw output stream.
+ ///
+ /// For streams where it matters, the given stream should be in "binary"
+ /// mode.
+ ///
+ /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
+ /// Value in \c M. These will be reconstructed exactly when \a M is
+ /// deserialized.
+ ///
+ /// If \c Index is supplied, the bitcode will contain the summary index
+ /// (currently for use in ThinLTO optimization).
+ ///
+ /// \p GenerateHash enables hashing the Module and including the hash in the
+ /// bitcode (currently for use in ThinLTO incremental build).
+ void WriteBitcodeToFile(const Module *M, raw_ostream &Out,
+ bool ShouldPreserveUseListOrder = false,
+ const ModuleSummaryIndex *Index = nullptr,
+ bool GenerateHash = false);
+
+ /// Write the specified module summary index to the given raw output stream,
+ /// where it will be written in a new bitcode block. This is used when
+ /// writing the combined index file for ThinLTO. When writing a subset of the
+ /// index for a distributed backend, provide the \p ModuleToSummariesForIndex
+ /// map.
+ void WriteIndexToFile(const ModuleSummaryIndex &Index, raw_ostream &Out,
+ const std::map<std::string, GVSummaryMapTy>
+ *ModuleToSummariesForIndex = nullptr);
+} // End llvm namespace
+
+#endif
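A hedged usage sketch of the new buffer-backed writer; WriteBitcodeToFile above remains the one-shot path for writing straight to a raw_ostream:

  void serializeModule(const Module &M, SmallVectorImpl<char> &Buffer) {
    {
      BitcodeWriter Writer(Buffer);
      Writer.writeModule(&M);  // defaults: no use-list order, no index, no hash
    } // the BitcodeWriter is destroyed before Buffer is consumed
    // Buffer now holds the serialized bitcode for M.
  }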
diff --git a/include/llvm/Bitcode/BitcodeWriterPass.h b/include/llvm/Bitcode/BitcodeWriterPass.h
index 946255b878a6..9ac6fba16b96 100644
--- a/include/llvm/Bitcode/BitcodeWriterPass.h
+++ b/include/llvm/Bitcode/BitcodeWriterPass.h
@@ -44,7 +44,7 @@ ModulePass *createBitcodeWriterPass(raw_ostream &Str,
///
/// Note that this is intended for use with the new pass manager. To construct
/// a pass for the legacy pass manager, use the function above.
-class BitcodeWriterPass {
+class BitcodeWriterPass : public PassInfoMixin<BitcodeWriterPass> {
raw_ostream &OS;
bool ShouldPreserveUseListOrder;
bool EmitSummaryIndex;
@@ -68,8 +68,6 @@ public:
/// \brief Run the bitcode writer pass, and output the module to the selected
/// output stream.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
-
- static StringRef name() { return "BitcodeWriterPass"; }
};
}
diff --git a/include/llvm/Bitcode/BitstreamReader.h b/include/llvm/Bitcode/BitstreamReader.h
index b331ceea051c..4d95a6ce8a16 100644
--- a/include/llvm/Bitcode/BitstreamReader.h
+++ b/include/llvm/Bitcode/BitstreamReader.h
@@ -15,21 +15,28 @@
#ifndef LLVM_BITCODE_BITSTREAMREADER_H
#define LLVM_BITCODE_BITSTREAMREADER_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Bitcode/BitCodes.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/StreamingMemoryObject.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cassert>
#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
#include <string>
+#include <utility>
#include <vector>
namespace llvm {
-/// This class is used to read from an LLVM bitcode stream, maintaining
-/// information that is global to decoding the entire file. While a file is
-/// being read, multiple cursors can be independently advanced or skipped around
-/// within the file. These are represented by the BitstreamCursor class.
-class BitstreamReader {
+/// This class maintains the abbreviations read from a block info block.
+class BitstreamBlockInfo {
public:
/// This contains information emitted to BLOCKINFO_BLOCK blocks. These
/// describe abbreviations that all blocks of the specified ID inherit.
@@ -37,64 +44,13 @@ public:
unsigned BlockID;
std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> Abbrevs;
std::string Name;
-
std::vector<std::pair<unsigned, std::string> > RecordNames;
};
-private:
- std::unique_ptr<MemoryObject> BitcodeBytes;
+private:
std::vector<BlockInfo> BlockInfoRecords;
- /// This is set to true if we don't care about the block/record name
- /// information in the BlockInfo block. Only llvm-bcanalyzer uses this.
- bool IgnoreBlockInfoNames;
-
- BitstreamReader(const BitstreamReader&) = delete;
- void operator=(const BitstreamReader&) = delete;
public:
- BitstreamReader() : IgnoreBlockInfoNames(true) {
- }
-
- BitstreamReader(const unsigned char *Start, const unsigned char *End)
- : IgnoreBlockInfoNames(true) {
- init(Start, End);
- }
-
- BitstreamReader(std::unique_ptr<MemoryObject> BitcodeBytes)
- : BitcodeBytes(std::move(BitcodeBytes)), IgnoreBlockInfoNames(true) {}
-
- BitstreamReader(BitstreamReader &&Other) {
- *this = std::move(Other);
- }
-
- BitstreamReader &operator=(BitstreamReader &&Other) {
- BitcodeBytes = std::move(Other.BitcodeBytes);
- // Explicitly swap block info, so that nothing gets destroyed twice.
- std::swap(BlockInfoRecords, Other.BlockInfoRecords);
- IgnoreBlockInfoNames = Other.IgnoreBlockInfoNames;
- return *this;
- }
-
- void init(const unsigned char *Start, const unsigned char *End) {
- assert(((End-Start) & 3) == 0 &&"Bitcode stream not a multiple of 4 bytes");
- BitcodeBytes.reset(getNonStreamedMemoryObject(Start, End));
- }
-
- MemoryObject &getBitcodeBytes() { return *BitcodeBytes; }
-
- /// This is called by clients that want block/record name information.
- void CollectBlockInfoNames() { IgnoreBlockInfoNames = false; }
- bool isIgnoringBlockInfoNames() { return IgnoreBlockInfoNames; }
-
- //===--------------------------------------------------------------------===//
- // Block Manipulation
- //===--------------------------------------------------------------------===//
-
- /// Return true if we've already read and processed the block info block for
- /// this Bitstream. We only process it for the first cursor that walks over
- /// it.
- bool hasBlockInfoRecords() const { return !BlockInfoRecords.empty(); }
-
/// If there is block info for the specified ID, return it, otherwise return
/// null.
const BlockInfo *getBlockInfo(unsigned BlockID) const {
@@ -118,33 +74,21 @@ public:
BlockInfoRecords.back().BlockID = BlockID;
return BlockInfoRecords.back();
}
-
- /// Takes block info from the other bitstream reader.
- ///
- /// This is a "take" operation because BlockInfo records are non-trivial, and
- /// indeed rather expensive.
- void takeBlockInfo(BitstreamReader &&Other) {
- assert(!hasBlockInfoRecords());
- BlockInfoRecords = std::move(Other.BlockInfoRecords);
- }
};
/// This represents a position within a bitstream. There may be multiple
/// independent cursors reading within one bitstream, each maintaining their
/// own local state.
class SimpleBitstreamCursor {
- BitstreamReader *R = nullptr;
+ ArrayRef<uint8_t> BitcodeBytes;
size_t NextChar = 0;
- // The size of the bicode. 0 if we don't know it yet.
- size_t Size = 0;
-
+public:
/// This is the current data we have pulled from the stream but have not
/// returned to the client. This is specifically and intentionally defined to
/// follow the word size of the host machine for efficiency. We use word_t in
/// places that are aware of this to make it perfectly explicit what is going
/// on.
-public:
typedef size_t word_t;
private:
@@ -158,23 +102,21 @@ public:
static const size_t MaxChunkSize = sizeof(word_t) * 8;
SimpleBitstreamCursor() = default;
-
- explicit SimpleBitstreamCursor(BitstreamReader &R) : R(&R) {}
- explicit SimpleBitstreamCursor(BitstreamReader *R) : R(R) {}
+ explicit SimpleBitstreamCursor(ArrayRef<uint8_t> BitcodeBytes)
+ : BitcodeBytes(BitcodeBytes) {}
+ explicit SimpleBitstreamCursor(StringRef BitcodeBytes)
+ : BitcodeBytes(reinterpret_cast<const uint8_t *>(BitcodeBytes.data()),
+ BitcodeBytes.size()) {}
+ explicit SimpleBitstreamCursor(MemoryBufferRef BitcodeBytes)
+ : SimpleBitstreamCursor(BitcodeBytes.getBuffer()) {}
bool canSkipToPos(size_t pos) const {
// pos can be skipped to if it is a valid address or one byte past the end.
- return pos == 0 ||
- R->getBitcodeBytes().isValidAddress(static_cast<uint64_t>(pos - 1));
+ return pos <= BitcodeBytes.size();
}
bool AtEndOfStream() {
- if (BitsInCurWord != 0)
- return false;
- if (Size != 0)
- return Size <= NextChar;
- fillCurWord();
- return BitsInCurWord == 0;
+ return BitsInCurWord == 0 && BitcodeBytes.size() <= NextChar;
}
/// Return the bit # of the bit we are reading.
@@ -185,8 +127,7 @@ public:
// Return the byte # of the current bit.
uint64_t getCurrentByteNo() const { return GetCurrentBitNo() / 8; }
- BitstreamReader *getBitStreamReader() { return R; }
- const BitstreamReader *getBitStreamReader() const { return R; }
+ ArrayRef<uint8_t> getBitcodeBytes() const { return BitcodeBytes; }
/// Reset the stream to the specified bit number.
void JumpToBit(uint64_t BitNo) {
@@ -203,27 +144,9 @@ public:
Read(WordBitNo);
}
- /// Reset the stream to the bit pointed at by the specified pointer.
- ///
- /// The pointer must be a dereferenceable pointer into the bytes in the
- /// underlying memory object.
- void jumpToPointer(const uint8_t *Pointer) {
- auto *Pointer0 = getPointerToByte(0, 1);
- assert((intptr_t)Pointer0 <= (intptr_t)Pointer &&
- "Expected pointer into bitstream");
-
- JumpToBit(8 * (Pointer - Pointer0));
- assert((intptr_t)getPointerToByte(getCurrentByteNo(), 1) ==
- (intptr_t)Pointer &&
- "Expected to reach pointer");
- }
- void jumpToPointer(const char *Pointer) {
- jumpToPointer((const uint8_t *)Pointer);
- }
-
/// Get a pointer into the bitstream at the specified byte offset.
const uint8_t *getPointerToByte(uint64_t ByteNo, uint64_t NumBytes) {
- return R->getBitcodeBytes().getPointer(ByteNo, NumBytes);
+ return BitcodeBytes.data() + ByteNo;
}
/// Get a pointer into the bitstream at the specified bit offset.
@@ -235,26 +158,24 @@ public:
}
void fillCurWord() {
- if (Size != 0 && NextChar >= Size)
+ if (NextChar >= BitcodeBytes.size())
report_fatal_error("Unexpected end of file");
// Read the next word from the stream.
- uint8_t Array[sizeof(word_t)] = {0};
-
- uint64_t BytesRead =
- R->getBitcodeBytes().readBytes(Array, sizeof(Array), NextChar);
-
- // If we run out of data, stop at the end of the stream.
- if (BytesRead == 0) {
+ const uint8_t *NextCharPtr = BitcodeBytes.data() + NextChar;
+ unsigned BytesRead;
+ if (BitcodeBytes.size() >= NextChar + sizeof(word_t)) {
+ BytesRead = sizeof(word_t);
+ CurWord =
+ support::endian::read<word_t, support::little, support::unaligned>(
+ NextCharPtr);
+ } else {
+ // Short read.
+ BytesRead = BitcodeBytes.size() - NextChar;
CurWord = 0;
- BitsInCurWord = 0;
- Size = NextChar;
- return;
+ for (unsigned B = 0; B != BytesRead; ++B)
+ CurWord |= uint64_t(NextCharPtr[B]) << (B * 8);
}
-
- CurWord =
- support::endian::read<word_t, support::little, support::unaligned>(
- Array);
NextChar += BytesRead;
BitsInCurWord = BytesRead * 8;
}
@@ -283,9 +204,9 @@ public:
fillCurWord();
- // If we run out of data, stop at the end of the stream.
+ // If we run out of data, abort.
if (BitsLeft > BitsInCurWord)
- return 0;
+ report_fatal_error("Unexpected end of file");
word_t R2 = CurWord & (~word_t(0) >> (BitsInWord - BitsLeft));
@@ -306,7 +227,7 @@ public:
uint32_t Result = 0;
unsigned NextBit = 0;
- while (1) {
+ while (true) {
Result |= (Piece & ((1U << (NumBits-1))-1)) << NextBit;
if ((Piece & (1U << (NumBits-1))) == 0)
@@ -326,7 +247,7 @@ public:
uint64_t Result = 0;
unsigned NextBit = 0;
- while (1) {
+ while (true) {
Result |= uint64_t(Piece & ((1U << (NumBits-1))-1)) << NextBit;
if ((Piece & (1U << (NumBits-1))) == 0)
@@ -351,31 +272,7 @@ public:
}
/// Skip to the end of the file.
- void skipToEnd() { NextChar = R->getBitcodeBytes().getExtent(); }
-
- /// Prevent the cursor from reading past a byte boundary.
- ///
- /// Prevent the cursor from requesting byte reads past \c Limit. This is
- /// useful when working with a cursor on a StreamingMemoryObject, when it's
- /// desirable to avoid invalidating the result of getPointerToByte().
- ///
- /// If \c Limit is on a word boundary, AtEndOfStream() will return true if
- /// the cursor position reaches or exceeds \c Limit, regardless of the true
- /// number of available bytes. Otherwise, AtEndOfStream() returns true when
- /// it reaches or exceeds the next word boundary.
- void setArtificialByteLimit(uint64_t Limit) {
- assert(getCurrentByteNo() < Limit && "Move cursor before lowering limit");
-
- // Round to word boundary.
- Limit = alignTo(Limit, sizeof(word_t));
-
- // Only change size if the new one is lower.
- if (!Size || Size > Limit)
- Size = Limit;
- }
-
- /// Return the Size, if known.
- uint64_t getSizeIfKnown() const { return Size; }
+ void skipToEnd() { NextChar = BitcodeBytes.size(); }
};
/// When advancing through a bitstream cursor, each advance can discover a few
@@ -394,12 +291,15 @@ struct BitstreamEntry {
static BitstreamEntry getError() {
BitstreamEntry E; E.Kind = Error; return E;
}
+
static BitstreamEntry getEndBlock() {
BitstreamEntry E; E.Kind = EndBlock; return E;
}
+
static BitstreamEntry getSubBlock(unsigned ID) {
BitstreamEntry E; E.Kind = SubBlock; E.ID = ID; return E;
}
+
static BitstreamEntry getRecord(unsigned AbbrevID) {
BitstreamEntry E; E.Kind = Record; E.ID = AbbrevID; return E;
}
@@ -421,34 +321,32 @@ class BitstreamCursor : SimpleBitstreamCursor {
struct Block {
unsigned PrevCodeSize;
std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> PrevAbbrevs;
+
explicit Block(unsigned PCS) : PrevCodeSize(PCS) {}
};
/// This tracks the codesize of parent blocks.
SmallVector<Block, 8> BlockScope;
+ BitstreamBlockInfo *BlockInfo = nullptr;
public:
static const size_t MaxChunkSize = sizeof(word_t) * 8;
BitstreamCursor() = default;
-
- explicit BitstreamCursor(BitstreamReader &R) { init(&R); }
-
- void init(BitstreamReader *R) {
- freeState();
- SimpleBitstreamCursor::operator=(SimpleBitstreamCursor(R));
- CurCodeSize = 2;
- }
-
- void freeState();
+ explicit BitstreamCursor(ArrayRef<uint8_t> BitcodeBytes)
+ : SimpleBitstreamCursor(BitcodeBytes) {}
+ explicit BitstreamCursor(StringRef BitcodeBytes)
+ : SimpleBitstreamCursor(BitcodeBytes) {}
+ explicit BitstreamCursor(MemoryBufferRef BitcodeBytes)
+ : SimpleBitstreamCursor(BitcodeBytes) {}
using SimpleBitstreamCursor::canSkipToPos;
using SimpleBitstreamCursor::AtEndOfStream;
+ using SimpleBitstreamCursor::getBitcodeBytes;
using SimpleBitstreamCursor::GetCurrentBitNo;
using SimpleBitstreamCursor::getCurrentByteNo;
using SimpleBitstreamCursor::getPointerToByte;
- using SimpleBitstreamCursor::getBitStreamReader;
using SimpleBitstreamCursor::JumpToBit;
using SimpleBitstreamCursor::fillCurWord;
using SimpleBitstreamCursor::Read;
@@ -471,7 +369,10 @@ public:
/// Advance the current bitstream, returning the next entry in the stream.
BitstreamEntry advance(unsigned Flags = 0) {
- while (1) {
+ while (true) {
+ if (AtEndOfStream())
+ return BitstreamEntry::getError();
+
unsigned Code = ReadCode();
if (Code == bitc::END_BLOCK) {
// Pop the end of the block unless Flags tells us not to.
@@ -498,7 +399,7 @@ public:
/// This is a convenience function for clients that don't expect any
/// subblocks. This just skips over them automatically.
BitstreamEntry advanceSkippingSubblocks(unsigned Flags = 0) {
- while (1) {
+ while (true) {
// If we found a normal entry, return it.
BitstreamEntry Entry = advance(Flags);
if (Entry.Kind != BitstreamEntry::SubBlock)
@@ -514,7 +415,6 @@ public:
return Read(CurCodeSize);
}
-
// Block header:
// [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen]
@@ -558,7 +458,6 @@ public:
}
private:
-
void popBlockScope() {
CurCodeSize = BlockScope.back().PrevCodeSize;
@@ -590,9 +489,19 @@ public:
//===--------------------------------------------------------------------===//
void ReadAbbrevRecord();
- bool ReadBlockInfoBlock();
+ /// Read and return a block info block from the bitstream. If an error was
+ /// encountered, return None.
+ ///
+ /// \param ReadBlockInfoNames Whether to read block/record name information in
+ /// the BlockInfo block. Only llvm-bcanalyzer uses this.
+ Optional<BitstreamBlockInfo>
+ ReadBlockInfoBlock(bool ReadBlockInfoNames = false);
+
+ /// Set the block info to be used by this BitstreamCursor to interpret
+ /// abbreviated records.
+ void setBlockInfo(BitstreamBlockInfo *BI) { BlockInfo = BI; }
};
-} // End llvm namespace
+} // end llvm namespace
-#endif
+#endif // LLVM_BITCODE_BITSTREAMREADER_H
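
A BitstreamCursor now wraps the raw bitcode bytes directly and consults an externally owned BitstreamBlockInfo; a minimal sketch of the resulting read loop follows (the walkStream/Buffer names and the skip-everything policy are illustrative, not part of the patch):

// Sketch: walk the top-level entries of a bitcode buffer with the new API.
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/Support/MemoryBuffer.h"
#include <utility>
using namespace llvm;

static void walkStream(MemoryBufferRef Buffer) {
  BitstreamCursor Stream(Buffer);     // construct directly from the bytes
  BitstreamBlockInfo BlockInfo;       // owned by the client, not the cursor
  Stream.setBlockInfo(&BlockInfo);    // used to interpret abbreviated records

  while (!Stream.AtEndOfStream()) {
    BitstreamEntry Entry = Stream.advance();
    switch (Entry.Kind) {
    case BitstreamEntry::Error:
    case BitstreamEntry::EndBlock:
      return;
    case BitstreamEntry::SubBlock:
      if (Entry.ID == bitc::BLOCKINFO_BLOCK_ID) {
        // ReadBlockInfoBlock() now returns the parsed info instead of
        // mutating reader state; None signals a malformed block.
        if (Optional<BitstreamBlockInfo> NewInfo = Stream.ReadBlockInfoBlock())
          BlockInfo = std::move(*NewInfo);
        continue;
      }
      if (Stream.SkipBlock())
        return;                       // SkipBlock() returns true on error
      continue;
    case BitstreamEntry::Record:
      Stream.skipRecord(Entry.ID);    // records are uninteresting here
      continue;
    }
  }
}
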
diff --git a/include/llvm/Bitcode/BitstreamWriter.h b/include/llvm/Bitcode/BitstreamWriter.h
index d613f5e18954..8eb6e8aef7a2 100644
--- a/include/llvm/Bitcode/BitstreamWriter.h
+++ b/include/llvm/Bitcode/BitstreamWriter.h
@@ -112,6 +112,11 @@ public:
&Out[ByteNo], NewWord, BitNo & 7);
}
+ void BackpatchWord64(uint64_t BitNo, uint64_t Val) {
+ BackpatchWord(BitNo, (uint32_t)Val);
+ BackpatchWord(BitNo + 32, (uint32_t)(Val >> 32));
+ }
+
void Emit(uint32_t Val, unsigned NumBits) {
assert(NumBits && NumBits <= 32 && "Invalid value size!");
assert((Val & ~(~0U >> (32-NumBits))) == 0 && "High bits set!");
@@ -131,15 +136,6 @@ public:
CurBit = (CurBit+NumBits) & 31;
}
- void Emit64(uint64_t Val, unsigned NumBits) {
- if (NumBits <= 32)
- Emit((uint32_t)Val, NumBits);
- else {
- Emit((uint32_t)Val, 32);
- Emit((uint32_t)(Val >> 32), NumBits-32);
- }
- }
-
void FlushToWord() {
if (CurBit) {
WriteWord(CurValue);
@@ -506,9 +502,10 @@ public:
//===--------------------------------------------------------------------===//
/// EnterBlockInfoBlock - Start emitting the BLOCKINFO_BLOCK.
- void EnterBlockInfoBlock(unsigned CodeWidth) {
- EnterSubblock(bitc::BLOCKINFO_BLOCK_ID, CodeWidth);
+ void EnterBlockInfoBlock() {
+ EnterSubblock(bitc::BLOCKINFO_BLOCK_ID, 2);
BlockInfoCurBID = ~0U;
+ BlockInfoRecords.clear();
}
private:
/// SwitchToBlockID - If we aren't already talking about the specified block
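
BackpatchWord64 replaces the removed Emit64 for the one pattern that still needs 64-bit writes: patching a value into two 32-bit words that were emitted earlier as placeholders. A small sketch of that pattern, with illustrative names:

// Sketch: reserve a 64-bit slot, then backpatch it once the value is known.
#include "llvm/Bitcode/BitstreamWriter.h"
using namespace llvm;

static void emitWithPlaceholder(BitstreamWriter &Stream) {
  // Remember where the placeholder starts, then emit two zero words.
  uint64_t PlaceholderBit = Stream.GetCurrentBitNo();
  Stream.Emit(0, 32);
  Stream.Emit(0, 32);

  // ... emit the payload whose size/offset we want to record ...
  uint64_t Value = 0x0123456789abcdefULL; // illustrative

  // BackpatchWord64 writes the low word at PlaceholderBit and the high word
  // 32 bits later, matching the two Emit(0, 32) calls above.
  Stream.BackpatchWord64(PlaceholderBit, Value);
}
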
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index 52d4f01b7985..c996c38261c0 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -120,9 +120,8 @@ enum AttributeCodes {
// FIXME: Remove `PARAMATTR_CODE_ENTRY_OLD' in 4.0
PARAMATTR_CODE_ENTRY_OLD = 1, // ENTRY: [paramidx0, attr0,
// paramidx1, attr1...]
- PARAMATTR_CODE_ENTRY = 2, // ENTRY: [paramidx0, attrgrp0,
- // paramidx1, attrgrp1, ...]
- PARAMATTR_GRP_CODE_ENTRY = 3 // ENTRY: [id, attr0, att1, ...]
+ PARAMATTR_CODE_ENTRY = 2, // ENTRY: [attrgrp0, attrgrp1, ...]
+ PARAMATTR_GRP_CODE_ENTRY = 3 // ENTRY: [grpid, idx, attr0, attr1, ...]
};
/// TYPE blocks have codes for each type primitive they use.
@@ -170,11 +169,6 @@ enum OperandBundleTagCode {
OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
};
-// The type symbol table only has one code (TST_ENTRY_CODE).
-enum TypeSymtabCodes {
- TST_CODE_ENTRY = 1 // TST_ENTRY: [typeid, namechar x N]
-};
-
// Value symbol table codes.
enum ValueSymtabCodes {
VST_CODE_ENTRY = 1, // VST_ENTRY: [valueid, namechar x N]
@@ -194,20 +188,20 @@ enum ModulePathSymtabCodes {
// and combined index cases.
enum GlobalValueSummarySymtabCodes {
// PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
- // n x (valueid, callsitecount)]
+ // n x (valueid)]
FS_PERMODULE = 1,
// PERMODULE_PROFILE: [valueid, flags, instcount, numrefs,
// numrefs x valueid,
- // n x (valueid, callsitecount, profilecount)]
+ // n x (valueid, hotness)]
FS_PERMODULE_PROFILE = 2,
// PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, n x valueid]
FS_PERMODULE_GLOBALVAR_INIT_REFS = 3,
// COMBINED: [valueid, modid, flags, instcount, numrefs, numrefs x valueid,
- // n x (valueid, callsitecount)]
+ // n x (valueid)]
FS_COMBINED = 4,
// COMBINED_PROFILE: [valueid, modid, flags, instcount, numrefs,
// numrefs x valueid,
- // n x (valueid, callsitecount, profilecount)]
+ // n x (valueid, hotness)]
FS_COMBINED_PROFILE = 5,
// COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
FS_COMBINED_GLOBALVAR_INIT_REFS = 6,
@@ -219,45 +213,50 @@ enum GlobalValueSummarySymtabCodes {
FS_COMBINED_ORIGINAL_NAME = 9,
// VERSION of the summary, bumped when adding flags for instance.
FS_VERSION = 10,
+ // The list of llvm.type.test type identifiers used by the following function.
+ FS_TYPE_TESTS = 11,
};
enum MetadataCodes {
- METADATA_STRING_OLD = 1, // MDSTRING: [values]
- METADATA_VALUE = 2, // VALUE: [type num, value num]
- METADATA_NODE = 3, // NODE: [n x md num]
- METADATA_NAME = 4, // STRING: [values]
- METADATA_DISTINCT_NODE = 5, // DISTINCT_NODE: [n x md num]
- METADATA_KIND = 6, // [n x [id, name]]
- METADATA_LOCATION = 7, // [distinct, line, col, scope, inlined-at?]
- METADATA_OLD_NODE = 8, // OLD_NODE: [n x (type num, value num)]
- METADATA_OLD_FN_NODE = 9, // OLD_FN_NODE: [n x (type num, value num)]
- METADATA_NAMED_NODE = 10, // NAMED_NODE: [n x mdnodes]
- METADATA_ATTACHMENT = 11, // [m x [value, [n x [id, mdnode]]]
- METADATA_GENERIC_DEBUG = 12, // [distinct, tag, vers, header, n x md num]
- METADATA_SUBRANGE = 13, // [distinct, count, lo]
- METADATA_ENUMERATOR = 14, // [distinct, value, name]
- METADATA_BASIC_TYPE = 15, // [distinct, tag, name, size, align, enc]
- METADATA_FILE = 16, // [distinct, filename, directory]
- METADATA_DERIVED_TYPE = 17, // [distinct, ...]
- METADATA_COMPOSITE_TYPE = 18, // [distinct, ...]
- METADATA_SUBROUTINE_TYPE = 19, // [distinct, flags, types, cc]
- METADATA_COMPILE_UNIT = 20, // [distinct, ...]
- METADATA_SUBPROGRAM = 21, // [distinct, ...]
- METADATA_LEXICAL_BLOCK = 22, // [distinct, scope, file, line, column]
+ METADATA_STRING_OLD = 1, // MDSTRING: [values]
+ METADATA_VALUE = 2, // VALUE: [type num, value num]
+ METADATA_NODE = 3, // NODE: [n x md num]
+ METADATA_NAME = 4, // STRING: [values]
+ METADATA_DISTINCT_NODE = 5, // DISTINCT_NODE: [n x md num]
+ METADATA_KIND = 6, // [n x [id, name]]
+ METADATA_LOCATION = 7, // [distinct, line, col, scope, inlined-at?]
+ METADATA_OLD_NODE = 8, // OLD_NODE: [n x (type num, value num)]
+ METADATA_OLD_FN_NODE = 9, // OLD_FN_NODE: [n x (type num, value num)]
+ METADATA_NAMED_NODE = 10, // NAMED_NODE: [n x mdnodes]
+ METADATA_ATTACHMENT = 11, // [m x [value, [n x [id, mdnode]]]
+ METADATA_GENERIC_DEBUG = 12, // [distinct, tag, vers, header, n x md num]
+ METADATA_SUBRANGE = 13, // [distinct, count, lo]
+ METADATA_ENUMERATOR = 14, // [distinct, value, name]
+ METADATA_BASIC_TYPE = 15, // [distinct, tag, name, size, align, enc]
+ METADATA_FILE = 16, // [distinct, filename, directory, checksumkind, checksum]
+ METADATA_DERIVED_TYPE = 17, // [distinct, ...]
+ METADATA_COMPOSITE_TYPE = 18, // [distinct, ...]
+ METADATA_SUBROUTINE_TYPE = 19, // [distinct, flags, types, cc]
+ METADATA_COMPILE_UNIT = 20, // [distinct, ...]
+ METADATA_SUBPROGRAM = 21, // [distinct, ...]
+ METADATA_LEXICAL_BLOCK = 22, // [distinct, scope, file, line, column]
METADATA_LEXICAL_BLOCK_FILE = 23, //[distinct, scope, file, discriminator]
- METADATA_NAMESPACE = 24, // [distinct, scope, file, name, line]
- METADATA_TEMPLATE_TYPE = 25, // [distinct, scope, name, type, ...]
- METADATA_TEMPLATE_VALUE = 26, // [distinct, scope, name, type, value, ...]
- METADATA_GLOBAL_VAR = 27, // [distinct, ...]
- METADATA_LOCAL_VAR = 28, // [distinct, ...]
- METADATA_EXPRESSION = 29, // [distinct, n x element]
- METADATA_OBJC_PROPERTY = 30, // [distinct, name, file, line, ...]
+ METADATA_NAMESPACE = 24, // [distinct, scope, file, name, line, exportSymbols]
+ METADATA_TEMPLATE_TYPE = 25, // [distinct, scope, name, type, ...]
+ METADATA_TEMPLATE_VALUE = 26, // [distinct, scope, name, type, value, ...]
+ METADATA_GLOBAL_VAR = 27, // [distinct, ...]
+ METADATA_LOCAL_VAR = 28, // [distinct, ...]
+ METADATA_EXPRESSION = 29, // [distinct, n x element]
+ METADATA_OBJC_PROPERTY = 30, // [distinct, name, file, line, ...]
METADATA_IMPORTED_ENTITY = 31, // [distinct, tag, scope, entity, line, name]
METADATA_MODULE = 32, // [distinct, scope, name, ...]
METADATA_MACRO = 33, // [distinct, macinfo, line, name, value]
METADATA_MACRO_FILE = 34, // [distinct, macinfo, line, file, ...]
METADATA_STRINGS = 35, // [count, offset] blob([lengths][chars])
METADATA_GLOBAL_DECL_ATTACHMENT = 36, // [valueid, n x [id, mdnode]]
+ METADATA_GLOBAL_VAR_EXPR = 37, // [distinct, var, expr]
+ METADATA_INDEX_OFFSET = 38, // [offset]
+ METADATA_INDEX = 39, // [bitpos]
};
// The constants block (CONSTANTS_BLOCK_ID) describes emission for each
@@ -286,8 +285,9 @@ enum ConstantsCodes {
CST_CODE_CE_INBOUNDS_GEP = 20, // INBOUNDS_GEP: [n x operands]
CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
CST_CODE_DATA = 22, // DATA: [n x elements]
- CST_CODE_INLINEASM = 23 // INLINEASM: [sideeffect|alignstack|
+ CST_CODE_INLINEASM = 23, // INLINEASM: [sideeffect|alignstack|
// asmdialect,asmstr,conststr]
+ CST_CODE_CE_GEP_WITH_INRANGE_INDEX = 24, // [opty, flags, n x operands]
};
/// CastOpcodes - These are values used in the bitcode files to encode which
diff --git a/include/llvm/CodeGen/Analysis.h b/include/llvm/CodeGen/Analysis.h
index 2e4dc49a1e26..f20185c4499a 100644
--- a/include/llvm/CodeGen/Analysis.h
+++ b/include/llvm/CodeGen/Analysis.h
@@ -105,11 +105,21 @@ ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
/// This function only tests target-independent requirements.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM);
+/// Test, given that the input instruction is in the tail call position, whether
+/// there is an attribute mismatch between the caller and the callee that will
+/// inhibit tail call optimizations.
+/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
+/// is permitted, determines whether it's permitted only if the size of the
+/// caller's and callee's return types match exactly.
+bool attributesPermitTailCall(const Function *F, const Instruction *I,
+ const ReturnInst *Ret,
+ const TargetLoweringBase &TLI,
+ bool *AllowDifferingSizes = nullptr);
+
/// Test if given that the input instruction is in the tail call position if the
/// return type or any attributes of the function will inhibit tail call
/// optimization.
-bool returnTypeIsEligibleForTailCall(const Function *F,
- const Instruction *I,
+bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
const ReturnInst *Ret,
const TargetLoweringBase &TLI);
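
attributesPermitTailCall factors the caller/callee attribute check out of returnTypeIsEligibleForTailCall so backends can query it on its own. A hedged sketch of how the two helpers might be combined (mayTailCall is an illustrative wrapper, not an API from this patch):

// Sketch: decide whether a ret following a call can be folded into a tail call.
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

static bool mayTailCall(const Function &F, const Instruction &Call,
                        const ReturnInst &Ret, const TargetLoweringBase &TLI) {
  bool AllowDifferingSizes = false;
  // Attribute check only: returns false on a caller/callee attribute mismatch
  // that blocks the optimization. On success, AllowDifferingSizes reports
  // whether the caller's and callee's return types may differ in size.
  if (!attributesPermitTailCall(&F, &Call, &Ret, TLI, &AllowDifferingSizes))
    return false;
  // The pre-existing helper still performs the full return-value check.
  return returnTypeIsEligibleForTailCall(&F, &Call, &Ret, TLI);
}
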
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index de618d173573..c1be46ddd7b5 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -89,10 +89,6 @@ public:
/// This is a pointer to the current MachineModuleInfo.
MachineModuleInfo *MMI;
- /// Name-mangler for global names.
- ///
- Mangler *Mang;
-
/// The symbol for the current function. This is recalculated at the beginning
/// of each call to runOnMachineFunction().
///
@@ -126,11 +122,16 @@ private:
struct HandlerInfo {
AsmPrinterHandler *Handler;
- const char *TimerName, *TimerGroupName;
+ const char *TimerName;
+ const char *TimerDescription;
+ const char *TimerGroupName;
+ const char *TimerGroupDescription;
HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
- const char *TimerGroupName)
+ const char *TimerDescription, const char *TimerGroupName,
+ const char *TimerGroupDescription)
: Handler(Handler), TimerName(TimerName),
- TimerGroupName(TimerGroupName) {}
+ TimerDescription(TimerDescription), TimerGroupName(TimerGroupName),
+ TimerGroupDescription(TimerGroupDescription) {}
};
/// A vector of all debug/EH info emitters we should use. This vector
/// maintains ownership of the emitters.
@@ -148,6 +149,9 @@ public:
DwarfDebug *getDwarfDebug() { return DD; }
DwarfDebug *getDwarfDebug() const { return DD; }
+ uint16_t getDwarfVersion() const;
+ void setDwarfVersion(uint16_t Version);
+
bool isPositionIndependent() const;
/// Return true if assembly output should contain comments.
@@ -176,9 +180,6 @@ public:
void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
- /// Return the target triple string.
- StringRef getTargetTriple() const;
-
/// Return the current section we are emitting to.
const MCSection *getCurrentSection() const;
@@ -188,6 +189,34 @@ public:
MCSymbol *getSymbol(const GlobalValue *GV) const;
//===------------------------------------------------------------------===//
+ // XRay instrumentation implementation.
+ //===------------------------------------------------------------------===//
+public:
+ // This describes the kind of sled we're storing in the XRay table.
+ enum class SledKind : uint8_t {
+ FUNCTION_ENTER = 0,
+ FUNCTION_EXIT = 1,
+ TAIL_CALL = 2,
+ };
+
+ // The table will contain these structs that point to the sled, the function
+ // containing the sled, and what kind of sled (and whether they should always
+ // be instrumented).
+ struct XRayFunctionEntry {
+ const MCSymbol *Sled;
+ const MCSymbol *Function;
+ SledKind Kind;
+ bool AlwaysInstrument;
+ const class Function *Fn;
+ };
+
+ // All the sleds to be emitted.
+ std::vector<XRayFunctionEntry> Sleds;
+
+ // Helper function to record a given XRay sled.
+ void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind);
+
+ //===------------------------------------------------------------------===//
// MachineFunctionPass Implementation.
//===------------------------------------------------------------------===//
@@ -439,10 +468,6 @@ public:
/// Get the value for DW_AT_APPLE_isa. Zero if no isa encoding specified.
virtual unsigned getISAEncoding() { return 0; }
- /// EmitDwarfRegOp - Emit a dwarf register operation.
- virtual void EmitDwarfRegOp(ByteStreamer &BS,
- const MachineLocation &MLoc) const;
-
//===------------------------------------------------------------------===//
// Dwarf Lowering Routines
//===------------------------------------------------------------------===//
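
The XRay members let a target AsmPrinter label each instrumentation sled and record it in Sleds for later table emission. A sketch of recording an entry sled while lowering a patchable instruction (the temp-symbol prefix and the surrounding lowering code are illustrative):

// Sketch: inside a target AsmPrinter, record an XRay entry sled for MI.
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
using namespace llvm;

static void emitEntrySled(AsmPrinter &AP, const MachineInstr &MI) {
  // Label the start of the sled so the runtime can find and patch it.
  MCSymbol *CurSled = AP.OutContext.createTempSymbol("xray_sled_", true);
  AP.OutStreamer->EmitLabel(CurSled);

  // ... emit the padded/patchable instruction sequence for the sled here ...

  // Associate the label with the current function and the sled kind; whether
  // the function is always instrumented is derived from its attributes.
  AP.recordSled(CurSled, MI, AsmPrinter::SledKind::FUNCTION_ENTER);
}
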
diff --git a/include/llvm/CodeGen/BasicTTIImpl.h b/include/llvm/CodeGen/BasicTTIImpl.h
index 69951afb623c..df0dc1a38ae7 100644
--- a/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/include/llvm/CodeGen/BasicTTIImpl.h
@@ -60,8 +60,9 @@ private:
return Cost;
}
- /// Estimate the cost overhead of SK_Alternate shuffle.
- unsigned getAltShuffleOverhead(Type *Ty) {
+ /// Estimate the cost of a shuffle as a sequence of extract and insert
+ /// operations.
+ unsigned getPermuteShuffleOverhead(Type *Ty) {
assert(Ty->isVectorTy() && "Can only shuffle vectors");
unsigned Cost = 0;
// Shuffle cost is equal to the cost of extracting element from its argument
@@ -97,18 +98,13 @@ protected:
using TargetTransformInfoImplBase::DL;
public:
- // Provide value semantics. MSVC requires that we spell all of these out.
- BasicTTIImplBase(const BasicTTIImplBase &Arg)
- : BaseT(static_cast<const BaseT &>(Arg)) {}
- BasicTTIImplBase(BasicTTIImplBase &&Arg)
- : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
-
/// \name Scalar TTI Implementations
/// @{
- bool allowsMisalignedMemoryAccesses(unsigned BitWidth, unsigned AddressSpace,
+ bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+ unsigned BitWidth, unsigned AddressSpace,
unsigned Alignment, bool *Fast) const {
- MVT M = MVT::getIntegerVT(BitWidth);
- return getTLI()->allowsMisalignedMemoryAccesses(M, AddressSpace, Alignment, Fast);
+ EVT E = EVT::getIntegerVT(Context, BitWidth);
+ return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment, Fast);
}
bool hasBranchDivergence() { return false; }
@@ -144,6 +140,10 @@ public:
return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
}
+ bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) {
+ return getTLI()->isFoldableMemAccessOffset(I, Offset);
+ }
+
bool isTruncateFree(Type *Ty1, Type *Ty2) {
return getTLI()->isTruncateFree(Ty1, Ty2);
}
@@ -279,8 +279,17 @@ public:
}
// Enable runtime and partial unrolling up to the specified size.
- UP.Partial = UP.Runtime = true;
- UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
+ // Enable using trip count upper bound to unroll loops.
+ UP.Partial = UP.Runtime = UP.UpperBound = true;
+ UP.PartialThreshold = MaxOps;
+
+ // Avoid unrolling when optimizing for size.
+ UP.OptSizeThreshold = 0;
+ UP.PartialOptSizeThreshold = 0;
+
+ // Set the number of instructions optimized when a "back edge"
+ // becomes a "fall through" to the default value of 2.
+ UP.BEInsns = 2;
}
/// @}
@@ -343,8 +352,9 @@ public:
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
- if (Kind == TTI::SK_Alternate) {
- return getAltShuffleOverhead(Tp);
+ if (Kind == TTI::SK_Alternate || Kind == TTI::SK_PermuteTwoSrc ||
+ Kind == TTI::SK_PermuteSingleSrc) {
+ return getPermuteShuffleOverhead(Tp);
}
return 1;
}
@@ -919,16 +929,71 @@ public:
unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
assert(Ty->isVectorTy() && "Expect a vector type");
+ Type *ScalarTy = Ty->getVectorElementType();
unsigned NumVecElts = Ty->getVectorNumElements();
unsigned NumReduxLevels = Log2_32(NumVecElts);
- unsigned ArithCost =
- NumReduxLevels *
- static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
- // Assume the pairwise shuffles add a cost.
- unsigned ShuffleCost =
- NumReduxLevels * (IsPairwise + 1) *
- static_cast<T *>(this)
- ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
+ // Try to calculate arithmetic and shuffle op costs for reduction operations.
+ // We're assuming that reduction operations are performed in the following way:
+ // 1. Non-pairwise reduction
+ // %val1 = shufflevector<n x t> %val, <n x t> %undef,
+ // <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
+ // \----------------v-------------/ \----------v------------/
+ // n/2 elements n/2 elements
+ // %red1 = op <n x t> %val, <n x t> %val1
+ // After this operation we have a vector %red1 in which only the
+ // first n/2 elements are meaningful; the second n/2 elements are undefined
+ // and can be dropped. All subsequent operations actually work with a vector
+ // of length n/2, not n, though the real vector length is still n.
+ // %val2 = shufflevector<n x t> %red1, <n x t> %undef,
+ // <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
+ // \----------------v-------------/ \----------v------------/
+ // n/4 elements 3*n/4 elements
+ // %red2 = op <n x t> %red1, <n x t> %val2 - working with a vector of
+ // length n/2, the resulting vector has length n/4, etc.
+ // 2. Pairwise reduction:
+ // Everything is the same except for an additional shuffle operation which
+ // is used to produce operands for pairwise kind of reductions.
+ // %val1 = shufflevector<n x t> %val, <n x t> %undef,
+ // <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
+ // \-------------v----------/ \----------v------------/
+ // n/2 elements n/2 elements
+ // %val2 = shufflevector<n x t> %val, <n x t> %undef,
+ // <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
+ // \-------------v----------/ \----------v------------/
+ // n/2 elements n/2 elements
+ // %red1 = op <n x t> %val1, <n x t> %val2
+ // Again, the operation is performed on an <n x t> vector, but the resulting
+ // vector %red1 is an <n/2 x t> vector.
+ //
+ // The cost model should take into account that the actual length of the
+ // vector is reduced on each iteration.
+ unsigned ArithCost = 0;
+ unsigned ShuffleCost = 0;
+ auto *ConcreteTTI = static_cast<T *>(this);
+ std::pair<unsigned, MVT> LT =
+ ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
+ unsigned LongVectorCount = 0;
+ unsigned MVTLen =
+ LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
+ while (NumVecElts > MVTLen) {
+ NumVecElts /= 2;
+ // Assume the pairwise shuffles add a cost.
+ ShuffleCost += (IsPairwise + 1) *
+ ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+ NumVecElts, Ty);
+ ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
+ Ty = VectorType::get(ScalarTy, NumVecElts);
+ ++LongVectorCount;
+ }
+ // The minimal length of the vector is limited by the real length of vector
+ // operations performed on the current platform. That's why several final
+ // reduction operations are performed on vectors with the same
+ // architecture-dependent length.
+ ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
+ ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+ NumVecElts, Ty);
+ ArithCost += (NumReduxLevels - LongVectorCount) *
+ ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}
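
The arithmetic in the comment above can be traced with concrete numbers. The standalone sketch below mirrors the two phases of the new cost loop for a non-pairwise reduction of <8 x i32> when the widest legal type holds 4 elements; the per-operation costs of 1 are placeholders, and the final scalarization overhead from getScalarizationOverhead() is left out:

// Sketch: trace of the reduction cost loop with placeholder unit costs.
#include <cstdio>

int main() {
  unsigned NumVecElts = 8, MVTLen = 4, IsPairwise = 0;
  unsigned NumReduxLevels = 3;            // log2(8)
  unsigned ShuffleCost = 0, ArithCost = 0, LongVectorCount = 0;

  while (NumVecElts > MVTLen) {           // one level on the illegal <8 x i32>
    NumVecElts /= 2;
    ShuffleCost += (IsPairwise + 1) * 1;  // stands in for getShuffleCost(<8 x i32>)
    ArithCost += 1;                       // stands in for getArithmeticInstrCost(<8 x i32>)
    ++LongVectorCount;
  }
  // The remaining levels run on the legal <4 x i32> type.
  ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) * 1;
  ArithCost += (NumReduxLevels - LongVectorCount) * 1;

  std::printf("shuffles=%u adds=%u\n", ShuffleCost, ArithCost); // prints 3 and 3
  return 0;
}
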
@@ -951,13 +1016,6 @@ class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
public:
explicit BasicTTIImpl(const TargetMachine *ST, const Function &F);
-
- // Provide value semantics. MSVC requires that we spell all of these out.
- BasicTTIImpl(const BasicTTIImpl &Arg)
- : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
- BasicTTIImpl(BasicTTIImpl &&Arg)
- : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
- TLI(std::move(Arg.TLI)) {}
};
}
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index 92e58564e040..bfbd22823eb8 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -296,6 +296,12 @@ public:
void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn);
+ /// The function will invoke AnalyzeFormalArguments.
+ void AnalyzeArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+ CCAssignFn Fn) {
+ AnalyzeFormalArguments(Ins, Fn);
+ }
+
/// AnalyzeReturn - Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
@@ -318,11 +324,22 @@ public:
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn);
+ /// The function will invoke AnalyzeCallOperands.
+ void AnalyzeArguments(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn) {
+ AnalyzeCallOperands(Outs, Fn);
+ }
+
/// AnalyzeCallResult - Analyze the return values of a call,
/// incorporating info about the passed values into this state.
void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn);
+ /// A shadow allocated register is a register that was allocated
+ /// but wasn't added to the location list (Locs).
+ /// \returns true if the register was allocated as shadow or false otherwise.
+ bool IsShadowAllocatedReg(unsigned Reg) const;
+
/// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value.
void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
@@ -423,7 +440,7 @@ public:
void ensureMaxAlignment(unsigned Align) {
if (!AnalyzingMustTailForwardedRegs)
- MF.getFrameInfo()->ensureMaxAlignment(Align);
+ MF.getFrameInfo().ensureMaxAlignment(Align);
}
/// Version of AllocateStack with extra register to be shadowed.
@@ -521,6 +538,37 @@ public:
const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn CalleeFn, CCAssignFn CallerFn);
+ /// The function runs an additional analysis pass over function arguments.
+ /// It will mark each argument with the attribute flag SecArgPass.
+ /// After running, it will sort the locs list.
+ template <class T>
+ void AnalyzeArgumentsSecondPass(const SmallVectorImpl<T> &Args,
+ CCAssignFn Fn) {
+ unsigned NumFirstPassLocs = Locs.size();
+
+ /// Creates an argument list similar to \p Args in which each argument is
+ /// marked with the SecArgPass flag.
+ SmallVector<T, 16> SecPassArg;
+ // SmallVector<ISD::InputArg, 16> SecPassArg;
+ for (auto Arg : Args) {
+ Arg.Flags.setSecArgPass();
+ SecPassArg.push_back(Arg);
+ }
+
+ // Run the second argument pass
+ AnalyzeArguments(SecPassArg, Fn);
+
+ // Sort the locations of the arguments according to their original position.
+ SmallVector<CCValAssign, 16> TmpArgLocs;
+ std::swap(TmpArgLocs, Locs);
+ auto B = TmpArgLocs.begin(), E = TmpArgLocs.end();
+ std::merge(B, B + NumFirstPassLocs, B + NumFirstPassLocs, E,
+ std::back_inserter(Locs),
+ [](const CCValAssign &A, const CCValAssign &B) -> bool {
+ return A.getValNo() < B.getValNo();
+ });
+ }
+
private:
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void MarkAllocated(unsigned Reg);
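
AnalyzeArgumentsSecondPass supports targets whose calling convention assigns some arguments only on a second pass; the SecArgPass flag marks the re-analyzed copies and the final merge restores the original argument order in Locs. A hedged sketch of a target driving it (analyzeFormals and both assign functions are illustrative):

// Sketch: two-pass formal-argument analysis using the new CCState helpers.
#include "llvm/CodeGen/CallingConvLower.h"
using namespace llvm;

static void analyzeFormals(CCState &CCInfo,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           CCAssignFn *FirstPassFn, CCAssignFn *SecondPassFn) {
  // First pass: AnalyzeArguments dispatches to AnalyzeFormalArguments for
  // ISD::InputArg and to AnalyzeCallOperands for ISD::OutputArg.
  CCInfo.AnalyzeArguments(Ins, FirstPassFn);

  // Second pass: each argument is re-run with SecArgPass set so the assign
  // function can treat it differently; afterwards Locs is merged back into
  // the original argument order.
  CCInfo.AnalyzeArgumentsSecondPass(Ins, SecondPassFn);
}
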
diff --git a/include/llvm/CodeGen/CommandFlags.h b/include/llvm/CodeGen/CommandFlags.h
index 6376c06768b3..aab522d00de7 100644
--- a/include/llvm/CodeGen/CommandFlags.h
+++ b/include/llvm/CodeGen/CommandFlags.h
@@ -27,7 +27,6 @@
#include "llvm/Support/Host.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRecip.h"
#include <string>
using namespace llvm;
@@ -54,7 +53,12 @@ cl::opt<Reloc::Model> RelocModel(
"Fully relocatable, position independent code"),
clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
"Relocatable external references, non-relocatable code"),
- clEnumValEnd));
+ clEnumValN(Reloc::ROPI, "ropi",
+ "Code and read-only data relocatable, accessed PC-relative"),
+ clEnumValN(Reloc::RWPI, "rwpi",
+ "Read-write data relocatable, accessed relative to static base"),
+ clEnumValN(Reloc::ROPI_RWPI, "ropi-rwpi",
+ "Combination of ropi and rwpi")));
static inline Optional<Reloc::Model> getRelocModel() {
if (RelocModel.getNumOccurrences()) {
@@ -71,8 +75,7 @@ TMModel("thread-model",
cl::values(clEnumValN(ThreadModel::POSIX, "posix",
"POSIX thread model"),
clEnumValN(ThreadModel::Single, "single",
- "Single thread model"),
- clEnumValEnd));
+ "Single thread model")));
cl::opt<llvm::CodeModel::Model>
CMModel("code-model",
@@ -87,8 +90,7 @@ CMModel("code-model",
clEnumValN(CodeModel::Medium, "medium",
"Medium code model"),
clEnumValN(CodeModel::Large, "large",
- "Large code model"),
- clEnumValEnd));
+ "Large code model")));
cl::opt<llvm::ExceptionHandling>
ExceptionModel("exception-model",
@@ -103,8 +105,7 @@ ExceptionModel("exception-model",
clEnumValN(ExceptionHandling::ARM, "arm",
"ARM EHABI exceptions"),
clEnumValN(ExceptionHandling::WinEH, "wineh",
- "Windows exception model"),
- clEnumValEnd));
+ "Windows exception model")));
cl::opt<TargetMachine::CodeGenFileType>
FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
@@ -115,8 +116,7 @@ FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
"Emit a native object ('.o') file"),
clEnumValN(TargetMachine::CGFT_Null, "null",
- "Emit nothing, for performance testing"),
- clEnumValEnd));
+ "Emit nothing, for performance testing")));
cl::opt<bool>
EnableFPMAD("enable-fp-mad",
@@ -144,6 +144,25 @@ EnableNoNaNsFPMath("enable-no-nans-fp-math",
cl::init(false));
cl::opt<bool>
+EnableNoTrappingFPMath("enable-no-trapping-fp-math",
+ cl::desc("Enable setting the FP exceptions build "
+ "attribute not to use exceptions"),
+ cl::init(false));
+
+cl::opt<llvm::FPDenormal::DenormalMode>
+DenormalMode("denormal-fp-math",
+ cl::desc("Select which denormal numbers the code is permitted to require"),
+ cl::init(FPDenormal::IEEE),
+ cl::values(
+ clEnumValN(FPDenormal::IEEE, "ieee",
+ "IEEE 754 denormal numbers"),
+ clEnumValN(FPDenormal::PreserveSign, "preserve-sign",
+ "the sign of a flushed-to-zero number is preserved "
+ "in the sign of 0"),
+ clEnumValN(FPDenormal::PositiveZero, "positive-zero",
+ "denormals are flushed to positive zero")));
+
+cl::opt<bool>
EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
cl::Hidden,
cl::desc("Force codegen to assume rounding mode can change dynamically"),
@@ -159,8 +178,7 @@ FloatABIForCalls("float-abi",
clEnumValN(FloatABI::Soft, "soft",
"Soft float ABI (implied by -soft-float)"),
clEnumValN(FloatABI::Hard, "hard",
- "Hard float ABI (uses FP registers)"),
- clEnumValEnd));
+ "Hard float ABI (uses FP registers)")));
cl::opt<llvm::FPOpFusion::FPOpFusionMode>
FuseFPOps("fp-contract",
@@ -172,14 +190,7 @@ FuseFPOps("fp-contract",
clEnumValN(FPOpFusion::Standard, "on",
"Only fuse 'blessed' FP ops."),
clEnumValN(FPOpFusion::Strict, "off",
- "Only fuse FP ops when the result won't be affected."),
- clEnumValEnd));
-
-cl::list<std::string>
-ReciprocalOps("recip",
- cl::CommaSeparated,
- cl::desc("Choose reciprocal operation types and parameters."),
- cl::value_desc("all,none,default,divf,!vec-sqrtd,vec-divd:0,sqrt:9..."));
+ "Only fuse FP ops when the result won't be affected.")));
cl::opt<bool>
DontPlaceZerosInBSS("nozero-initialized-in-bss",
@@ -221,14 +232,10 @@ UseCtors("use-ctors",
cl::desc("Use .ctors instead of .init_array."),
cl::init(false));
-cl::opt<std::string> StopAfter("stop-after",
- cl::desc("Stop compilation after a specific pass"),
- cl::value_desc("pass-name"),
- cl::init(""));
-cl::opt<std::string> StartAfter("start-after",
- cl::desc("Resume compilation after a specific pass"),
- cl::value_desc("pass-name"),
- cl::init(""));
+cl::opt<bool> RelaxELFRelocations(
+ "relax-elf-relocations",
+ cl::desc("Emit GOTPCRELX/REX_GOTPCRELX instead of GOTPCREL on x86-64 ELF"),
+ cl::init(false));
cl::opt<bool> DataSections("data-sections",
cl::desc("Emit data into separate sections"),
@@ -247,21 +254,6 @@ cl::opt<bool> UniqueSectionNames("unique-section-names",
cl::desc("Give unique names to every section"),
cl::init(true));
-cl::opt<llvm::JumpTable::JumpTableType>
-JTableType("jump-table-type",
- cl::desc("Choose the type of Jump-Instruction Table for jumptable."),
- cl::init(JumpTable::Single),
- cl::values(
- clEnumValN(JumpTable::Single, "single",
- "Create a single table for all jumptable functions"),
- clEnumValN(JumpTable::Arity, "arity",
- "Create one table per number of parameters."),
- clEnumValN(JumpTable::Simplified, "simplified",
- "Create one table per simplified function type."),
- clEnumValN(JumpTable::Full, "full",
- "Create one table per unique function type."),
- clEnumValEnd));
-
cl::opt<llvm::EABI> EABIVersion(
"meabi", cl::desc("Set EABI type (default depends on triple):"),
cl::init(EABI::Default),
@@ -269,7 +261,7 @@ cl::opt<llvm::EABI> EABIVersion(
"Triple default EABI version"),
clEnumValN(EABI::EABI4, "4", "EABI version 4"),
clEnumValN(EABI::EABI5, "5", "EABI version 5"),
- clEnumValN(EABI::GNU, "gnu", "EABI GNU"), clEnumValEnd));
+ clEnumValN(EABI::GNU, "gnu", "EABI GNU")));
cl::opt<DebuggerKind>
DebuggerTuningOpt("debugger-tune",
@@ -279,8 +271,7 @@ DebuggerTuningOpt("debugger-tune",
clEnumValN(DebuggerKind::GDB, "gdb", "gdb"),
clEnumValN(DebuggerKind::LLDB, "lldb", "lldb"),
clEnumValN(DebuggerKind::SCE, "sce",
- "SCE targets (e.g. PS4)"),
- clEnumValEnd));
+ "SCE targets (e.g. PS4)")));
// Common utility function tightly tied to the options listed here. Initializes
// a TargetOptions object with CodeGen flags and returns it.
@@ -288,10 +279,11 @@ static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
TargetOptions Options;
Options.LessPreciseFPMADOption = EnableFPMAD;
Options.AllowFPOpFusion = FuseFPOps;
- Options.Reciprocals = TargetRecip(ReciprocalOps);
Options.UnsafeFPMath = EnableUnsafeFPMath;
Options.NoInfsFPMath = EnableNoInfsFPMath;
Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+ Options.NoTrappingFPMath = EnableNoTrappingFPMath;
+ Options.FPDenormalMode = DenormalMode;
Options.HonorSignDependentRoundingFPMathOption =
EnableHonorSignDependentRoundingFPMath;
if (FloatABIForCalls != FloatABI::Default)
@@ -301,6 +293,7 @@ static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
Options.StackAlignmentOverride = OverrideStackAlignment;
Options.StackSymbolOrdering = StackSymbolOrdering;
Options.UseInitArray = !UseCtors;
+ Options.RelaxELFRelocations = RelaxELFRelocations;
Options.DataSections = DataSections;
Options.FunctionSections = FunctionSections;
Options.UniqueSectionNames = UniqueSectionNames;
@@ -308,7 +301,6 @@ static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
Options.ExceptionModel = ExceptionModel;
Options.MCOptions = InitMCTargetOptionsFromFlags();
- Options.JTType = JTableType;
Options.ThreadModel = TMModel;
Options.EABIVersion = EABIVersion;
@@ -385,7 +377,8 @@ static inline void setFunctionAttributes(StringRef CPU, StringRef Features,
if (F->getIntrinsicID() == Intrinsic::debugtrap ||
F->getIntrinsicID() == Intrinsic::trap)
Call->addAttribute(llvm::AttributeSet::FunctionIndex,
- "trap-func-name", TrapFuncName);
+ Attribute::get(Ctx, "trap-func-name",
+ TrapFuncName));
// Let NewAttrs override Attrs.
NewAttrs = Attrs.addAttributes(Ctx, AttributeSet::FunctionIndex, NewAttrs);
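
For tools built on CommandFlags.h the new options arrive as ordinary cl::opt flags and reach the backend through InitTargetOptionsFromCodeGenFlags(). A sketch of a minimal driver picking them up (the driver itself is illustrative):

// Sketch: command-line flags -> TargetOptions, including the new
// -relax-elf-relocations, -enable-no-trapping-fp-math and -denormal-fp-math.
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/Support/CommandLine.h"

int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv, "toy codegen driver\n");

  // The helper copies every parsed flag into a TargetOptions instance;
  // Options.FPDenormalMode and Options.RelaxELFRelocations are new here.
  llvm::TargetOptions Options = InitTargetOptionsFromCodeGenFlags();
  return Options.RelaxELFRelocations ? 0 : 1; // illustrative use only
}
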
diff --git a/include/llvm/CodeGen/DIE.h b/include/llvm/CodeGen/DIE.h
index 7d6e66fa6ec2..1e3476cd8395 100644
--- a/include/llvm/CodeGen/DIE.h
+++ b/include/llvm/CodeGen/DIE.h
@@ -16,87 +16,96 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/Dwarf.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <new>
+#include <type_traits>
+#include <vector>
namespace llvm {
+
class AsmPrinter;
+class DIE;
+class DIEUnit;
class MCExpr;
+class MCSection;
class MCSymbol;
class raw_ostream;
-class DwarfTypeUnit;
//===--------------------------------------------------------------------===//
-/// DIEAbbrevData - Dwarf abbreviation data, describes one attribute of a
-/// Dwarf abbreviation.
+/// Dwarf abbreviation data, describes one attribute of a Dwarf abbreviation.
class DIEAbbrevData {
- /// Attribute - Dwarf attribute code.
- ///
+ /// Dwarf attribute code.
dwarf::Attribute Attribute;
- /// Form - Dwarf form code.
- ///
+ /// Dwarf form code.
dwarf::Form Form;
public:
DIEAbbrevData(dwarf::Attribute A, dwarf::Form F) : Attribute(A), Form(F) {}
- // Accessors.
+ /// Accessors.
+ /// @{
dwarf::Attribute getAttribute() const { return Attribute; }
dwarf::Form getForm() const { return Form; }
+ /// @}
- /// Profile - Used to gather unique data for the abbreviation folding set.
- ///
+ /// Used to gather unique data for the abbreviation folding set.
void Profile(FoldingSetNodeID &ID) const;
};
//===--------------------------------------------------------------------===//
-/// DIEAbbrev - Dwarf abbreviation, describes the organization of a debug
-/// information object.
+/// Dwarf abbreviation, describes the organization of a debug information
+/// object.
class DIEAbbrev : public FoldingSetNode {
/// Unique number for node.
- ///
unsigned Number;
- /// Tag - Dwarf tag code.
- ///
+ /// Dwarf tag code.
dwarf::Tag Tag;
- /// Children - Whether or not this node has children.
+ /// Whether or not this node has children.
///
- // This cheats a bit in all of the uses since the values in the standard
- // are 0 and 1 for no children and children respectively.
+ /// This cheats a bit in all of the uses since the values in the standard
+ /// are 0 and 1 for no children and children respectively.
bool Children;
- /// Data - Raw data bytes for abbreviation.
- ///
+ /// Raw data bytes for abbreviation.
SmallVector<DIEAbbrevData, 12> Data;
public:
- DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C), Data() {}
+ DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C) {}
- // Accessors.
+ /// Accessors.
+ /// @{
dwarf::Tag getTag() const { return Tag; }
unsigned getNumber() const { return Number; }
bool hasChildren() const { return Children; }
const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
void setChildrenFlag(bool hasChild) { Children = hasChild; }
void setNumber(unsigned N) { Number = N; }
+ /// @}
- /// AddAttribute - Adds another set of attribute information to the
- /// abbreviation.
+ /// Adds another set of attribute information to the abbreviation.
void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
Data.push_back(DIEAbbrevData(Attribute, Form));
}
- /// Profile - Used to gather unique data for the abbreviation folding set.
- ///
+ /// Used to gather unique data for the abbreviation folding set.
void Profile(FoldingSetNodeID &ID) const;
- /// Emit - Print the abbreviation using the specified asm printer.
- ///
+ /// Print the abbreviation using the specified asm printer.
void Emit(const AsmPrinter *AP) const;
void print(raw_ostream &O);
@@ -104,7 +113,38 @@ public:
};
//===--------------------------------------------------------------------===//
-/// DIEInteger - An integer value DIE.
+/// Helps unique DIEAbbrev objects and assigns abbreviation numbers.
+///
+/// This class will unique the DIE abbreviations for a llvm::DIE object and
+/// assign a unique abbreviation number to each unique DIEAbbrev object it
+/// finds. The resulting collection of DIEAbbrev objects can then be emitted
+/// into the .debug_abbrev section.
+class DIEAbbrevSet {
+ /// The bump allocator to use when creating DIEAbbrev objects in the uniqued
+ /// storage container.
+ BumpPtrAllocator &Alloc;
+ /// \brief FoldingSet that uniques the abbreviations.
+ llvm::FoldingSet<DIEAbbrev> AbbreviationsSet;
+ /// A list of all the unique abbreviations in use.
+ std::vector<DIEAbbrev *> Abbreviations;
+
+public:
+ DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
+ ~DIEAbbrevSet();
+ /// Generate the abbreviation declaration for a DIE and return a pointer to
+ /// the generated abbreviation.
+ ///
+ /// \param Die the debug info entry to generate the abbreviation for.
+ /// \returns A reference to the uniqued abbreviation declaration that is
+ /// owned by this class.
+ DIEAbbrev &uniqueAbbreviation(DIE &Die);
+
+ /// Print all abbreviations using the specified asm printer.
+ void Emit(const AsmPrinter *AP, MCSection *Section) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// An integer value DIE.
///
class DIEInteger {
uint64_t Integer;
@@ -112,8 +152,7 @@ class DIEInteger {
public:
explicit DIEInteger(uint64_t I) : Integer(I) {}
- /// BestForm - Choose the best form for integer.
- ///
+ /// Choose the best form for integer.
static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
if (IsSigned) {
const int64_t SignedInt = Int;
@@ -144,16 +183,14 @@ public:
};
//===--------------------------------------------------------------------===//
-/// DIEExpr - An expression DIE.
-//
+/// An expression DIE.
class DIEExpr {
const MCExpr *Expr;
public:
explicit DIEExpr(const MCExpr *E) : Expr(E) {}
- /// getValue - Get MCExpr.
- ///
+ /// Get MCExpr.
const MCExpr *getValue() const { return Expr; }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
@@ -163,16 +200,14 @@ public:
};
//===--------------------------------------------------------------------===//
-/// DIELabel - A label DIE.
-//
+/// A label DIE.
class DIELabel {
const MCSymbol *Label;
public:
explicit DIELabel(const MCSymbol *L) : Label(L) {}
- /// getValue - Get MCSymbol.
- ///
+ /// Get MCSymbol.
const MCSymbol *getValue() const { return Label; }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
@@ -182,7 +217,7 @@ public:
};
//===--------------------------------------------------------------------===//
-/// DIEDelta - A simple label difference DIE.
+/// A simple label difference DIE.
///
class DIEDelta {
const MCSymbol *LabelHi;
@@ -198,15 +233,16 @@ public:
};
//===--------------------------------------------------------------------===//
-/// DIEString - A container for string values.
+/// A container for string pool string values.
///
+/// This class is used with the DW_FORM_strp and DW_FORM_GNU_str_index forms.
class DIEString {
DwarfStringPoolEntryRef S;
public:
DIEString(DwarfStringPoolEntryRef S) : S(S) {}
- /// getString - Grab the string out of the object.
+ /// Grab the string out of the object.
StringRef getString() const { return S.getString(); }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
@@ -216,9 +252,31 @@ public:
};
//===--------------------------------------------------------------------===//
-/// DIEEntry - A pointer to another debug information entry. An instance of
-/// this class can also be used as a proxy for a debug information entry not
-/// yet defined (ie. types.)
+/// A container for inline string values.
+///
+/// This class is used with the DW_FORM_string form.
+class DIEInlineString {
+ StringRef S;
+
+public:
+ template <typename Allocator>
+ explicit DIEInlineString(StringRef Str, Allocator &A) : S(Str.copy(A)) {}
+
+ ~DIEInlineString() = default;
+
+ /// Grab the string out of the object.
+ StringRef getString() const { return S; }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A pointer to another debug information entry. An instance of this class can
+/// also be used as a proxy for a debug information entry not yet defined
+/// (ie. types.)
class DIE;
class DIEEntry {
DIE *Entry;
@@ -230,30 +288,23 @@ public:
DIE &getEntry() const { return *Entry; }
- /// Returns size of a ref_addr entry.
- static unsigned getRefAddrSize(const AsmPrinter *AP);
-
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
- return Form == dwarf::DW_FORM_ref_addr ? getRefAddrSize(AP)
- : sizeof(int32_t);
- }
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
//===--------------------------------------------------------------------===//
-/// DIELocList - Represents a pointer to a location list in the debug_loc
+/// Represents a pointer to a location list in the debug_loc
/// section.
-//
class DIELocList {
- // Index into the .debug_loc vector.
+ /// Index into the .debug_loc vector.
size_t Index;
public:
DIELocList(size_t I) : Index(I) {}
- /// getValue - Grab the current index out.
+ /// Grab the current index out.
size_t getValue() const { return Index; }
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
@@ -263,9 +314,8 @@ public:
};
//===--------------------------------------------------------------------===//
-/// DIEValue - A debug information entry value. Some of these roughly correlate
+/// A debug information entry value. Some of these roughly correlate
/// to DWARF attribute classes.
-///
class DIEBlock;
class DIELoc;
class DIEValue {
@@ -277,8 +327,7 @@ public:
};
private:
- /// Ty - Type of data stored in the value.
- ///
+ /// Type of data stored in the value.
Type Ty = isNone;
dwarf::Attribute Attribute = (dwarf::Attribute)0;
dwarf::Form Form = (dwarf::Form)0;
@@ -355,9 +404,11 @@ private:
public:
DIEValue() = default;
+
DIEValue(const DIEValue &X) : Ty(X.Ty), Attribute(X.Attribute), Form(X.Form) {
copyVal(X);
}
+
DIEValue &operator=(const DIEValue &X) {
destroyVal();
Ty = X.Ty;
@@ -366,6 +417,7 @@ public:
copyVal(X);
return *this;
}
+
~DIEValue() { destroyVal(); }
#define HANDLE_DIEVALUE_SMALL(T) \
@@ -381,11 +433,13 @@ public:
}
#include "llvm/CodeGen/DIEValue.def"
- // Accessors
+ /// Accessors.
+ /// @{
Type getType() const { return Ty; }
dwarf::Attribute getAttribute() const { return Attribute; }
dwarf::Form getForm() const { return Form; }
explicit operator bool() const { return Ty; }
+ /// @}
#define HANDLE_DIEVALUE_SMALL(T) \
const DIE##T &getDIE##T() const { \
@@ -399,12 +453,10 @@ public:
}
#include "llvm/CodeGen/DIEValue.def"
- /// EmitValue - Emit value via the Dwarf writer.
- ///
+ /// Emit value via the Dwarf writer.
void EmitValue(const AsmPrinter *AP) const;
- /// SizeOf - Return the size of a value in bytes.
- ///
+ /// Return the size of a value in bytes.
unsigned SizeOf(const AsmPrinter *AP) const;
void print(raw_ostream &O) const;
@@ -413,6 +465,7 @@ public:
struct IntrusiveBackListNode {
PointerIntPair<IntrusiveBackListNode *, 1> Next;
+
IntrusiveBackListNode() : Next(this, true) {}
IntrusiveBackListNode *getNext() const {
@@ -576,39 +629,34 @@ public:
}
value_range values() {
- return llvm::make_range(value_iterator(List.begin()),
- value_iterator(List.end()));
+ return make_range(value_iterator(List.begin()), value_iterator(List.end()));
}
const_value_range values() const {
- return llvm::make_range(const_value_iterator(List.begin()),
- const_value_iterator(List.end()));
+ return make_range(const_value_iterator(List.begin()),
+ const_value_iterator(List.end()));
}
};
//===--------------------------------------------------------------------===//
-/// DIE - A structured debug information entry. Has an abbreviation which
+/// A structured debug information entry. Has an abbreviation which
/// describes its organization.
class DIE : IntrusiveBackListNode, public DIEValueList {
friend class IntrusiveBackList<DIE>;
+ friend class DIEUnit;
- /// Offset - Offset in debug info section.
- ///
+ /// Dwarf unit relative offset.
unsigned Offset;
-
- /// Size - Size of instance + children.
- ///
+ /// Size of instance + children.
unsigned Size;
-
unsigned AbbrevNumber = ~0u;
-
- /// Tag - Dwarf tag code.
- ///
+ /// Dwarf tag code.
dwarf::Tag Tag = (dwarf::Tag)0;
-
/// Children DIEs.
IntrusiveBackList<DIE> Children;
- DIE *Parent = nullptr;
+ /// The owner is either the parent DIE for children of other DIEs, or a
+ /// DIEUnit which contains this DIE as its unit DIE.
+ PointerUnion<DIE *, DIEUnit *> Owner;
DIE() = delete;
explicit DIE(dwarf::Tag Tag) : Offset(0), Size(0), Tag(Tag) {}
@@ -618,9 +666,15 @@ public:
return new (Alloc) DIE(Tag);
}
+ DIE(const DIE &RHS) = delete;
+ DIE(DIE &&RHS) = delete;
+ void operator=(const DIE &RHS) = delete;
+ void operator=(const DIE &&RHS) = delete;
+
// Accessors.
unsigned getAbbrevNumber() const { return AbbrevNumber; }
dwarf::Tag getTag() const { return Tag; }
+ /// Get the compile/type unit relative offset of this DIE.
unsigned getOffset() const { return Offset; }
unsigned getSize() const { return Size; }
bool hasChildren() const { return !Children.empty(); }
@@ -631,13 +685,13 @@ public:
typedef iterator_range<const_child_iterator> const_child_range;
child_range children() {
- return llvm::make_range(Children.begin(), Children.end());
+ return make_range(Children.begin(), Children.end());
}
const_child_range children() const {
- return llvm::make_range(Children.begin(), Children.end());
+ return make_range(Children.begin(), Children.end());
}
- DIE *getParent() const { return Parent; }
+ DIE *getParent() const;
/// Generate the abbreviation for this DIE.
///
@@ -648,19 +702,50 @@ public:
/// Set the abbreviation number for this DIE.
void setAbbrevNumber(unsigned I) { AbbrevNumber = I; }
- /// Climb up the parent chain to get the compile or type unit DIE this DIE
- /// belongs to.
- const DIE *getUnit() const;
- /// Similar to getUnit, returns null when DIE is not added to an
- /// owner yet.
- const DIE *getUnitOrNull() const;
+ /// Get the absolute offset within the .debug_info or .debug_types section
+ /// for this DIE.
+ unsigned getDebugSectionOffset() const;
+
+ /// Compute the offset of this DIE and all its children.
+ ///
+ /// This function gets called just before we are going to generate the debug
+ /// information and gives each DIE a chance to figure out its CU relative DIE
+ /// offset, unique its abbreviation and fill in the abbreviation code, and
+ /// return the unit offset that points to where the next DIE will be emitted
+ /// within the debug unit section. After this function has been called for all
+ /// DIE objects, the DWARF can be generated, since every DIE will have a
+ /// calculated offset and can properly refer to other DIE objects.
+ ///
+ /// \param AP AsmPrinter to use when calculating sizes.
+ /// \param AbbrevSet the abbreviation set used to unique DIE abbreviations.
+ /// \param CUOffset the compile/type unit relative offset in bytes.
+ /// \returns the offset for the DIE that follows this DIE within the
+ /// current compile/type unit.
+ unsigned computeOffsetsAndAbbrevs(const AsmPrinter *AP,
+ DIEAbbrevSet &AbbrevSet, unsigned CUOffset);
+
+ /// Climb up the parent chain to get the compile unit or type unit DIE that
+ /// this DIE belongs to.
+ ///
+ /// \returns the compile or type unit DIE that owns this DIE, or NULL if
+ /// this DIE hasn't been added to a unit DIE.
+ const DIE *getUnitDie() const;
+
+ /// Climb up the parent chain to get the compile unit or type unit that this
+ /// DIE belongs to.
+ ///
+ /// \returns the DIEUnit that represents the compile or type unit that owns
+ /// this DIE, or NULL if this DIE hasn't been added to a unit DIE.
+ const DIEUnit *getUnit() const;
+
void setOffset(unsigned O) { Offset = O; }
void setSize(unsigned S) { Size = S; }
/// Add a child to the DIE.
DIE &addChild(DIE *Child) {
assert(!Child->getParent() && "Child should be orphaned");
- Child->Parent = this;
+ Child->Owner = this;
Children.push_back(*Child);
return Children.back();
}
@@ -676,6 +761,52 @@ public:
};
//===--------------------------------------------------------------------===//
+/// Represents a compile or type unit.
+class DIEUnit {
+ /// The compile unit or type unit DIE. This variable must be an instance of
+ /// DIE so that we can calculate the DIEUnit from any DIE by traversing the
+ /// parent backchain and getting the Unit DIE, and then casting itself to a
+ /// DIEUnit. This allows us to find the DIEUnit for any DIE without having to
+ /// store a pointer to the DIEUnit in each DIE instance.
+ DIE Die;
+ /// The section this unit will be emitted in. This may or may not be set to
+ /// a valid section depending on the client that is emitting DWARF.
+ MCSection *Section;
+ uint64_t Offset; /// .debug_info or .debug_types absolute section offset.
+ uint32_t Length; /// The length in bytes of all of the DIEs in this unit.
+ const uint16_t Version; /// The Dwarf version number for this unit.
+ const uint8_t AddrSize; /// The size in bytes of an address for this unit.
+public:
+ DIEUnit(uint16_t Version, uint8_t AddrSize, dwarf::Tag UnitTag);
+ DIEUnit(const DIEUnit &RHS) = delete;
+ DIEUnit(DIEUnit &&RHS) = delete;
+ void operator=(const DIEUnit &RHS) = delete;
+ void operator=(const DIEUnit &&RHS) = delete;
+ /// Set the section that this DIEUnit will be emitted into.
+ ///
+ /// This function is used by some clients to set the section. Not all clients
+ /// that emit DWARF use this section variable.
+ void setSection(MCSection *Section) {
+ assert(!this->Section);
+ this->Section = Section;
+ }
+
+ /// Return the section that this DIEUnit will be emitted into.
+ ///
+ /// \returns Section pointer which can be NULL.
+ MCSection *getSection() const { return Section; }
+ void setDebugSectionOffset(unsigned O) { Offset = O; }
+ unsigned getDebugSectionOffset() const { return Offset; }
+ void setLength(uint64_t L) { Length = L; }
+ uint64_t getLength() const { return Length; }
+ uint16_t getDwarfVersion() const { return Version; }
+ uint16_t getAddressSize() const { return AddrSize; }
+ DIE &getUnitDie() { return Die; }
+ const DIE &getUnitDie() const { return Die; }
+};
+
+
+//===--------------------------------------------------------------------===//
/// DIELoc - Represents an expression location.
//
class DIELoc : public DIEValueList {
@@ -740,6 +871,6 @@ public:
void print(raw_ostream &O) const;
};
-} // end llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
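
With DIEUnit and DIEAbbrevSet in place, a client builds a unit, attaches children to its unit DIE, and runs computeOffsetsAndAbbrevs once before emission. A condensed sketch under those assumptions (version 4, 8-byte addresses and the 11-byte DWARF32 header size are illustrative choices):

// Sketch: build a tiny compile unit with the reworked DIE ownership model.
#include "llvm/CodeGen/DIE.h"
#include "llvm/Support/Allocator.h"
using namespace llvm;

static void buildUnit(const AsmPrinter *AP) {
  BumpPtrAllocator Alloc;
  DIEAbbrevSet Abbrevs(Alloc);                  // uniques DIE abbreviations

  DIEUnit Unit(/*Version=*/4, /*AddrSize=*/8, dwarf::DW_TAG_compile_unit);
  DIE &CUDie = Unit.getUnitDie();               // the unit owns this DIE

  DIE &Child = CUDie.addChild(DIE::get(Alloc, dwarf::DW_TAG_base_type));
  Child.addValue(Alloc, dwarf::DW_AT_name, dwarf::DW_FORM_string,
                 new (Alloc) DIEInlineString("int", Alloc));

  // Assign unit-relative offsets and abbreviation numbers in one walk; the
  // 11-byte constant stands in for the DWARF32 v4 unit header size.
  unsigned EndOffset =
      CUDie.computeOffsetsAndAbbrevs(AP, Abbrevs, /*CUOffset=*/11);
  Unit.setLength(EndOffset);                    // illustrative bookkeeping
}
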
diff --git a/include/llvm/CodeGen/DIEValue.def b/include/llvm/CodeGen/DIEValue.def
index c5ff4010b2e4..a3fce9b1d20c 100644
--- a/include/llvm/CodeGen/DIEValue.def
+++ b/include/llvm/CodeGen/DIEValue.def
@@ -40,6 +40,7 @@ HANDLE_DIEVALUE_SMALL(Entry)
HANDLE_DIEVALUE_LARGE(Block)
HANDLE_DIEVALUE_LARGE(Loc)
HANDLE_DIEVALUE_SMALL(LocList)
+HANDLE_DIEVALUE_LARGE(InlineString)
#undef HANDLE_DIEVALUE
#undef HANDLE_DIEVALUE_SMALL
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 4bff48de38e4..cdaea250c33b 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -154,7 +154,7 @@ public:
CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
CallingConv::ID CC, Type *ResultTy,
- const char *Target, ArgListTy &&ArgsList,
+ StringRef Target, ArgListTy &&ArgsList,
unsigned FixedArgs = ~0U);
CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
@@ -356,19 +356,6 @@ protected:
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
- /// \brief This method is called by target-independent code to request that an
- /// instruction with the given type, opcode, and register and floating-point
- /// immediate operands be emitted.
- virtual unsigned fastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
- bool Op0IsKill, const ConstantFP *FPImm);
-
- /// \brief This method is called by target-independent code to request that an
- /// instruction with the given type, opcode, and register and immediate
- /// operands be emitted.
- virtual unsigned fastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode,
- unsigned Op0, bool Op0IsKill, unsigned Op1,
- bool Op1IsKill, uint64_t Imm);
-
/// \brief This method is a wrapper of fastEmit_ri.
///
/// It first tries to emit an instruction with an immediate operand using
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index 010e34179efc..75cd7da9d6b9 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -33,7 +33,6 @@ namespace llvm {
class AllocaInst;
class BasicBlock;
class BranchProbabilityInfo;
-class CallInst;
class Function;
class GlobalVariable;
class Instruction;
@@ -72,36 +71,36 @@ public:
/// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
- typedef SmallVector<unsigned, 1> SwiftErrorVRegs;
+ /// A map from swifterror value in a basic block to the virtual register it is
+ /// currently represented by.
+ llvm::DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+ SwiftErrorVRegDefMap;
+
+ /// A list of upward exposed vreg uses that need to be satisfied by either a
+ /// copy def or a phi node at the beginning of the basic block representing
+ /// the predecessors' swifterror value.
+ llvm::DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+ SwiftErrorVRegUpwardsUse;
+
+ /// The swifterror argument of the current function.
+ const Value *SwiftErrorArg;
+
typedef SmallVector<const Value*, 1> SwiftErrorValues;
/// A function can only have a single swifterror argument. And if it does
/// have a swifterror argument, it must be the first entry in
/// SwiftErrorVals.
SwiftErrorValues SwiftErrorVals;
- /// Track the virtual register for each swifterror value in a given basic
- /// block. Entries in SwiftErrorVRegs have the same ordering as entries
- /// in SwiftErrorVals.
- /// Note that another choice that is more straight-forward is to use
- /// Map<const MachineBasicBlock*, Map<Value*, unsigned/*VReg*/>>. It
- /// maintains a map from swifterror values to virtual registers for each
- /// machine basic block. This choice does not require a one-to-one
- /// correspondence between SwiftErrorValues and SwiftErrorVRegs. But because
- /// of efficiency concern, we do not choose it.
- llvm::DenseMap<const MachineBasicBlock*, SwiftErrorVRegs> SwiftErrorMap;
-
- /// Track the virtual register for each swifterror value at the end of a basic
- /// block when we need the assignment of a virtual register before the basic
- /// block is visited. When we actually visit the basic block, we will make
- /// sure the swifterror value is in the correct virtual register.
- llvm::DenseMap<const MachineBasicBlock*, SwiftErrorVRegs>
- SwiftErrorWorklist;
-
- /// Find the swifterror virtual register in SwiftErrorMap. We will assert
- /// failure when the value does not exist in swifterror map.
- unsigned findSwiftErrorVReg(const MachineBasicBlock*, const Value*) const;
- /// Set the swifterror virtual register in SwiftErrorMap.
- void setSwiftErrorVReg(const MachineBasicBlock *MBB, const Value*, unsigned);
+
+ /// Get or create the swifterror value virtual register in
+ /// SwiftErrorVRegDefMap for this basic block.
+ unsigned getOrCreateSwiftErrorVReg(const MachineBasicBlock *,
+ const Value *);
+
+ /// Set the swifterror virtual register in the SwiftErrorVRegDefMap for this
+ /// basic block.
+ void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *,
+ unsigned);
/// ValueMap - Since we emit code for the function a basic block at a time,
/// we must remember which virtual registers hold the values for
@@ -297,18 +296,6 @@ private:
IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
};
-/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
-/// being passed to this variadic function, and set the MachineModuleInfo's
-/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
-/// reference to _fltused on Windows, which will link in MSVCRT's
-/// floating-point support.
-void ComputeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo *MMI);
-
-/// AddLandingPadInfo - Extract the exception handling information from the
-/// landingpad instruction and add them to the specified machine module info.
-void AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
- MachineBasicBlock *MBB);
-
} // end namespace llvm
#endif
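
As an aside on the accessors added above to the enclosing FunctionLoweringInfo class, here is a minimal usage sketch (not part of the patch) of how a client might consult the new per-block swifterror mapping; the copyOutgoingSwiftError helper is purely illustrative:

    // Illustrative only: FuncInfo is the FunctionLoweringInfo for the function
    // being lowered, MBB is the block currently being visited.
    void copyOutgoingSwiftError(FunctionLoweringInfo &FuncInfo,
                                const MachineBasicBlock *MBB,
                                const Value *SwiftErrorVal, unsigned NewVReg) {
      // Read: the vreg representing the swifterror value on entry to this
      // block; created (and recorded as an upwards-exposed use) if missing.
      unsigned CurVReg = FuncInfo.getOrCreateSwiftErrorVReg(MBB, SwiftErrorVal);
      (void)CurVReg;
      // Write: after a def (e.g. the swifterror result of a call) produces a
      // fresh vreg, make it the block's current representation of the value.
      FuncInfo.setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, NewVReg);
    }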
diff --git a/include/llvm/CodeGen/GlobalISel/CallLowering.h b/include/llvm/CodeGen/GlobalISel/CallLowering.h
index bbd0b6d88593..0b157bf937a3 100644
--- a/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -16,17 +16,70 @@
#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/Target/TargetCallingConv.h"
namespace llvm {
// Forward declarations.
class MachineIRBuilder;
+class MachineOperand;
class TargetLowering;
class Value;
class CallLowering {
const TargetLowering *TLI;
- protected:
+public:
+ struct ArgInfo {
+ unsigned Reg;
+ Type *Ty;
+ ISD::ArgFlagsTy Flags;
+
+ ArgInfo(unsigned Reg, Type *Ty, ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy{})
+ : Reg(Reg), Ty(Ty), Flags(Flags) {}
+ };
+
+ /// Argument handling is mostly uniform between the four places that
+ /// make these decisions: function formal arguments, call
+ /// instruction args, call instruction returns and function
+ /// returns. However, once a decision has been made on where an
+ /// argument should go, exactly what happens can vary slightly. This
+ /// class abstracts the differences.
+ struct ValueHandler {
+ /// Materialize a VReg containing the address of the specified
+ /// stack-based object. This is either based on a FrameIndex or
+ /// direct SP manipulation, depending on the context. \p MPO
+ /// should be initialized to an appropriate description of the
+ /// address created.
+ virtual unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ MachinePointerInfo &MPO) = 0;
+
+ /// The specified value has been assigned to a physical register,
+ /// handle the appropriate COPY (either to or from) and mark any
+ /// relevant uses/defines as needed.
+ virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ CCValAssign &VA) = 0;
+
+ /// The specified value has been assigned to a stack
+ /// location. Load or store it there, with appropriate extension
+ /// if necessary.
+ virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr,
+ uint64_t Size, MachinePointerInfo &MPO,
+ CCValAssign &VA) = 0;
+
+ unsigned extendRegister(unsigned ValReg, CCValAssign &VA);
+
+ ValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
+ : MIRBuilder(MIRBuilder), MRI(MRI) {}
+
+ virtual ~ValueHandler() {}
+
+ MachineIRBuilder &MIRBuilder;
+ MachineRegisterInfo &MRI;
+ };
+
+protected:
/// Getter for generic TargetLowering class.
const TargetLowering *getTLI() const {
return TLI;
@@ -37,7 +90,20 @@ class CallLowering {
const XXXTargetLowering *getTLI() const {
return static_cast<const XXXTargetLowering *>(TLI);
}
- public:
+
+
+ template <typename FuncInfoTy>
+ void setArgFlags(ArgInfo &Arg, unsigned OpNum, const DataLayout &DL,
+ const FuncInfoTy &FuncInfo) const;
+
+ /// Invoke the \p AssignFn on each of the given \p Args and then use
+ /// \p Callback to move them to the assigned locations.
+ ///
+ /// \return True if everything has succeeded, false otherwise.
+ bool handleAssignments(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn,
+ ArrayRef<ArgInfo> Args, ValueHandler &Callback) const;
+
+public:
CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
virtual ~CallLowering() {}
@@ -46,8 +112,8 @@ class CallLowering {
/// This hook is used by GlobalISel.
///
/// \return True if the lowering succeeds, false otherwise.
- virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- unsigned VReg) const {
+ virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
+ const Value *Val, unsigned VReg) const {
return false;
}
@@ -60,12 +126,54 @@ class CallLowering {
/// lowering.
///
/// \return True if the lowering succeeded, false otherwise.
- virtual bool
- lowerFormalArguments(MachineIRBuilder &MIRBuilder,
- const Function::ArgumentListType &Args,
- const SmallVectorImpl<unsigned> &VRegs) const {
+ virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+ const Function &F,
+ ArrayRef<unsigned> VRegs) const {
return false;
}
+
+ /// This hook must be implemented to lower the given call instruction,
+ /// including argument and return value marshalling.
+ ///
+ /// \p Callee is the destination of the call. It should be either a register,
+ /// globaladdress, or externalsymbol.
+ ///
+ /// \p ResTy is the type returned by the function
+ ///
+ /// \p ResReg is the generic virtual register that the returned
+ /// value should be lowered into.
+ ///
+ /// \p ArgTys is a list of the types each member of \p ArgRegs has; used by
+ /// the target to decide which register/stack slot should be allocated.
+ ///
+ /// \p ArgRegs is a list of virtual registers containing each argument that
+ /// needs to be passed.
+ ///
+ /// \return true if the lowering succeeded, false otherwise.
+ virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
+ const MachineOperand &Callee, const ArgInfo &OrigRet,
+ ArrayRef<ArgInfo> OrigArgs) const {
+ return false;
+ }
+
+ /// This hook must be implemented to lower the given call instruction,
+ /// including argument and return value marshalling.
+ ///
+ /// \p ResReg is a register where the call's return value should be stored (or
+ /// 0 if there is no return value).
+ ///
+ /// \p ArgRegs is a list of virtual registers containing each argument that
+ /// needs to be passed.
+ ///
+ /// \p GetCalleeReg is a callback to materialize a register for the callee if
+ /// the target determines it cannot jump to the destination based purely on \p
+ /// CI. This might be because \p CI is indirect, or because of the limited
+ /// range of an immediate jump.
+ ///
+ /// \return true if the lowering succeeded, false otherwise.
+ virtual bool lowerCall(MachineIRBuilder &MIRBuilder, const CallInst &CI,
+ unsigned ResReg, ArrayRef<unsigned> ArgRegs,
+ std::function<unsigned()> GetCalleeReg) const;
};
} // End namespace llvm.
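
To make the ValueHandler contract above concrete: a hypothetical outgoing-argument handler for an imaginary target could look roughly like the sketch below (MyOutgoingHandler and the elided stack logic are assumptions, not part of this header):

    // Sketch only: copy each register-assigned value into its physical
    // register and leave the stack path unimplemented.
    struct MyOutgoingHandler : public CallLowering::ValueHandler {
      MyOutgoingHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
          : ValueHandler(MIRBuilder, MRI) {}

      unsigned getStackAddress(uint64_t Size, int64_t Offset,
                               MachinePointerInfo &MPO) override {
        // A real target would materialize SP + Offset here and describe it
        // in MPO.
        return 0;
      }

      void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                            CCValAssign &VA) override {
        // Extend as the calling convention requires, then copy into the
        // assigned physical register.
        MIRBuilder.buildCopy(PhysReg, extendRegister(ValVReg, VA));
      }

      void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                                MachinePointerInfo &MPO,
                                CCValAssign &VA) override {
        // A real target would emit a G_STORE of ValVReg to Addr here.
      }
    };

A target's lowerReturn or lowerCall would then build ArgInfo entries (flagging them with setArgFlags) and pass them, together with such a handler and the appropriate CCAssignFn, to handleAssignments.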
diff --git a/include/llvm/CodeGen/GlobalISel/GISelAccessor.h b/include/llvm/CodeGen/GlobalISel/GISelAccessor.h
index 7c5ec9f3adc0..8dea38059ea4 100644
--- a/include/llvm/CodeGen/GlobalISel/GISelAccessor.h
+++ b/include/llvm/CodeGen/GlobalISel/GISelAccessor.h
@@ -17,6 +17,8 @@
namespace llvm {
class CallLowering;
+class InstructionSelector;
+class LegalizerInfo;
class RegisterBankInfo;
/// The goal of this helper class is to gather the accessor to all
@@ -27,6 +29,10 @@ class RegisterBankInfo;
struct GISelAccessor {
virtual ~GISelAccessor() {}
virtual const CallLowering *getCallLowering() const { return nullptr;}
+ virtual const InstructionSelector *getInstructionSelector() const {
+ return nullptr;
+ }
+ virtual const LegalizerInfo *getLegalizerInfo() const { return nullptr; }
virtual const RegisterBankInfo *getRegBankInfo() const { return nullptr;}
};
} // End namespace llvm;
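
For context, a subtarget that supports GlobalISel typically owns one concrete accessor that returns the objects above. The shape is roughly as follows (a sketch; the XXX* types are placeholders and the relevant GlobalISel headers are assumed to be included):

    #include <memory>

    struct XXXGISelAccessor : public GISelAccessor {
      std::unique_ptr<CallLowering> CallLoweringInfo;
      std::unique_ptr<InstructionSelector> InstSelector;
      std::unique_ptr<LegalizerInfo> Legalizer;
      std::unique_ptr<RegisterBankInfo> RegBankInfo;

      const CallLowering *getCallLowering() const override {
        return CallLoweringInfo.get();
      }
      const InstructionSelector *getInstructionSelector() const override {
        return InstSelector.get();
      }
      const LegalizerInfo *getLegalizerInfo() const override {
        return Legalizer.get();
      }
      const RegisterBankInfo *getRegBankInfo() const override {
        return RegBankInfo.get();
      }
    };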
diff --git a/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 833e87493cad..76e0d47ceea3 100644
--- a/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -35,6 +35,7 @@ class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
+class TargetPassConfig;
// Technically the pass should run on an hypothetical MachineModule,
// since it should translate Global into some sort of MachineGlobal.
@@ -64,14 +65,25 @@ private:
// do not appear in that map.
SmallSetVector<const Constant *, 8> Constants;
+ // N.b. it's not completely obvious that this will be sufficient for every
+ // LLVM IR construct (with "invoke" being the obvious candidate to mess up our
+ // lives).
DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
+ // List of stubbed PHI instructions, for values and basic blocks to be filled
+ // in once all MachineBasicBlocks have been created.
+ SmallVector<std::pair<const PHINode *, MachineInstr *>, 4> PendingPHIs;
+
+ /// Record of what frame index has been allocated to specified allocas for
+ /// this function.
+ DenseMap<const AllocaInst *, int> FrameIndices;
+
/// Methods for translating from LLVM IR to MachineInstr.
/// \see ::translate for general information on the translate methods.
/// @{
/// Translate \p Inst into its corresponding MachineInstr instruction(s).
- /// Insert the newly translated instruction(s) right where the MIRBuilder
+ /// Insert the newly translated instruction(s) right where the CurBuilder
/// is set.
///
/// The general algorithm is:
@@ -93,51 +105,305 @@ private:
/// \return true if the translation succeeded.
bool translate(const Instruction &Inst);
+ /// Materialize \p C into virtual-register \p Reg. The generic instructions
+ /// performing this materialization will be inserted into the entry block of
+ /// the function.
+ ///
+ /// \return true if the materialization succeeded.
+ bool translate(const Constant &C, unsigned Reg);
+
+ /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
+ /// emitted.
+ bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate an LLVM load instruction into generic IR.
+ bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate an LLVM store instruction into generic IR.
+ bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateMemcpy(const CallInst &CI, MachineIRBuilder &MIRBuilder);
+
+ void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
+
+ bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
+ MachineIRBuilder &MIRBuilder);
+
+ bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
+ MachineIRBuilder &MIRBuilder);
+
+ /// Translate call instruction.
+ /// \pre \p U is a call instruction.
+ bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate one of LLVM's cast instructions into MachineInstrs, with the
+ /// given generic Opcode.
+ bool translateCast(unsigned Opcode, const User &U,
+ MachineIRBuilder &MIRBuilder);
+
+ /// Translate static alloca instruction (i.e. one of constant size and in the
+ /// first basic block).
+ bool translateStaticAlloca(const AllocaInst &Inst,
+ MachineIRBuilder &MIRBuilder);
+
+ /// Translate a phi instruction.
+ bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate a comparison (icmp or fcmp) instruction or constant.
+ bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
+
+ /// Translate an integer compare instruction (or constant).
+ bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCompare(U, MIRBuilder);
+ }
+
+ /// Translate a floating-point compare instruction (or constant).
+ bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCompare(U, MIRBuilder);
+ }
+
+
+ /// Add remaining operands onto phis we've translated. Executed after all
+ /// MachineBasicBlocks for the function have been created.
+ void finishPendingPhis();
+
/// Translate \p Inst into a binary operation \p Opcode.
- /// \pre \p Inst is a binary operation.
- bool translateBinaryOp(unsigned Opcode, const Instruction &Inst);
+ /// \pre \p U is a binary operation.
+ bool translateBinaryOp(unsigned Opcode, const User &U,
+ MachineIRBuilder &MIRBuilder);
/// Translate branch (br) instruction.
- /// \pre \p Inst is a branch instruction.
- bool translateBr(const Instruction &Inst);
+ /// \pre \p U is a branch instruction.
+ bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
/// Translate return (ret) instruction.
/// The target needs to implement CallLowering::lowerReturn for
/// this to succeed.
- /// \pre \p Inst is a return instruction.
- bool translateReturn(const Instruction &Inst);
+ /// \pre \p U is a return instruction.
+ bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
+
+ bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
+ }
+ bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
+ }
+ bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
+ }
+ bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
+ }
+ bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
+ }
+ bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
+ }
+
+ bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
+ }
+ bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
+ }
+ bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
+ }
+ bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
+ }
+ bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateStaticAlloca(cast<AllocaInst>(U), MIRBuilder);
+ }
+ bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
+ }
+ bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
+ }
+ bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
+ }
+ bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
+ }
+ bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
+ }
+ bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
+ }
+ bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
+ }
+ bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
+ }
+ bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
+ }
+ bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
+ return true;
+ }
+ bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
+ }
+
+ bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
+ }
+
+ bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
+ }
+ bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
+ }
+ bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
+ }
+
+ bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
+ }
+ bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
+ }
+ bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
+ }
+ bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
+ }
+ bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
+ }
+
+
+ // Stubs to keep the compiler happy while we implement the rest of the
+ // translation.
+ bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateFence(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+ bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder) {
+ return false;
+ }
+
/// @}
// Builder for machine instruction a la IRBuilder.
// I.e., compared to regular MIBuilder, this one also inserts the instruction
// in the current block, it can create blocks, etc., basically a kind of
// IRBuilder, but for Machine IR.
- MachineIRBuilder MIRBuilder;
+ MachineIRBuilder CurBuilder;
+
+ // Builder set to the entry block (just after ABI lowering instructions). Used
+ // as a convenient location for Constants.
+ MachineIRBuilder EntryBuilder;
+
+ // The MachineFunction currently being translated.
+ MachineFunction *MF;
/// MachineRegisterInfo used to create virtual registers.
MachineRegisterInfo *MRI;
+ const DataLayout *DL;
+
+ /// Current target configuration. Controls how the pass handles errors.
+ const TargetPassConfig *TPC;
+
// * Insert all the code needed to materialize the constants
// at the proper place. E.g., Entry block or dominator block
// of each constant depending on how fancy we want to be.
// * Clear the different maps.
- void finalize();
+ void finalizeFunction();
/// Get the VReg that represents \p Val.
/// If such VReg does not exist, it is created.
unsigned getOrCreateVReg(const Value &Val);
+ /// Get the frame index that represents \p AI.
+ /// If no such frame index exists, it is created.
+ int getOrCreateFrameIndex(const AllocaInst &AI);
+
+ /// Get the alignment of the given memory operation instruction. This will
+ /// either be the explicitly specified value or the ABI-required alignment for
+ /// the type being accessed (according to the Module's DataLayout).
+ unsigned getMemOpAlignment(const Instruction &I);
+
/// Get the MachineBasicBlock that represents \p BB.
/// If such basic block does not exist, it is created.
MachineBasicBlock &getOrCreateBB(const BasicBlock &BB);
+
public:
// Ctor, nothing fancy.
IRTranslator();
- const char *getPassName() const override {
- return "IRTranslator";
- }
+ StringRef getPassName() const override { return "IRTranslator"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
// Algo:
// CallLowering = MF.subtarget.getCallLowering()
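
The many translateXXX helpers above funnel into a few generic routines. As a sketch consistent with the declarations above (an assumption, not necessarily the shipped implementation), translateBinaryOp could be written as:

    bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                         MachineIRBuilder &MIRBuilder) {
      // Each IR value maps to a generic vreg; operand vregs are created on
      // demand.
      unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
      unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
      unsigned Res = getOrCreateVReg(U);

      // Emit a single generic instruction, e.g. %Res = G_ADD %Op0, %Op1, at
      // the builder's current insertion point.
      MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
      return true;
    }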
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelect.h b/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
new file mode 100644
index 000000000000..01521c46ab6a
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
@@ -0,0 +1,53 @@
+//== llvm/CodeGen/GlobalISel/InstructionSelect.h -----------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for selecting (possibly generic) machine instructions to
+/// target-specific instructions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+/// This pass is responsible for selecting generic machine instructions to
+/// target-specific instructions. It relies on the InstructionSelector provided
+/// by the target.
+/// Selection is done by examining blocks in post-order, and instructions in
+/// reverse order.
+///
+/// \post for all inst in MF: not isPreISelGenericOpcode(inst.opcode)
+class InstructionSelect : public MachineFunctionPass {
+public:
+ static char ID;
+ StringRef getPassName() const override { return "InstructionSelect"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::IsSSA)
+ .set(MachineFunctionProperties::Property::Legalized)
+ .set(MachineFunctionProperties::Property::RegBankSelected);
+ }
+
+ MachineFunctionProperties getSetProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::Selected);
+ }
+
+ InstructionSelect();
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // End namespace llvm.
+
+#endif
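
The post-order/reverse-order traversal described in the class comment can be pictured with the simplified loop below (a sketch only; the real pass also checks MachineFunction properties and reports selection failures as diagnostics):

    #include "llvm/ADT/PostOrderIterator.h"

    static bool selectAllInstructions(MachineFunction &MF,
                                      const InstructionSelector &ISel) {
      // Blocks in post-order, instructions bottom-up, so instructions inserted
      // by select() above the current point are not revisited.
      for (MachineBasicBlock *MBB : post_order(&MF)) {
        for (auto MII = MBB->rbegin(), End = MBB->rend(); MII != End;) {
          MachineInstr &MI = *MII++; // advance first: select() may mutate MI
          if (!ISel.select(MI))
            return false;
        }
      }
      return true;
    }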
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
new file mode 100644
index 000000000000..63b4f7b9507f
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -0,0 +1,63 @@
+//==-- llvm/CodeGen/GlobalISel/InstructionSelector.h -------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+
+namespace llvm {
+class MachineInstr;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+
+/// Provides the logic to select generic machine instructions.
+class InstructionSelector {
+public:
+ virtual ~InstructionSelector() {}
+
+ /// Select the (possibly generic) instruction \p I to only use target-specific
+ /// opcodes. It is OK to insert multiple instructions, but they cannot be
+ /// generic pre-isel instructions.
+ ///
+ /// \returns whether selection succeeded.
+ /// \pre I.getParent() && I.getParent()->getParent()
+ /// \post
+ /// if returns true:
+ /// for I in all mutated/inserted instructions:
+ /// !isPreISelGenericOpcode(I.getOpcode())
+ ///
+ virtual bool select(MachineInstr &I) const = 0;
+
+protected:
+ InstructionSelector();
+
+ /// Mutate the newly-selected instruction \p I to constrain its (possibly
+ /// generic) virtual register operands to the instruction's register class.
+ /// This could involve inserting COPYs before (for uses) or after (for defs).
+ /// This requires the number of operands to match the instruction description.
+ /// \returns whether operand regclass constraining succeeded.
+ ///
+ // FIXME: Not all instructions have the same number of operands. We should
+ // probably expose a constrain helper per operand and let the target selector
+ // constrain individual registers, like fast-isel.
+ bool constrainSelectedInstRegOperands(MachineInstr &I,
+ const TargetInstrInfo &TII,
+ const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI) const;
+};
+
+} // End namespace llvm.
+
+#endif
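
To show how a target plugs into this interface, a stripped-down selector might look like the sketch below; XXX::ADDWrr and the stored TII/TRI/RBI references are placeholders for whatever the target actually provides:

    class XXXInstructionSelector : public InstructionSelector {
      const TargetInstrInfo &TII;
      const TargetRegisterInfo &TRI;
      const RegisterBankInfo &RBI;

    public:
      XXXInstructionSelector(const TargetInstrInfo &TII,
                             const TargetRegisterInfo &TRI,
                             const RegisterBankInfo &RBI)
          : TII(TII), TRI(TRI), RBI(RBI) {}

      bool select(MachineInstr &I) const override {
        // Anything that is not a generic opcode is already selected.
        if (!isPreISelGenericOpcode(I.getOpcode()))
          return true;

        switch (I.getOpcode()) {
        case TargetOpcode::G_ADD:
          // Re-purpose the generic instruction with a target opcode
          // (XXX::ADDWrr is hypothetical), then constrain its vreg operands.
          I.setDesc(TII.get(XXX::ADDWrr));
          return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        default:
          return false; // not handled yet
        }
      }
    };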
diff --git a/include/llvm/CodeGen/GlobalISel/Legalizer.h b/include/llvm/CodeGen/GlobalISel/Legalizer.h
new file mode 100644
index 000000000000..8284ab6dac65
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -0,0 +1,65 @@
+//== llvm/CodeGen/GlobalISel/Legalizer.h ---------------------- -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is designed
+/// to be callable from other passes that find themselves with an illegal
+/// instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+
+class Legalizer : public MachineFunctionPass {
+public:
+ static char ID;
+
+private:
+
+ /// Initialize the field members using \p MF.
+ void init(MachineFunction &MF);
+
+public:
+ // Ctor, nothing fancy.
+ Legalizer();
+
+ StringRef getPassName() const override { return "Legalizer"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
+
+ MachineFunctionProperties getSetProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::Legalized);
+ }
+
+ bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
+ const TargetInstrInfo &TII);
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // End namespace llvm.
+
+#endif
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
new file mode 100644
index 000000000000..56c444ca46be
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -0,0 +1,104 @@
+//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is
+/// designed to be callable from other passes that find themselves with an
+/// illegal instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/LowLevelType.h"
+
+namespace llvm {
+// Forward declarations.
+class LegalizerInfo;
+class Legalizer;
+class MachineRegisterInfo;
+
+class LegalizerHelper {
+public:
+ enum LegalizeResult {
+ /// Instruction was already legal and no change was made to the
+ /// MachineFunction.
+ AlreadyLegal,
+
+ /// Instruction has been legalized and the MachineFunction changed.
+ Legalized,
+
+ /// Some kind of error has occurred and we could not legalize this
+ /// instruction.
+ UnableToLegalize,
+ };
+
+ LegalizerHelper(MachineFunction &MF);
+
+ /// Replace \p MI by a sequence of legal instructions that can implement the
+ /// same operation. Note that this means \p MI may be deleted, so any iterator
+ /// steps should be performed before calling this function. This helper must
+ /// have been constructed with the MachineFunction containing \p MI.
+ ///
+ /// Considered as an opaque blob, the legal code will use and define the same
+ /// registers as \p MI.
+ LegalizeResult legalizeInstrStep(MachineInstr &MI,
+ const LegalizerInfo &LegalizerInfo);
+
+ LegalizeResult legalizeInstr(MachineInstr &MI,
+ const LegalizerInfo &LegalizerInfo);
+
+ /// Legalize an instruction by emitting a runtime library call instead.
+ LegalizeResult libcall(MachineInstr &MI);
+
+ /// Legalize an instruction by reducing the width of the underlying scalar
+ /// type.
+ LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+ /// Legalize an instruction by performing the operation on a wider scalar type
+ /// (for example a 16-bit addition can be safely performed at 32-bit
+ /// precision, ignoring the unused bits).
+ LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+
+ /// Legalize an instruction by splitting it into simpler parts, hopefully
+ /// understood by the target.
+ LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+ /// Legalize a vector instruction by splitting into multiple components, each
+ /// acting on the same scalar type as the original but with fewer elements.
+ LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+
+ /// Legalize a vector instruction by increasing the number of vector elements
+ /// involved and ignoring the added elements later.
+ LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy);
+
+private:
+
+ /// Helper function to split a wide generic register into bitwise blocks with
+ /// the given Type (which implies the number of blocks needed). The generic
+ /// registers created are appended to Ops, starting at bit 0 of Reg.
+ void extractParts(unsigned Reg, LLT Ty, int NumParts,
+ SmallVectorImpl<unsigned> &Ops);
+
+ MachineIRBuilder MIRBuilder;
+ MachineRegisterInfo &MRI;
+};
+
+} // End namespace llvm.
+
+#endif
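
A minimal driver for the helper, assuming a MachineFunction MF and a target LegalizerInfo are at hand, might look like the sketch below; the real Legalizer pass additionally records and revisits instructions created while legalizing, and iterates to a fixed point:

    static bool legalizeAllOneStep(MachineFunction &MF,
                                   const LegalizerInfo &LInfo) {
      LegalizerHelper Helper(MF);
      for (MachineBasicBlock &MBB : MF) {
        for (auto MII = MBB.begin(), End = MBB.end(); MII != End;) {
          MachineInstr &MI = *MII++; // advance first: MI may be erased below
          if (Helper.legalizeInstrStep(MI, LInfo) ==
              LegalizerHelper::UnableToLegalize)
            return false; // a real pass would emit a diagnostic here
        }
      }
      return true;
    }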
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
new file mode 100644
index 000000000000..edf52daf3f8f
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -0,0 +1,207 @@
+//==-- llvm/CodeGen/GlobalISel/LegalizerInfo.h -------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for Targets to specify which operations they can successfully
+/// select and how the others should be expanded most efficiently.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/Target/TargetOpcodes.h"
+
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+class LLVMContext;
+class MachineInstr;
+class MachineRegisterInfo;
+class Type;
+class VectorType;
+
+/// Legalization is decided based on an instruction's opcode, which type slot
+/// we're considering, and what the existing type is. These aspects are gathered
+/// together for convenience in the InstrAspect class.
+struct InstrAspect {
+ unsigned Opcode;
+ unsigned Idx;
+ LLT Type;
+
+ InstrAspect(unsigned Opcode, LLT Type) : Opcode(Opcode), Idx(0), Type(Type) {}
+ InstrAspect(unsigned Opcode, unsigned Idx, LLT Type)
+ : Opcode(Opcode), Idx(Idx), Type(Type) {}
+
+ bool operator==(const InstrAspect &RHS) const {
+ return Opcode == RHS.Opcode && Idx == RHS.Idx && Type == RHS.Type;
+ }
+};
+
+class LegalizerInfo {
+public:
+ enum LegalizeAction : std::uint8_t {
+ /// The operation is expected to be selectable directly by the target, and
+ /// no transformation is necessary.
+ Legal,
+
+ /// The operation should be synthesized from multiple instructions acting on
+ /// a narrower scalar base-type. For example a 64-bit add might be
+ /// implemented in terms of 32-bit add-with-carry.
+ NarrowScalar,
+
+ /// The operation should be implemented in terms of a wider scalar
+ /// base-type. For example a <2 x s8> add could be implemented as a <2
+ /// x s32> add (ignoring the high bits).
+ WidenScalar,
+
+ /// The (vector) operation should be implemented by splitting it into
+ /// sub-vectors where the operation is legal. For example a <8 x s64> add
+ /// might be implemented as 4 separate <2 x s64> adds.
+ FewerElements,
+
+ /// The (vector) operation should be implemented by widening the input
+ /// vector and ignoring the lanes added by doing so. For example <2 x i8> is
+ /// rarely legal, but you might perform an <8 x i8> and then only look at
+ /// the first two results.
+ MoreElements,
+
+ /// The operation itself must be expressed in terms of simpler actions on
+ /// this target. E.g. a SREM replaced by an SDIV and subtraction.
+ Lower,
+
+ /// The operation should be implemented as a call to some kind of runtime
+ /// support library. For example this usually happens on machines that don't
+ /// support floating-point operations natively.
+ Libcall,
+
+ /// The target wants to do something special with this combination of
+ /// operand and type. A callback will be issued when it is needed.
+ Custom,
+
+ /// This operation is completely unsupported on the target. A programming
+ /// error has occurred.
+ Unsupported,
+
+ /// Sentinel value for when no action was found in the specified table.
+ NotFound,
+ };
+
+ LegalizerInfo();
+
+ /// Compute any ancillary tables needed to quickly decide how an operation
+ /// should be handled. This must be called after all "set*Action" methods but
+ /// before any query is made, or incorrect results may be returned.
+ void computeTables();
+
+ /// More friendly way to set an action for common types that have an LLT
+ /// representation.
+ void setAction(const InstrAspect &Aspect, LegalizeAction Action) {
+ TablesInitialized = false;
+ unsigned Opcode = Aspect.Opcode - FirstOp;
+ if (Actions[Opcode].size() <= Aspect.Idx)
+ Actions[Opcode].resize(Aspect.Idx + 1);
+ Actions[Aspect.Opcode - FirstOp][Aspect.Idx][Aspect.Type] = Action;
+ }
+
+ /// If an operation on a given vector type (say <M x iN>) isn't explicitly
+ /// specified, we proceed in 2 stages. First we legalize the underlying scalar
+ /// (so that there's at least one legal vector with that scalar), then we
+ /// adjust the number of elements in the vector so that it is legal. The
+ /// desired action in the first step is controlled by this function.
+ void setScalarInVectorAction(unsigned Opcode, LLT ScalarTy,
+ LegalizeAction Action) {
+ assert(!ScalarTy.isVector());
+ ScalarInVectorActions[std::make_pair(Opcode, ScalarTy)] = Action;
+ }
+
+
+ /// Determine what action should be taken to legalize the given generic
+ /// instruction opcode, type-index and type. Requires computeTables to have
+ /// been called.
+ ///
+ /// \returns a pair consisting of the kind of legalization that should be
+ /// performed and the destination type.
+ std::pair<LegalizeAction, LLT> getAction(const InstrAspect &Aspect) const;
+
+ /// Determine what action should be taken to legalize the given generic
+ /// instruction.
+ ///
+ /// \returns a tuple consisting of the LegalizeAction that should be
+ /// performed, the type-index it should be performed on and the destination
+ /// type.
+ std::tuple<LegalizeAction, unsigned, LLT>
+ getAction(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
+
+ /// Iterate the given function (typically something like doubling the width)
+ /// on Ty until we find a legal type for this operation.
+ LLT findLegalType(const InstrAspect &Aspect,
+ std::function<LLT(LLT)> NextType) const {
+ LegalizeAction Action;
+ const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx];
+ LLT Ty = Aspect.Type;
+ do {
+ Ty = NextType(Ty);
+ auto ActionIt = Map.find(Ty);
+ if (ActionIt == Map.end())
+ Action = DefaultActions.find(Aspect.Opcode)->second;
+ else
+ Action = ActionIt->second;
+ } while (Action != Legal);
+ return Ty;
+ }
+
+ /// Find what type it's actually OK to perform the given operation on, given
+ /// the general approach we've decided to take.
+ LLT findLegalType(const InstrAspect &Aspect, LegalizeAction Action) const;
+
+ std::pair<LegalizeAction, LLT> findLegalAction(const InstrAspect &Aspect,
+ LegalizeAction Action) const {
+ return std::make_pair(Action, findLegalType(Aspect, Action));
+ }
+
+ /// Find the specified \p Aspect in the primary (explicitly set) Actions
+ /// table. Returns either the action the target requested or NotFound if there
+ /// was no setAction call.
+ LegalizeAction findInActions(const InstrAspect &Aspect) const {
+ if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp)
+ return NotFound;
+ if (Aspect.Idx >= Actions[Aspect.Opcode - FirstOp].size())
+ return NotFound;
+ const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx];
+ auto ActionIt = Map.find(Aspect.Type);
+ if (ActionIt == Map.end())
+ return NotFound;
+
+ return ActionIt->second;
+ }
+
+ bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
+
+private:
+ static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
+ static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+
+ typedef DenseMap<LLT, LegalizeAction> TypeMap;
+ typedef DenseMap<std::pair<unsigned, LLT>, LegalizeAction> SIVActionMap;
+
+ SmallVector<TypeMap, 1> Actions[LastOp - FirstOp + 1];
+ SIVActionMap ScalarInVectorActions;
+ DenseMap<std::pair<unsigned, LLT>, uint16_t> MaxLegalVectorElts;
+ DenseMap<unsigned, LegalizeAction> DefaultActions;
+
+ bool TablesInitialized;
+};
+
+
+} // End namespace llvm.
+
+#endif
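
Tying the table-building API together, a hypothetical target's LegalizerInfo subclass could be populated roughly as follows (the action choices are illustrative only):

    struct XXXLegalizerInfo : public LegalizerInfo {
      XXXLegalizerInfo() {
        const LLT S1 = LLT::scalar(1);
        const LLT S32 = LLT::scalar(32);
        const LLT S64 = LLT::scalar(64);

        // 32- and 64-bit adds are directly selectable on this fictional
        // target; narrower ones get widened first.
        setAction({TargetOpcode::G_ADD, S32}, Legal);
        setAction({TargetOpcode::G_ADD, S64}, Legal);
        setAction({TargetOpcode::G_ADD, S1}, WidenScalar);

        // No native remainder: ask for it to be lowered (e.g. via div/mul/sub).
        setAction({TargetOpcode::G_SREM, S32}, Lower);

        // Must run after all setAction calls and before any getAction query.
        computeTables();
      }
    };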
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index efdc59a9cddf..ecd3e5e1e138 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -17,8 +17,13 @@
#include "llvm/CodeGen/GlobalISel/Types.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
+#include <queue>
+
namespace llvm {
// Forward declarations.
@@ -35,21 +40,26 @@ class MachineIRBuilder {
MachineFunction *MF;
/// Information used to access the description of the opcodes.
const TargetInstrInfo *TII;
+ /// Information used to verify types are consistent.
+ const MachineRegisterInfo *MRI;
/// Debug location to be set to any instruction we create.
DebugLoc DL;
/// Fields describing the insertion point.
/// @{
MachineBasicBlock *MBB;
- MachineInstr *MI;
- bool Before;
+ MachineBasicBlock::iterator II;
/// @}
+ std::function<void(MachineInstr *)> InsertedInstr;
+
const TargetInstrInfo &getTII() {
assert(TII && "TargetInstrInfo is not set");
return *TII;
}
+ void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
+
public:
/// Getter for the function we currently build.
MachineFunction &getMF() {
@@ -64,82 +74,432 @@ public:
}
/// Current insertion point for new instructions.
- MachineBasicBlock::iterator getInsertPt();
+ MachineBasicBlock::iterator getInsertPt() {
+ return II;
+ }
+
+ /// Set the insertion point before the specified position.
+ /// \pre MBB must be in getMF().
+ /// \pre II must be a valid iterator in MBB.
+ void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
+ /// @}
/// Setters for the insertion point.
/// @{
/// Set the MachineFunction where to build instructions.
void setMF(MachineFunction &);
- /// Set the insertion point to the beginning (\p Beginning = true) or end
- /// (\p Beginning = false) of \p MBB.
+ /// Set the insertion point to the end of \p MBB.
/// \pre \p MBB must be contained by getMF().
- void setMBB(MachineBasicBlock &MBB, bool Beginning = false);
+ void setMBB(MachineBasicBlock &MBB);
- /// Set the insertion point to before (\p Before = true) or after
- /// (\p Before = false) \p MI.
+ /// Set the insertion point to before MI.
/// \pre MI must be in getMF().
- void setInstr(MachineInstr &MI, bool Before = false);
+ void setInstr(MachineInstr &MI);
+ /// @}
+
+ /// Control where instructions we create are recorded (typically for
+ /// visiting again later during legalization).
+ /// @{
+ void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
+ void stopRecordingInsertions();
/// @}
/// Set the debug location to \p DL for all the next build instructions.
void setDebugLoc(const DebugLoc &DL) { this->DL = DL; }
- /// Build and insert <empty> = \p Opcode [\p Ty] <empty>.
- /// \p Ty is the type of the instruction if \p Opcode describes
- /// a generic machine instruction. \p Ty must be nullptr if \p Opcode
- /// does not describe a generic instruction.
+ /// Build and insert <empty> = \p Opcode <empty>.
/// The insertion point is the one set by the last call of either
/// setBasicBlock or setMI.
///
/// \pre setBasicBlock or setMI must have been called.
- /// \pre Ty == nullptr or isPreISelGenericOpcode(Opcode)
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildInstr(unsigned Opcode);
+
+ /// Build but don't insert <empty> = \p Opcode <empty>.
+ ///
+ /// \pre setMF, setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildInstrNoInsert(unsigned Opcode);
+
+ /// Insert an existing instruction at the insertion point.
+ MachineInstrBuilder insertInstr(MachineInstrBuilder MIB);
+
+ /// Build and insert \p Res<def> = G_FRAME_INDEX \p Idx
+ ///
+ /// G_FRAME_INDEX materializes the address of an alloca value or other
+ /// stack-based object.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
+
+ /// Build and insert \p Res<def> = G_GLOBAL_VALUE \p GV
+ ///
+ /// G_GLOBAL_VALUE materializes the address of the specified global
+ /// into \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with pointer type
+ /// in the same address space as \p GV.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
+
+ /// Build and insert \p Res<def> = G_ADD \p Op0, \p Op1
+ ///
+ /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
+ /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
+ unsigned Op1);
+
+ /// Build and insert \p Res<def> = G_SUB \p Op0, \p Op1
+ ///
+ /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and \p Op1,
+ /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
+ unsigned Op1);
+
+ /// Build and insert \p Res<def> = G_MUL \p Op0, \p Op1
+ ///
+ /// G_MUL sets \p Res to the product of integer parameters \p Op0 and \p Op1,
+ /// truncated to their width.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
+ unsigned Op1);
+
+ /// Build and insert \p Res<def> = G_GEP \p Op0, \p Op1
+ ///
+ /// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
+ /// storing the resulting pointer in \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+ /// type.
+ /// \pre \p Op1 must be a generic virtual register with scalar type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
+ unsigned Op1);
+
+ /// Build and insert \p Res<def>, \p CarryOut<def> = G_UADDE \p Op0,
+ /// \p Op1, \p CarryIn
+ ///
+ /// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
+ /// width) and sets \p CarryOut to 1 if the result overflowed in unsigned
+ /// arithmetic.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same scalar type.
+ /// \pre \p CarryOut and \p CarryIn must be generic virtual
+ /// registers with the same scalar type (typically s1)
///
/// \return The newly created instruction.
- MachineInstr *buildInstr(unsigned Opcode, Type *Ty);
+ MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
+ unsigned Op1, unsigned CarryIn);
- /// Build and insert <empty> = \p Opcode [\p Ty] \p BB.
+ /// Build and insert \p Res<def> = G_ANYEXT \p Op0
+ ///
+ /// G_ANYEXT produces a register of the width of \p Res, with its low bits set
+ /// to \p Op. The remaining bits are unspecified
+ /// (i.e. this is neither zero nor sign-extension). For a vector register,
+ /// each element is extended individually.
///
/// \pre setBasicBlock or setMI must have been called.
- /// \pre Ty == nullptr or isPreISelGenericOpcode(Opcode)
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be smaller than \p Res
///
/// \return The newly created instruction.
- MachineInstr *buildInstr(unsigned Opcode, Type *Ty, MachineBasicBlock &BB);
+ MachineInstrBuilder buildAnyExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = \p Opcode [\p Ty] \p Op0, \p Op1.
+ /// Build and insert \p Res<def> = G_SEXT \p Op
+ ///
+ /// G_SEXT produces a register of the width of \p Res, with its low bits set
+ /// to \p Op. The remaining bits are duplicated from the
+ /// high bit of \p Op (i.e. 2s-complement sign extended).
///
/// \pre setBasicBlock or setMI must have been called.
- /// \pre Ty == nullptr or isPreISelGenericOpcode(Opcode)
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be smaller than \p Res
///
/// \return The newly created instruction.
- MachineInstr *buildInstr(unsigned Opcode, Type *Ty, unsigned Res,
- unsigned Op0, unsigned Op1);
+ MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = \p Opcode \p Op0, \p Op1.
- /// I.e., instruction with a non-generic opcode.
+ /// Build and insert \p Res<def> = G_ZEXT \p Op
+ ///
+ /// G_ZEXT produces a register of the width of \p Res, with its low bits set
+ /// to \p Op. The remaining bits are 0. For a vector
+ /// register, each element is extended individually.
///
/// \pre setBasicBlock or setMI must have been called.
- /// \pre not isPreISelGenericOpcode(\p Opcode)
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be smaller than \p Res
///
/// \return The newly created instruction.
- MachineInstr *buildInstr(unsigned Opcode, unsigned Res, unsigned Op0,
- unsigned Op1);
+ MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = \p Opcode \p Op0.
+ /// Build and insert \p Res<def> = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
+
+ /// Build and insert G_BR \p Dest
+ ///
+ /// G_BR is an unconditional branch to \p Dest.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildBr(MachineBasicBlock &BB);
+
+ /// Build and insert G_BRCOND \p Tst, \p Dest
+ ///
+ /// G_BRCOND is a conditional branch to \p Dest.
///
/// \pre setBasicBlock or setMI must have been called.
- /// \pre not isPreISelGenericOpcode(\p Opcode)
+ /// \pre \p Tst must be a generic virtual register with scalar
+ /// type. At the beginning of legalization, this will be a single
+ /// bit (s1). Targets with interesting flags registers may change
+ /// this. For a wider type, whether the branch is taken must only
+ /// depend on bit 0 (for now).
///
/// \return The newly created instruction.
- MachineInstr *buildInstr(unsigned Opcode, unsigned Res, unsigned Op0);
+ MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &BB);
- /// Build and insert <empty> = \p Opcode <empty>.
+ /// Build and insert \p Res = G_CONSTANT \p Val
+ ///
+ /// G_CONSTANT is an integer constant with the specified size and value. \p
+ /// Val will be extended or truncated to the size of \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or pointer
+ /// type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildConstant(unsigned Res, const ConstantInt &Val);
+
+ /// Build and insert \p Res = G_CONSTANT \p Val
+ ///
+ /// G_CONSTANT is an integer constant with the specified size and value.
///
/// \pre setBasicBlock or setMI must have been called.
- /// \pre not isPreISelGenericOpcode(\p Opcode)
+ /// \pre \p Res must be a generic virtual register with scalar type.
///
/// \return The newly created instruction.
- MachineInstr *buildInstr(unsigned Opcode);
+ MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
+
+ /// Build and insert \p Res = G_FCONSTANT \p Val
+ ///
+ /// G_FCONSTANT is a floating-point constant with the specified size and
+ /// value.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
+
+ /// Build and insert \p Res<def> = COPY Op
+ ///
+ /// Register-to-register COPY sets \p Res to \p Op.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildCopy(unsigned Res, unsigned Op);
+
+ /// Build and insert `Res<def> = G_LOAD Addr, MMO`.
+ ///
+ /// Loads the value stored at \p Addr. Puts the result in \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `G_STORE Val, Addr, MMO`.
+ ///
+ /// Stores the value \p Val to \p Addr.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Val must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0, ...`.
+ ///
+ /// If \p Results[i] has size N bits, G_EXTRACT sets \p Results[i] to bits
+ /// `[Indices[i], Indices[i] + N)` of \p Src.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre Indices must be in ascending order of bit position.
+ /// \pre Each member of \p Results and \p Src must be a generic
+ /// virtual register.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildExtract(ArrayRef<unsigned> Results,
+ ArrayRef<uint64_t> Indices, unsigned Src);
+
+ /// Build and insert \p Res<def> = G_SEQUENCE \p Op0, \p Idx0...
+ ///
+ /// G_SEQUENCE inserts each element of Ops into an IMPLICIT_DEF register,
+ /// where each entry starts at the bit-index specified by \p Indices.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre The final element of the sequence must not extend past the end of the
+ /// destination register.
+ /// \pre The bits defined by each Op (derived from index and scalar size) must
+ /// not overlap.
+ /// \pre \p Indices must be in ascending order of bit position.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildSequence(unsigned Res,
+ ArrayRef<unsigned> Ops,
+ ArrayRef<uint64_t> Indices);
+
+ void addUsesWithIndices(MachineInstrBuilder MIB) {}
+
+ template <typename... ArgTys>
+ void addUsesWithIndices(MachineInstrBuilder MIB, unsigned Reg,
+ unsigned BitIndex, ArgTys... Args) {
+ MIB.addUse(Reg).addImm(BitIndex);
+ addUsesWithIndices(MIB, Args...);
+ }
+
+ template <typename... ArgTys>
+ MachineInstrBuilder buildSequence(unsigned Res, unsigned Op,
+ unsigned Index, ArgTys... Args) {
+ MachineInstrBuilder MIB =
+ buildInstr(TargetOpcode::G_SEQUENCE).addDef(Res);
+ addUsesWithIndices(MIB, Op, Index, Args...);
+ return MIB;
+ }
+
+ template <typename... ArgTys>
+ MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
+ unsigned Op, unsigned Index, ArgTys... Args) {
+ MachineInstrBuilder MIB =
+ buildInstr(TargetOpcode::G_INSERT).addDef(Res).addUse(Src);
+ addUsesWithIndices(MIB, Op, Index, Args...);
+ return MIB;
+ }
+
+ /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
+ /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
+ /// result register definition unless \p Res is NoReg (== 0). The second
+ /// operand will be the intrinsic's ID.
+ ///
+ /// Callers are expected to add the required definitions and uses afterwards.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
+ bool HasSideEffects);
+
+ /// Build and insert \p Res<def> = G_FPTRUNC \p Op
+ ///
+ /// G_FPTRUNC converts a floating-point value into one with a smaller type.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Res must be smaller than \p Op
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
+
+ /// Build and insert \p Res<def> = G_TRUNC \p Op
+ ///
+ /// G_TRUNC extracts the low bits of a type. For a vector type each element is
+ /// truncated independently before being packed into the destination.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ /// \pre \p Res must be smaller than \p Op
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildTrunc(unsigned Res, unsigned Op);
+
+ /// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \pre \p Res must be a generic virtual register with scalar or
+ /// vector type. Typically this starts as s1 or <N x s1>.
+ /// \pre \p Op0 and \p Op1 must be generic virtual registers with the
+ /// same number of elements as \p Res. If \p Res is a scalar,
+ /// \p Op0 must be either a scalar or pointer.
+ /// \pre \p Pred must be an integer predicate.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildICmp(CmpInst::Predicate Pred,
+ unsigned Res, unsigned Op0, unsigned Op1);
+
+ /// Build and insert a \p Res = G_FCMP \p Pred, \p Op0, \p Op1
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \pre \p Res must be a generic virtual register with scalar or
+ /// vector type. Typically this starts as s1 or <N x s1>.
+ /// \pre \p Op0 and \p Op1 must be generic virtual registers with the
+ /// same number of elements as \p Res (or scalar, if \p Res is
+ /// scalar).
+ /// \pre \p Pred must be a floating-point predicate.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred,
+ unsigned Res, unsigned Op0, unsigned Op1);
+
+ /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same type.
+ /// \pre \p Tst must be a generic virtual register with scalar, pointer or
+ /// vector type. If vector then it must have the same number of
+ /// elements as the other parameters.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
+ unsigned Op0, unsigned Op1);
};
} // End namespace llvm.
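As a reading aid (not part of the diff), here is a minimal sketch of client code for the builder API above. It assumes the virtual registers and the MachineMemOperand were created by the caller, e.g. by the IRTranslator, and that setBasicBlock or setMI has already been called; only methods declared in this header are used.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Tmp = G_LOAD Addr; Tst = G_ICMP sgt Tmp, Other; Res = G_SELECT Tst, Tmp, Other
static void emitCmpSelectOfLoad(MachineIRBuilder &MIRBuilder,
                                MachineMemOperand &MMO, unsigned Res,
                                unsigned Tst, unsigned Tmp, unsigned Addr,
                                unsigned Other) {
  MIRBuilder.buildLoad(Tmp, Addr, MMO);
  MIRBuilder.buildICmp(CmpInst::ICMP_SGT, Tst, Tmp, Other);
  MIRBuilder.buildSelect(Res, Tst, Tmp, Other);
}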
diff --git a/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
index b393744e67cb..106fc9ffb8b5 100644
--- a/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
+++ b/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -74,6 +74,7 @@ class BlockFrequency;
class MachineBranchProbabilityInfo;
class MachineBlockFrequencyInfo;
class MachineRegisterInfo;
+class TargetPassConfig;
class TargetRegisterInfo;
/// This pass implements the reg bank selector pass used in the GlobalISel
@@ -476,8 +477,12 @@ private:
/// Optimization mode of the pass.
Mode OptMode;
+ /// Current target configuration. Controls how the pass handles errors.
+ const TargetPassConfig *TPC;
+
/// Assign the register bank of each operand of \p MI.
- void assignInstr(MachineInstr &MI);
+ /// \return True on success, false otherwise.
+ bool assignInstr(MachineInstr &MI);
/// Initialize the field members using \p MF.
void init(MachineFunction &MF);
@@ -520,7 +525,9 @@ private:
///
/// \note The caller is supposed to do the rewriting of op if need be.
/// I.e., Reg = op ... => <NewRegs> = NewOp ...
- void repairReg(MachineOperand &MO,
+ ///
+ /// \return True if the repairing worked, false otherwise.
+ bool repairReg(MachineOperand &MO,
const RegisterBankInfo::ValueMapping &ValMapping,
RegBankSelect::RepairingPlacement &RepairPt,
const iterator_range<SmallVectorImpl<unsigned>::const_iterator>
@@ -570,7 +577,8 @@ private:
/// Apply \p Mapping to \p MI. \p RepairPts represents the different
/// mapping action that need to happen for the mapping to be
/// applied.
- void applyMapping(MachineInstr &MI,
+ /// \return True if the mapping was applied successfully, false otherwise.
+ bool applyMapping(MachineInstr &MI,
const RegisterBankInfo::InstructionMapping &InstrMapping,
SmallVectorImpl<RepairingPlacement> &RepairPts);
@@ -578,12 +586,21 @@ public:
/// Create a RegBankSelect pass with the specified \p RunningMode.
RegBankSelect(Mode RunningMode = Fast);
- const char *getPassName() const override {
- return "RegBankSelect";
- }
+ StringRef getPassName() const override { return "RegBankSelect"; }
void getAnalysisUsage(AnalysisUsage &AU) const override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::IsSSA)
+ .set(MachineFunctionProperties::Property::Legalized);
+ }
+
+ MachineFunctionProperties getSetProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::RegBankSelected);
+ }
+
/// Walk through \p MF and assign a register bank to every virtual register
/// that are still mapped to nothing.
/// The target needs to provide a RegisterBankInfo and in particular
diff --git a/include/llvm/CodeGen/GlobalISel/RegisterBank.h b/include/llvm/CodeGen/GlobalISel/RegisterBank.h
index e886382fd8e8..075677d30179 100644
--- a/include/llvm/CodeGen/GlobalISel/RegisterBank.h
+++ b/include/llvm/CodeGen/GlobalISel/RegisterBank.h
@@ -37,15 +37,16 @@ private:
/// initialized yet.
static const unsigned InvalidID;
- /// Only the RegisterBankInfo can create RegisterBank.
+ /// Only the RegisterBankInfo can initialize RegisterBank properly.
+ friend RegisterBankInfo;
+
+public:
/// The default constructor will leave the object in
/// an invalid state. I.e. isValid() == false.
- /// The field must be updated to fix that.
+ /// The fields must be updated to fix that, and only
+ /// RegisterBankInfo instances are allowed to do so.
RegisterBank();
- friend RegisterBankInfo;
-
-public:
/// Get the identifier of this register bank.
unsigned getID() const { return ID; }
diff --git a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 19d170365858..4d4a226eb2d2 100644
--- a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -16,6 +16,8 @@
#define LLVM_CODEGEN_GLOBALISEL_REGBANKINFO_H
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/MachineValueType.h" // For SimpleValueType.
@@ -82,15 +84,88 @@ public:
/// Helper struct that represents how a value is mapped through
/// different register banks.
+ ///
+ /// \note: So far we do not have any users of the complex mappings
+ /// (mappings with more than one partial mapping), but when we do,
+ /// we will need to duplicate partial mappings.
+ /// The alternative would be to use an array of pointers to partial
+ /// mappings (i.e., PartialMapping **BreakDown) and duplicate the
+ /// pointers instead.
+ ///
+ /// E.g.,
+ /// Let's say we have a 32-bit add and a <2 x 32-bit> vadd. We
+ /// can expand the <2 x 32-bit> vadd into 2 x 32-bit adds.
+ ///
+ /// Currently the TableGen-like file would look like:
+ /// \code
+ /// PartialMapping[] = {
+ /// /*32-bit add*/ {0, 32, GPR},
+ /// /*2x32-bit add*/ {0, 32, GPR}, {0, 32, GPR}, // <-- Same entry 3x
+ /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+ /// }; // PartialMapping duplicated.
+ ///
+ /// ValueMapping[] {
+ /// /*plain 32-bit add*/ {&PartialMapping[0], 1},
+ /// /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
+ /// /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+ /// };
+ /// \endcode
+ ///
+ /// With the array of pointer, we would have:
+ /// \code
+ /// PartialMapping[] = {
+ /// /*32-bit add*/ {0, 32, GPR},
+ /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+ /// }; // No more duplication.
+ ///
+ /// BreakDowns[] = {
+ /// /*AddBreakDown*/ &PartialMapping[0],
+ /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[0],
+ /// /*VAddBreakDown*/ &PartialMapping[1]
+ /// }; // Addresses of PartialMapping duplicated (smaller).
+ ///
+ /// ValueMapping[] {
+ /// /*plain 32-bit add*/ {&BreakDowns[0], 1},
+ /// /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
+ /// /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+ /// };
+ /// \endcode
+ ///
+ /// Given that a PartialMapping is small, the code-size impact of the
+ /// pointer table would actually be a degradation. Moreover, the compile
+ /// time would be hit by the additional indirection.
+ /// If PartialMapping gets bigger, we may reconsider.
struct ValueMapping {
/// How the value is broken down between the different register banks.
- SmallVector<PartialMapping, 2> BreakDown;
+ const PartialMapping *BreakDown;
+
+ /// Number of partial mappings used to break down this value.
+ unsigned NumBreakDowns;
+
+ /// The default constructor creates an invalid (isValid() == false)
+ /// instance.
+ ValueMapping() : ValueMapping(nullptr, 0) {}
- /// Verify that this mapping makes sense for a value of \p ExpectedBitWidth.
+ /// Initialize a ValueMapping with the given parameters.
+ /// \p BreakDown needs to have a lifetime at least as long
+ /// as this instance.
+ ValueMapping(const PartialMapping *BreakDown, unsigned NumBreakDowns)
+ : BreakDown(BreakDown), NumBreakDowns(NumBreakDowns) {}
+
+ /// Iterators through the PartialMappings.
+ const PartialMapping *begin() const { return BreakDown; }
+ const PartialMapping *end() const { return BreakDown + NumBreakDowns; }
+
+ /// Check if this ValueMapping is valid.
+ bool isValid() const { return BreakDown && NumBreakDowns; }
+
+ /// Verify that this mapping makes sense for a value of
+ /// \p MeaningfulBitWidth.
/// \note This method does not check anything when assertions are disabled.
///
/// \return True is the check was successful.
- bool verify(unsigned ExpectedBitWidth) const;
+ bool verify(unsigned MeaningfulBitWidth) const;
/// Print this on dbgs() stream.
void dump() const;
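To make the new layout concrete, a small sketch (not part of the diff) that walks the partial mappings of a ValueMapping through the begin()/end() iterators added above. The PartialMapping field name Length mirrors the getPartialMapping() helper declared later in this header and should be read as an assumption.

#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
using namespace llvm;

// Sum the number of bits covered by a value's partial mappings.
static unsigned mappedBits(const RegisterBankInfo::ValueMapping &VM) {
  if (!VM.isValid()) // null BreakDown or zero NumBreakDowns
    return 0;
  unsigned Bits = 0;
  for (const RegisterBankInfo::PartialMapping &PM : VM)
    Bits += PM.Length; // assumed field name
  return Bits;
}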
@@ -109,11 +184,11 @@ public:
/// Cost of this mapping.
unsigned Cost;
/// Mapping of all the operands.
- std::unique_ptr<ValueMapping[]> OperandsMapping;
+ const ValueMapping *OperandsMapping;
/// Number of operands.
unsigned NumOperands;
- ValueMapping &getOperandMapping(unsigned i) {
+ const ValueMapping &getOperandMapping(unsigned i) {
assert(i < getNumOperands() && "Out of bound operand");
return OperandsMapping[i];
}
@@ -127,11 +202,13 @@ public:
/// at the index i.
///
/// \pre ID != InvalidMappingID
- InstructionMapping(unsigned ID, unsigned Cost, unsigned NumOperands)
- : ID(ID), Cost(Cost), NumOperands(NumOperands) {
+ InstructionMapping(unsigned ID, unsigned Cost,
+ const ValueMapping *OperandsMapping,
+ unsigned NumOperands)
+ : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
+ NumOperands(NumOperands) {
assert(getID() != InvalidMappingID &&
"Use the default constructor for invalid mapping");
- OperandsMapping.reset(new ValueMapping[getNumOperands()]);
}
/// Default constructor.
@@ -148,25 +225,26 @@ public:
unsigned getNumOperands() const { return NumOperands; }
/// Get the value mapping of the ith operand.
+ /// \pre The mapping for the ith operand has been set.
+ /// \pre The ith operand is a register.
const ValueMapping &getOperandMapping(unsigned i) const {
- return const_cast<InstructionMapping *>(this)->getOperandMapping(i);
+ const ValueMapping &ValMapping =
+ const_cast<InstructionMapping *>(this)->getOperandMapping(i);
+ return ValMapping;
}
- /// Get the value mapping of the ith operand.
- void setOperandMapping(unsigned i, const ValueMapping &ValMapping) {
- getOperandMapping(i) = ValMapping;
+ /// Set the mapping for all the operands.
+ /// In other words, \p OpdsMapping should hold at least getNumOperands()
+ /// ValueMappings.
+ void setOperandsMapping(const ValueMapping *OpdsMapping) {
+ OperandsMapping = OpdsMapping;
}
/// Check whether this object is valid.
/// This is a lightweight check for obvious wrong instance.
- bool isValid() const { return getID() != InvalidMappingID; }
-
- /// Set the operand mapping for the \p OpIdx-th operand.
- /// The mapping will consist of only one element in the break down list.
- /// This element will map to \p RegBank and fully define a mask, whose
- /// bitwidth matches the size of \p MaskSize.
- void setOperandMapping(unsigned OpIdx, unsigned MaskSize,
- const RegisterBank &RegBank);
+ bool isValid() const {
+ return getID() != InvalidMappingID && OperandsMapping;
+ }
/// Verifiy that this mapping makes sense for \p MI.
/// \pre \p MI must be connected to a MachineFunction.
@@ -188,12 +266,13 @@ public:
/// \todo When we move to TableGen this should be an array ref.
typedef SmallVector<InstructionMapping, 4> InstructionMappings;
- /// Helper class use to get/create the virtual registers that will be used
+ /// Helper class used to get/create the virtual registers that will be used
/// to replace the MachineOperand when applying a mapping.
class OperandsMapper {
/// The OpIdx-th cell contains the index in NewVRegs where the VRegs of the
/// OpIdx-th operand starts. -1 means we do not have such mapping yet.
- std::unique_ptr<int[]> OpToNewVRegIdx;
+ /// Note: We use a SmallVector to avoid heap allocation for most cases.
+ SmallVector<int, 8> OpToNewVRegIdx;
/// Hold the registers that will be used to map MI with InstrMapping.
SmallVector<unsigned, 8> NewVRegs;
/// Current MachineRegisterInfo, used to create new virtual registers.
@@ -287,12 +366,21 @@ public:
protected:
/// Hold the set of supported register banks.
- std::unique_ptr<RegisterBank[]> RegBanks;
+ RegisterBank **RegBanks;
/// Total number of register banks.
unsigned NumRegBanks;
- /// Mapping from MVT::SimpleValueType to register banks.
- std::unique_ptr<const RegisterBank *[]> VTToRegBank;
+ /// Keep dynamically allocated PartialMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, const PartialMapping *> MapOfPartialMappings;
+
+ /// Keep dynamically allocated ValueMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, const ValueMapping *> MapOfValueMappings;
+
+ /// Keep dynamically allocated array of ValueMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, ValueMapping *> MapOfOperandsMappings;
/// Create a RegisterBankInfo that can accomodate up to \p NumRegBanks
/// RegisterBank instances.
@@ -300,7 +388,7 @@ protected:
/// \note For the verify method to succeed all the \p NumRegBanks
/// must be initialized by createRegisterBank and updated with
/// addRegBankCoverage RegisterBank.
- RegisterBankInfo(unsigned NumRegBanks);
+ RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);
/// This constructor is meaningless.
/// It just provides a default constructor that can be used at link time
@@ -325,14 +413,6 @@ protected:
/// It also adjusts the size of the register bank to reflect the maximal
/// size of a value that can be hold into that register bank.
///
- /// If \p AddTypeMapping is true, this method also records what types can
- /// be mapped to \p ID. Although this done by default, targets may want to
- /// disable it, espicially if a given type may be mapped on different
- /// register bank. Indeed, in such case, this method only records the
- /// first register bank where the type matches.
- /// This information is only used to provide default mapping
- /// (see getInstrMappingImpl).
- ///
/// \note This method does *not* add the super classes of \p RCId.
/// The rationale is if \p ID covers the registers of \p RCId, that
/// does not necessarily mean that \p ID covers the set of registers
@@ -343,43 +423,12 @@ protected:
///
/// \todo TableGen should just generate the BitSet vector for us.
void addRegBankCoverage(unsigned ID, unsigned RCId,
- const TargetRegisterInfo &TRI,
- bool AddTypeMapping = true);
+ const TargetRegisterInfo &TRI);
/// Get the register bank identified by \p ID.
RegisterBank &getRegBank(unsigned ID) {
assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
- return RegBanks[ID];
- }
-
- /// Get the register bank that has been recorded to cover \p SVT.
- const RegisterBank *getRegBankForType(MVT::SimpleValueType SVT) const {
- if (!VTToRegBank)
- return nullptr;
- assert(SVT < MVT::SimpleValueType::LAST_VALUETYPE && "Out-of-bound access");
- return VTToRegBank.get()[SVT];
- }
-
- /// Record \p RegBank as the register bank that covers \p SVT.
- /// If a record was already set for \p SVT, the mapping is not
- /// updated, unless \p Force == true
- ///
- /// \post if getRegBankForType(SVT)\@pre == nullptr then
- /// getRegBankForType(SVT) == &RegBank
- /// \post if Force == true then getRegBankForType(SVT) == &RegBank
- void recordRegBankForType(const RegisterBank &RegBank,
- MVT::SimpleValueType SVT, bool Force = false) {
- if (!VTToRegBank) {
- VTToRegBank.reset(
- new const RegisterBank *[MVT::SimpleValueType::LAST_VALUETYPE]);
- std::fill(&VTToRegBank[0],
- &VTToRegBank[MVT::SimpleValueType::LAST_VALUETYPE], nullptr);
- }
- assert(SVT < MVT::SimpleValueType::LAST_VALUETYPE && "Out-of-bound access");
- // If we want to override the mapping or the mapping does not exits yet,
- // set the register bank for SVT.
- if (Force || !getRegBankForType(SVT))
- VTToRegBank.get()[SVT] = &RegBank;
+ return *RegBanks[ID];
}
/// Try to get the mapping of \p MI.
@@ -393,14 +442,65 @@ protected:
///
/// This implementation is able to get the mapping of:
/// - Target specific instructions by looking at the encoding constraints.
- /// - Any instruction if all the register operands are already been assigned
+ /// - Any instruction if all the register operands have already been assigned
/// a register, a register class, or a register bank.
- /// - Copies and phis if at least one of the operand has been assigned a
+ /// - Copies and phis if at least one of the operands has been assigned a
/// register, a register class, or a register bank.
/// In other words, this method will likely fail to find a mapping for
/// any generic opcode that has not been lowered by target specific code.
InstructionMapping getInstrMappingImpl(const MachineInstr &MI) const;
+ /// Get the uniquely generated PartialMapping for the
+ /// given arguments.
+ const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
+ const RegisterBank &RegBank) const;
+
+ /// Methods to get a uniquely generated ValueMapping.
+ /// @{
+
+ /// The most common ValueMapping consists of a single PartialMapping.
+ /// Provide a dedicated method for that case.
+ const ValueMapping &getValueMapping(unsigned StartIdx, unsigned Length,
+ const RegisterBank &RegBank) const;
+
+ /// Get the ValueMapping for the given arguments.
+ const ValueMapping &getValueMapping(const PartialMapping *BreakDown,
+ unsigned NumBreakDowns) const;
+ /// @}
+
+ /// Methods to get a uniquely generated array of ValueMapping.
+ /// @{
+
+ /// Get the uniquely generated array of ValueMapping for the
+ /// elements between \p Begin and \p End.
+ ///
+ /// Elements that are nullptr will be replaced by
+ /// invalid ValueMapping (ValueMapping::isValid == false).
+ ///
+ /// \pre The pointers to ValueMapping between \p Begin and \p End
+ /// must uniquely identify a ValueMapping. Otherwise, there is no
+ /// guarantee that the return instance will be unique, i.e., another
+ /// OperandsMapping could have the same content.
+ template <typename Iterator>
+ const ValueMapping *getOperandsMapping(Iterator Begin, Iterator End) const;
+
+ /// Get the uniquely generated array of ValueMapping for the
+ /// elements of \p OpdsMapping.
+ ///
+ /// Elements of \p OpdsMapping that are nullptr will be replaced by
+ /// invalid ValueMapping (ValueMapping::isValid == false).
+ const ValueMapping *getOperandsMapping(
+ const SmallVectorImpl<const ValueMapping *> &OpdsMapping) const;
+
+ /// Get the uniquely generated array of ValueMapping for the
+ /// given arguments.
+ ///
+ /// Arguments that are nullptr will be replaced by invalid
+ /// ValueMapping (ValueMapping::isValid == false).
+ const ValueMapping *getOperandsMapping(
+ std::initializer_list<const ValueMapping *> OpdsMapping) const;
+ /// @}
+
/// Get the register bank for the \p OpIdx-th operand of \p MI form
/// the encoding constraints, if any.
///
@@ -429,7 +529,7 @@ protected:
}
public:
- virtual ~RegisterBankInfo() {}
+ virtual ~RegisterBankInfo();
/// Get the register bank identified by \p ID.
const RegisterBank &getRegBank(unsigned ID) const {
@@ -479,6 +579,15 @@ public:
return &A != &B;
}
+ /// Constrain the (possibly generic) virtual register \p Reg to \p RC.
+ ///
+ /// \pre \p Reg is a virtual register that either has a bank or a class.
+ /// \returns The constrained register class, or nullptr if there is none.
+ /// \note This is a generic variant of MachineRegisterInfo::constrainRegClass
+ static const TargetRegisterClass *
+ constrainGenericRegister(unsigned Reg, const TargetRegisterClass &RC,
+ MachineRegisterInfo &MRI);
+
/// Identifier used when the related instruction mapping instance
/// is generated by target independent code.
/// Make sure not to use that identifier to avoid possible collision.
@@ -494,7 +603,7 @@ public:
/// This mapping should be the direct translation of \p MI.
/// In other words, when \p MI is mapped with the returned mapping,
/// only the register banks of the operands of \p MI need to be updated.
- /// In particular, neither the opcode or the type of \p MI needs to be
+ /// In particular, neither the opcode nor the type of \p MI needs to be
/// updated for this direct mapping.
///
/// The target independent implementation gives a mapping based on
@@ -597,6 +706,10 @@ operator<<(raw_ostream &OS, const RegisterBankInfo::OperandsMapper &OpdMapper) {
OpdMapper.print(OS, /*ForDebug*/ false);
return OS;
}
+
+/// Hashing function for PartialMapping.
+/// It is required for the hashing of ValueMapping.
+hash_code hash_value(const RegisterBankInfo::PartialMapping &PartMapping);
} // End namespace llvm.
#endif
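The uniquing helpers above (getValueMapping, getOperandsMapping) are protected, so they are intended to be called from a target's RegisterBankInfo subclass. A hedged sketch (not part of the diff; MyRegisterBankInfo, getAddMapping, GPRBank and the ID/Cost values are placeholders) of how such a subclass could assemble the mapping of a 32-bit G_ADD whose three operands all live in a general-purpose bank:

RegisterBankInfo::InstructionMapping
MyRegisterBankInfo::getAddMapping(const RegisterBank &GPRBank) const {
  // One partial mapping covering bits [0, 32) in GPRBank, shared by the
  // definition and both uses.
  const ValueMapping &GPR32 =
      getValueMapping(/*StartIdx=*/0, /*Length=*/32, GPRBank);
  const ValueMapping *Operands = getOperandsMapping({&GPR32, &GPR32, &GPR32});
  return InstructionMapping(/*ID=*/1, /*Cost=*/1, Operands, /*NumOperands=*/3);
}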
diff --git a/include/llvm/CodeGen/GlobalISel/Utils.h b/include/llvm/CodeGen/GlobalISel/Utils.h
new file mode 100644
index 000000000000..f5d5f5cdf0cd
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -0,0 +1,43 @@
+//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of helper functions used throughout the
+/// GlobalISel pipeline.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
+#define LLVM_CODEGEN_GLOBALISEL_UTILS_H
+
+namespace llvm {
+
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class MCInstrDesc;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+
+/// Try to constrain Reg so that it is usable by argument OpIdx of the
+/// provided MCInstrDesc \p II. If this fails, create a new virtual
+/// register in the correct class and insert a COPY before \p InsertPt.
+/// The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+unsigned constrainOperandRegClass(const MachineFunction &MF,
+ const TargetRegisterInfo &TRI,
+ MachineRegisterInfo &MRI,
+ const TargetInstrInfo &TII,
+ const RegisterBankInfo &RBI,
+ MachineInstr &InsertPt, const MCInstrDesc &II,
+ unsigned Reg, unsigned OpIdx);
+
+} // End namespace llvm.
+#endif
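For context, a hedged sketch (not part of the diff) of how an instruction selector might use the helper declared above; the MCInstrDesc and the TII/TRI/RBI/MRI references are assumed to be the usual selector members.

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// Constrain every virtual register operand of the just-selected instruction I
// to a class acceptable to its MCInstrDesc, inserting COPYs where needed.
static void constrainSelectedOperands(MachineInstr &I, const MCInstrDesc &Desc,
                                      MachineFunction &MF,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI,
                                      MachineRegisterInfo &MRI) {
  for (unsigned OpIdx = 0, E = I.getNumOperands(); OpIdx != E; ++OpIdx) {
    MachineOperand &MO = I.getOperand(OpIdx);
    if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      continue;
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, Desc,
                                       MO.getReg(), OpIdx));
  }
}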
diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h
index 89cb7a86f99f..420b03ec02bd 100644
--- a/include/llvm/CodeGen/ISDOpcodes.h
+++ b/include/llvm/CodeGen/ISDOpcodes.h
@@ -70,7 +70,7 @@ namespace ISD {
/// of the frame or return address to return. An index of zero corresponds
/// to the current function's frame or return address, an index of one to
/// the parent's frame or return address, and so on.
- FRAMEADDR, RETURNADDR,
+ FRAMEADDR, RETURNADDR, ADDROFRETURNADDR,
/// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
/// Materializes the offset from the local object pointer of another
@@ -90,6 +90,11 @@ namespace ISD {
/// adjustment during unwind.
FRAME_TO_ARGS_OFFSET,
+ /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
+ /// Frame Address (CFA), generally the value of the stack pointer at the
+ /// call site in the previous frame.
+ EH_DWARF_CFA,
+
/// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
/// 'eh_return' gcc dwarf builtin, which is used to return from
/// exception. The general meaning is: adjust stack by OFFSET and pass
diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h
index 04e840dea2ca..a86706223261 100644
--- a/include/llvm/CodeGen/LiveInterval.h
+++ b/include/llvm/CodeGen/LiveInterval.h
@@ -23,7 +23,6 @@
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/CodeGen/SlotIndexes.h"
-#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
@@ -58,7 +57,7 @@ namespace llvm {
: id(i), def(d)
{ }
- /// VNInfo construtor, copies values from orig, except for the value number.
+ /// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
: id(i), def(orig.def)
{ }
@@ -316,6 +315,10 @@ namespace llvm {
/// add liveness for a dead def.
VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
+ /// Create a def of value @p VNI. Return @p VNI. If there already exists
+ /// a definition at VNI->def, the value defined there must be @p VNI.
+ VNInfo *createDeadDef(VNInfo *VNI);
+
/// Create a copy of the given value. The new value will be identical except
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig,
@@ -451,10 +454,29 @@ namespace llvm {
/// may have grown since it was inserted).
iterator addSegment(Segment S);
+ /// Attempt to extend a value defined after @p StartIdx to include @p Use.
+ /// Both @p StartIdx and @p Use should be in the same basic block. In case
+ /// of subranges, an extension could be prevented by an explicit "undef"
+ /// caused by a <def,read-undef> on a non-overlapping lane. The list of
+ /// location of such "undefs" should be provided in @p Undefs.
+ /// The return value is a pair: the first element is VNInfo of the value
+ /// that was extended (possibly nullptr), the second is a boolean value
+ /// indicating whether an "undef" was encountered.
/// If this range is live before @p Use in the basic block that starts at
- /// @p StartIdx, extend it to be live up to @p Use, and return the value. If
- /// there is no segment before @p Use, return nullptr.
- VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use);
+ /// @p StartIdx, and there is no intervening "undef", extend it to be live
+ /// up to @p Use, and return the pair {value, false}. If there is no
+ /// segment before @p Use and there is no "undef" between @p StartIdx and
+ /// @p Use, return {nullptr, false}. If there is an "undef" before @p Use,
+ /// return {nullptr, true}.
+ std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
+ SlotIndex StartIdx, SlotIndex Use);
+
+ /// Simplified version of the above "extendInBlock", which assumes that
+ /// no register lanes are undefined by <def,read-undef> operands.
+ /// If this range is live before @p Use in the basic block that starts
+ /// at @p StartIdx, extend it to be live up to @p Use, and return the
+ /// value. If there is no segment before @p Use, return nullptr.
+ VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use);
/// join - Join two live ranges (this, and other) together. This applies
/// mappings to the value numbers in the LHS/RHS ranges as specified. If
@@ -555,6 +577,16 @@ namespace llvm {
return thisIndex < otherIndex;
}
+ /// Returns true if there is an explicit "undef" between @p Begin
+ /// and @p End.
+ bool isUndefIn(ArrayRef<SlotIndex> Undefs, SlotIndex Begin,
+ SlotIndex End) const {
+ return std::any_of(Undefs.begin(), Undefs.end(),
+ [Begin,End] (SlotIndex Idx) -> bool {
+ return Begin <= Idx && Idx < End;
+ });
+ }
+
/// Flush segment set into the regular segment vector.
/// The method is to be called after the live range
/// has been created, if use of the segment set was
@@ -581,7 +613,6 @@ namespace llvm {
friend class LiveRangeUpdater;
void addSegmentToSet(Segment S);
void markValNoForDeletion(VNInfo *V);
-
};
inline raw_ostream &operator<<(raw_ostream &OS, const LiveRange &LR) {
@@ -641,7 +672,7 @@ namespace llvm {
P = P->Next;
return *this;
}
- SingleLinkedListIterator<T> &operator++(int) {
+ SingleLinkedListIterator<T> operator++(int) {
SingleLinkedListIterator res = *this;
++*this;
return res;
@@ -729,6 +760,13 @@ namespace llvm {
weight = llvm::huge_valf;
}
+ /// For a given lane mask @p LaneMask, compute indexes at which the
+ /// lane is marked undefined by subregister <def,read-undef> definitions.
+ void computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
+ LaneBitmask LaneMask,
+ const MachineRegisterInfo &MRI,
+ const SlotIndexes &Indexes) const;
+
bool operator<(const LiveInterval& other) const {
const SlotIndex &thisIndex = beginIndex();
const SlotIndex &otherIndex = other.beginIndex();
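To show how the new undef-aware pieces fit together, a sketch (not part of the diff) combining computeSubRangeUndefs() with the pair-returning extendInBlock(); the public SubRange::LaneMask member is assumed from the existing LiveInterval API.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveInterval.h"
using namespace llvm;

// Try to extend subrange SR of LI up to UseIdx within the block starting at
// StartIdx, honouring <def,read-undef> lane undefs. Returns the extended
// value, or nullptr if there was none or an explicit "undef" blocked it.
static VNInfo *extendSubRangeInBlock(LiveInterval &LI,
                                     LiveInterval::SubRange &SR,
                                     SlotIndex StartIdx, SlotIndex UseIdx,
                                     const MachineRegisterInfo &MRI,
                                     const SlotIndexes &Indexes) {
  SmallVector<SlotIndex, 4> Undefs;
  LI.computeSubRangeUndefs(Undefs, SR.LaneMask, MRI, Indexes);
  std::pair<VNInfo *, bool> Res = SR.extendInBlock(Undefs, StartIdx, UseIdx);
  return Res.first; // Res.second reports whether an "undef" was hit.
}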
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index d4ee0582cc41..f8dc52566dc0 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -163,16 +163,24 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
/// LiveInterval::removeEmptySubranges() afterwards.
void shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg);
- /// extendToIndices - Extend the live range of LI to reach all points in
- /// Indices. The points in the Indices array must be jointly dominated by
- /// existing defs in LI. PHI-defs are added as needed to maintain SSA form.
+ /// Extend the live range @p LR to reach all points in @p Indices. The
+ /// points in the @p Indices array must be jointly dominated by the union
+ /// of the existing defs in @p LR and points in @p Undefs.
///
- /// If a SlotIndex in Indices is the end index of a basic block, LI will be
- /// extended to be live out of the basic block.
+ /// PHI-defs are added as needed to maintain SSA form.
+ ///
+ /// If a SlotIndex in @p Indices is the end index of a basic block, @p LR
+ /// will be extended to be live out of the basic block.
+ /// If a SlotIndex in @p Indices is jointly dominated only by points in
+ /// @p Undefs, the live range will not be extended to that point.
///
/// See also LiveRangeCalc::extend().
- void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices);
+ void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices,
+ ArrayRef<SlotIndex> Undefs);
+ void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices) {
+ extendToIndices(LR, Indices, /*Undefs=*/{});
+ }
/// If @p LR has a live value at @p Kill, prune its live range by removing
/// any liveness reachable from Kill. Add live range end points to
@@ -253,8 +261,8 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
Indexes->removeMachineInstrFromMaps(MI);
}
- void ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
- Indexes->replaceMachineInstrInMaps(MI, NewMI);
+ SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
+ return Indexes->replaceMachineInstrInMaps(MI, NewMI);
}
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
@@ -392,6 +400,13 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
return RegUnitRanges[Unit];
}
+ /// removeRegUnit - Remove computed live range for Unit. Subsequent uses
+ /// should rely on on-demand recomputation.
+ void removeRegUnit(unsigned Unit) {
+ delete RegUnitRanges[Unit];
+ RegUnitRanges[Unit] = nullptr;
+ }
+
/// Remove value numbers and related live segments starting at position
/// @p Pos that are part of any liverange of physical register @p Reg or one
/// of its subregisters.
@@ -444,7 +459,8 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
void repairOldRegInRange(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
const SlotIndex endIdx, LiveRange &LR,
- unsigned Reg, LaneBitmask LaneMask = ~0u);
+ unsigned Reg,
+ LaneBitmask LaneMask = LaneBitmask::getAll());
class HMEditor;
};
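As an aside, a hedged sketch (not part of the diff) of how the new removeRegUnit() hook might be used; MCRegUnitIterator is assumed from the existing MC layer.

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// After assigning a physical register outside the regular allocator, drop the
// cached liveness of its register units so it is recomputed on demand.
static void invalidateRegUnits(LiveIntervals &LIS, unsigned PhysReg,
                               const TargetRegisterInfo &TRI) {
  for (MCRegUnitIterator Unit(PhysReg, &TRI); Unit.isValid(); ++Unit)
    LIS.removeRegUnit(*Unit);
}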
diff --git a/include/llvm/CodeGen/LivePhysRegs.h b/include/llvm/CodeGen/LivePhysRegs.h
index 1cea9d5b90d6..9e04c467fadc 100644
--- a/include/llvm/CodeGen/LivePhysRegs.h
+++ b/include/llvm/CodeGen/LivePhysRegs.h
@@ -31,8 +31,10 @@
#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
+#include <utility>
namespace llvm {
@@ -41,14 +43,15 @@ class MachineInstr;
/// \brief A set of live physical registers with functions to track liveness
/// when walking backward/forward through a basic block.
class LivePhysRegs {
- const TargetRegisterInfo *TRI;
+ const TargetRegisterInfo *TRI = nullptr;
SparseSet<unsigned> LiveRegs;
LivePhysRegs(const LivePhysRegs&) = delete;
LivePhysRegs &operator=(const LivePhysRegs&) = delete;
+
public:
/// \brief Constructs a new empty LivePhysRegs set.
- LivePhysRegs() : TRI(nullptr), LiveRegs() {}
+ LivePhysRegs() = default;
/// \brief Constructs and initialize an empty LivePhysRegs set.
LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {
@@ -57,11 +60,10 @@ public:
}
/// \brief Clear and initialize the LivePhysRegs set.
- void init(const TargetRegisterInfo *TRI) {
- assert(TRI && "Invalid TargetRegisterInfo pointer.");
- this->TRI = TRI;
+ void init(const TargetRegisterInfo &TRI) {
+ this->TRI = &TRI;
LiveRegs.clear();
- LiveRegs.setUniverse(TRI->getNumRegs());
+ LiveRegs.setUniverse(TRI.getNumRegs());
}
/// \brief Clears the LivePhysRegs set.
@@ -141,6 +143,11 @@ public:
/// \brief Dumps the currently live registers to the debug output.
void dump() const;
+
+private:
+ /// Adds live-in registers from basic block @p MBB, taking associated
+ /// lane masks into consideration.
+ void addBlockLiveIns(const MachineBasicBlock &MBB);
};
inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
@@ -148,6 +155,13 @@ inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
return OS;
}
-} // namespace llvm
+/// Compute the live-in list for \p MBB assuming all of its successors'
+/// live-in lists are up-to-date. Uses the given LivePhysRegs instance
+/// \p LiveRegs; this is just here to avoid repeated heap allocations when
+/// calling this multiple times in a pass.
+void computeLiveIns(LivePhysRegs &LiveRegs, const TargetRegisterInfo &TRI,
+ MachineBasicBlock &MBB);
+
+} // end namespace llvm
-#endif
+#endif // LLVM_CODEGEN_LIVEPHYSREGS_H
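A short usage sketch for the function added above (not part of the diff); the blocks are assumed to have up-to-date successor live-in lists, and computeLiveIns is assumed to (re)initialize the passed-in set itself.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/LivePhysRegs.h"
using namespace llvm;

// Fill the live-in lists of blocks created late in the pipeline, reusing one
// LivePhysRegs instance across calls as the comment above recommends.
static void fillLiveIns(ArrayRef<MachineBasicBlock *> NewBlocks,
                        const TargetRegisterInfo &TRI) {
  LivePhysRegs LiveRegs;
  for (MachineBasicBlock *MBB : NewBlocks)
    computeLiveIns(LiveRegs, TRI, *MBB);
}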
diff --git a/include/llvm/CodeGen/LiveVariables.h b/include/llvm/CodeGen/LiveVariables.h
index bc210dda08c0..d6e947c03dbd 100644
--- a/include/llvm/CodeGen/LiveVariables.h
+++ b/include/llvm/CodeGen/LiveVariables.h
@@ -92,8 +92,7 @@ public:
/// machine instruction. Returns true if there was a kill
/// corresponding to this instruction, false otherwise.
bool removeKill(MachineInstr &MI) {
- std::vector<MachineInstr *>::iterator I =
- std::find(Kills.begin(), Kills.end(), &MI);
+ std::vector<MachineInstr *>::iterator I = find(Kills, &MI);
if (I == Kills.end())
return false;
Kills.erase(I);
diff --git a/include/llvm/CodeGen/LowLevelType.h b/include/llvm/CodeGen/LowLevelType.h
new file mode 100644
index 000000000000..b8885c3a95fd
--- /dev/null
+++ b/include/llvm/CodeGen/LowLevelType.h
@@ -0,0 +1,206 @@
+//== llvm/CodeGen/GlobalISel/LowLevelType.h -------------------- -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Implement a low-level type suitable for MachineInstr level instruction
+/// selection.
+///
+/// For a type attached to a MachineInstr, we only care about 2 details: total
+/// size and the number of vector lanes (if any). Accordingly, there are 3
+/// possible valid type-kinds:
+///
+/// * `sN` for scalars and aggregates
+/// * `<N x sM>` for vectors, which must have at least 2 elements.
+/// * `pN` for pointers
+///
+/// Other information required for correct selection is expected to be carried
+/// by the opcode, or non-type flags. For example the distinction between G_ADD
+/// and G_FADD for int/float or fast-math flags.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
+#define LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
+
+#include <cassert>
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+
+class DataLayout;
+class LLVMContext;
+class Type;
+class raw_ostream;
+
+class LLT {
+public:
+ enum TypeKind : uint16_t {
+ Invalid,
+ Scalar,
+ Pointer,
+ Vector,
+ };
+
+ /// Get a low-level scalar or aggregate "bag of bits".
+ static LLT scalar(unsigned SizeInBits) {
+ assert(SizeInBits > 0 && "invalid scalar size");
+ return LLT{Scalar, 1, SizeInBits};
+ }
+
+ /// Get a low-level pointer in the given address space.
+ static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
+ return LLT{Pointer, AddressSpace, SizeInBits};
+ }
+
+ /// Get a low-level vector of some number of elements and element width.
+ /// \p NumElements must be at least 2.
+ static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
+ assert(NumElements > 1 && "invalid number of vector elements");
+ return LLT{Vector, NumElements, ScalarSizeInBits};
+ }
+
+ /// Get a low-level vector of some number of elements and element type.
+ static LLT vector(uint16_t NumElements, LLT ScalarTy) {
+ assert(NumElements > 1 && "invalid number of vector elements");
+ assert(ScalarTy.isScalar() && "invalid vector element type");
+ return LLT{Vector, NumElements, ScalarTy.getSizeInBits()};
+ }
+
+ explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits)
+ : SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) {
+ assert((Kind != Vector || ElementsOrAddrSpace > 1) &&
+ "invalid number of vector elements");
+ }
+
+ explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {}
+
+ /// Construct a low-level type based on an LLVM type.
+ explicit LLT(Type &Ty, const DataLayout &DL);
+
+ explicit LLT(MVT VT);
+
+ bool isValid() const { return Kind != Invalid; }
+
+ bool isScalar() const { return Kind == Scalar; }
+
+ bool isPointer() const { return Kind == Pointer; }
+
+ bool isVector() const { return Kind == Vector; }
+
+ /// Returns the number of elements in a vector LLT. Must only be called on
+ /// vector types.
+ uint16_t getNumElements() const {
+ assert(isVector() && "cannot get number of elements on scalar/aggregate");
+ return ElementsOrAddrSpace;
+ }
+
+ /// Returns the total size of the type. Must only be called on sized types.
+ unsigned getSizeInBits() const {
+ if (isPointer() || isScalar())
+ return SizeInBits;
+ return SizeInBits * ElementsOrAddrSpace;
+ }
+
+ unsigned getScalarSizeInBits() const {
+ return SizeInBits;
+ }
+
+ unsigned getAddressSpace() const {
+ assert(isPointer() && "cannot get address space of non-pointer type");
+ return ElementsOrAddrSpace;
+ }
+
+ /// Returns the vector's element type. Only valid for vector types.
+ LLT getElementType() const {
+ assert(isVector() && "cannot get element type of scalar/aggregate");
+ return scalar(SizeInBits);
+ }
+
+ /// Get a low-level type with half the size of the original, by halving the
+ /// size of the scalar type involved. For example `s32` will become `s16`,
+ /// `<2 x s32>` will become `<2 x s16>`.
+ LLT halfScalarSize() const {
+ assert(!isPointer() && getScalarSizeInBits() > 1 &&
+ getScalarSizeInBits() % 2 == 0 && "cannot halve size of this type");
+ return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2};
+ }
+
+ /// Get a low-level type with twice the size of the original, by doubling the
+ /// size of the scalar type involved. For example `s32` will become `s64`,
+ /// `<2 x s32>` will become `<2 x s64>`.
+ LLT doubleScalarSize() const {
+ assert(!isPointer() && "cannot change size of this type");
+ return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2};
+ }
+
+ /// Get a low-level type with half the size of the original, by halving the
+ /// number of vector elements. The source must be a vector type with an even
+ /// number of elements. For example `<4 x s32>` will become `<2 x s32>`,
+ /// `<2 x s32>` will become `s32`.
+ LLT halfElements() const {
+ assert(isVector() && ElementsOrAddrSpace % 2 == 0 &&
+ "cannot half odd vector");
+ if (ElementsOrAddrSpace == 2)
+ return scalar(SizeInBits);
+
+ return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2),
+ SizeInBits};
+ }
+
+ /// Get a low-level type with twice the size of the original, by doubling the
+ /// number of vector elements. The source must not be a pointer type. For
+ /// example `<2 x s32>` will become `<4 x s32>`, and doubling the number of
+ /// elements in `sN` produces `<2 x sN>`.
+ LLT doubleElements() const {
+ assert(!isPointer() && "cannot double elements in pointer");
+ return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2),
+ SizeInBits};
+ }
+
+ void print(raw_ostream &OS) const;
+
+ bool operator==(const LLT &RHS) const {
+ return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits &&
+ ElementsOrAddrSpace == RHS.ElementsOrAddrSpace;
+ }
+
+ bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
+
+ friend struct DenseMapInfo<LLT>;
+private:
+ unsigned SizeInBits;
+ uint16_t ElementsOrAddrSpace;
+ TypeKind Kind;
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
+ Ty.print(OS);
+ return OS;
+}
+
+template<> struct DenseMapInfo<LLT> {
+ static inline LLT getEmptyKey() {
+ return LLT{LLT::Invalid, 0, -1u};
+ }
+ static inline LLT getTombstoneKey() {
+ return LLT{LLT::Invalid, 0, -2u};
+ }
+ static inline unsigned getHashValue(const LLT &Ty) {
+ uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) |
+ ((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind;
+ return DenseMapInfo<uint64_t>::getHashValue(Val);
+ }
+ static bool isEqual(const LLT &LHS, const LLT &RHS) {
+ return LHS == RHS;
+ }
+};
+
+}
+
+#endif
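To keep the semantics of the new class concrete, a short sketch (not part of the diff) constructing and transforming LLTs with the methods declared above:

#include "llvm/CodeGen/LowLevelType.h"
#include <cassert>
using namespace llvm;

static void lltExamples() {
  LLT S32 = LLT::scalar(32);        // s32
  LLT V4S32 = LLT::vector(4, S32);  // <4 x s32>
  LLT P0 = LLT::pointer(0, 64);     // p0 with 64-bit pointers
  assert(P0.isPointer() && P0.getAddressSpace() == 0);
  assert(V4S32.getSizeInBits() == 128 && V4S32.getScalarSizeInBits() == 32);
  assert(V4S32.halfElements() == LLT::vector(2, 32)); // <2 x s32>
  assert(S32.halfScalarSize() == LLT::scalar(16));    // s16
}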
diff --git a/include/llvm/CodeGen/MIRYamlMapping.h b/include/llvm/CodeGen/MIRYamlMapping.h
index 7f9c44833336..778f72c06e65 100644
--- a/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/include/llvm/CodeGen/MIRYamlMapping.h
@@ -381,13 +381,12 @@ struct MachineFunction {
StringRef Name;
unsigned Alignment = 0;
bool ExposesReturnsTwice = false;
- bool HasInlineAsm = false;
- // MachineFunctionProperties
- bool AllVRegsAllocated = false;
+ // GISel MachineFunctionProperties.
+ bool Legalized = false;
+ bool RegBankSelected = false;
+ bool Selected = false;
// Register information
- bool IsSSA = false;
bool TracksRegLiveness = false;
- bool TracksSubRegLiveness = false;
std::vector<VirtualRegisterDefinition> VirtualRegisters;
std::vector<MachineFunctionLiveIn> LiveIns;
Optional<std::vector<FlowStringValue>> CalleeSavedRegisters;
@@ -406,11 +405,10 @@ template <> struct MappingTraits<MachineFunction> {
YamlIO.mapRequired("name", MF.Name);
YamlIO.mapOptional("alignment", MF.Alignment);
YamlIO.mapOptional("exposesReturnsTwice", MF.ExposesReturnsTwice);
- YamlIO.mapOptional("hasInlineAsm", MF.HasInlineAsm);
- YamlIO.mapOptional("allVRegsAllocated", MF.AllVRegsAllocated);
- YamlIO.mapOptional("isSSA", MF.IsSSA);
+ YamlIO.mapOptional("legalized", MF.Legalized);
+ YamlIO.mapOptional("regBankSelected", MF.RegBankSelected);
+ YamlIO.mapOptional("selected", MF.Selected);
YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness);
- YamlIO.mapOptional("tracksSubRegLiveness", MF.TracksSubRegLiveness);
YamlIO.mapOptional("registers", MF.VirtualRegisters);
YamlIO.mapOptional("liveins", MF.LiveIns);
YamlIO.mapOptional("calleeSavedRegisters", MF.CalleeSavedRegisters);
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 2923371c1005..be811c6fe437 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/BranchProbability.h"
+#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/DataTypes.h"
#include <functional>
@@ -35,36 +36,21 @@ class StringRef;
class raw_ostream;
class MachineBranchProbabilityInfo;
-// Forward declaration to avoid circular include problem with TargetRegisterInfo
-typedef unsigned LaneBitmask;
-
-template <>
-struct ilist_traits<MachineInstr> : public ilist_default_traits<MachineInstr> {
+template <> struct ilist_traits<MachineInstr> {
private:
- mutable ilist_half_node<MachineInstr> Sentinel;
+ friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.
+ MachineBasicBlock *Parent;
- // this is only set by the MachineBasicBlock owning the LiveList
- friend class MachineBasicBlock;
- MachineBasicBlock* Parent;
+ typedef simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator
+ instr_iterator;
public:
- MachineInstr *createSentinel() const {
- return static_cast<MachineInstr*>(&Sentinel);
- }
- void destroySentinel(MachineInstr *) const {}
-
- MachineInstr *provideInitialHead() const { return createSentinel(); }
- MachineInstr *ensureHead(MachineInstr*) const { return createSentinel(); }
- static void noteHead(MachineInstr*, MachineInstr*) {}
+ void addNodeToList(MachineInstr *N);
+ void removeNodeFromList(MachineInstr *N);
+ void transferNodesFromList(ilist_traits &OldList, instr_iterator First,
+ instr_iterator Last);
- void addNodeToList(MachineInstr* N);
- void removeNodeFromList(MachineInstr* N);
- void transferNodesFromList(ilist_traits &SrcTraits,
- ilist_iterator<MachineInstr> First,
- ilist_iterator<MachineInstr> Last);
- void deleteNode(MachineInstr *N);
-private:
- void createNode(const MachineInstr &);
+ void deleteNode(MachineInstr *MI);
};
class MachineBasicBlock
@@ -83,7 +69,7 @@ public:
};
private:
- typedef ilist<MachineInstr> Instructions;
+ typedef ilist<MachineInstr, ilist_sentinel_tracking<true>> Instructions;
Instructions Insts;
const BasicBlock *BB;
int Number;
@@ -161,15 +147,14 @@ public:
typedef Instructions::iterator instr_iterator;
typedef Instructions::const_iterator const_instr_iterator;
- typedef std::reverse_iterator<instr_iterator> reverse_instr_iterator;
- typedef
- std::reverse_iterator<const_instr_iterator> const_reverse_instr_iterator;
+ typedef Instructions::reverse_iterator reverse_instr_iterator;
+ typedef Instructions::const_reverse_iterator const_reverse_instr_iterator;
typedef MachineInstrBundleIterator<MachineInstr> iterator;
typedef MachineInstrBundleIterator<const MachineInstr> const_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
-
+ typedef MachineInstrBundleIterator<MachineInstr, true> reverse_iterator;
+ typedef MachineInstrBundleIterator<const MachineInstr, true>
+ const_reverse_iterator;
unsigned size() const { return (unsigned)Insts.size(); }
bool empty() const { return Insts.empty(); }
@@ -204,10 +189,16 @@ public:
const_iterator begin() const { return instr_begin(); }
iterator end () { return instr_end(); }
const_iterator end () const { return instr_end(); }
- reverse_iterator rbegin() { return instr_rbegin(); }
- const_reverse_iterator rbegin() const { return instr_rbegin(); }
- reverse_iterator rend () { return instr_rend(); }
- const_reverse_iterator rend () const { return instr_rend(); }
+ reverse_iterator rbegin() {
+ return reverse_iterator::getAtBundleBegin(instr_rbegin());
+ }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator::getAtBundleBegin(instr_rbegin());
+ }
+ reverse_iterator rend() { return reverse_iterator(instr_rend()); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(instr_rend());
+ }
/// Support for MachineInstr::getNextNode().
static Instructions MachineBasicBlock::*getSublistAccess(MachineInstr *) {
@@ -285,7 +276,8 @@ public:
/// Adds the specified register as a live in. Note that it is an error to add
/// the same register to the same set more than once unless the intention is
/// to call sortUniqueLiveIns after all registers are added.
- void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask = ~0u) {
+ void addLiveIn(MCPhysReg PhysReg,
+ LaneBitmask LaneMask = LaneBitmask::getAll()) {
LiveIns.push_back(RegisterMaskPair(PhysReg, LaneMask));
}
void addLiveIn(const RegisterMaskPair &RegMaskPair) {
@@ -297,16 +289,21 @@ public:
/// LiveIn insertion.
void sortUniqueLiveIns();
+ /// Clear live in list.
+ void clearLiveIns();
+
/// Add PhysReg as live in to this block, and ensure that there is a copy of
/// PhysReg to a virtual register of class RC. Return the virtual register
/// that is a copy of the live in PhysReg.
unsigned addLiveIn(MCPhysReg PhysReg, const TargetRegisterClass *RC);
/// Remove the specified register from the live in set.
- void removeLiveIn(MCPhysReg Reg, LaneBitmask LaneMask = ~0u);
+ void removeLiveIn(MCPhysReg Reg,
+ LaneBitmask LaneMask = LaneBitmask::getAll());
/// Return true if the specified register is in the live in set.
- bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask = ~0u) const;
+ bool isLiveIn(MCPhysReg Reg,
+ LaneBitmask LaneMask = LaneBitmask::getAll()) const;
// Iteration support for live in sets. These sets are kept in sorted
// order by their register number.
@@ -462,10 +459,15 @@ public:
iterator getFirstNonPHI();
/// Return the first instruction in MBB after I that is not a PHI or a label.
- /// This is the correct point to insert copies at the beginning of a basic
- /// block.
+ /// This is the correct point to insert lowered copies at the beginning of a
+ /// basic block when they must come before any debugging information.
iterator SkipPHIsAndLabels(iterator I);
+ /// Return the first instruction in MBB after I that is not a PHI, label or
+ /// debug. This is the correct point to insert copies at the beginning of a
+ /// basic block.
+ iterator SkipPHIsLabelsAndDebug(iterator I);
+
/// Returns an iterator to the first terminator instruction of this basic
/// block. If a terminator does not exist, it returns end().
iterator getFirstTerminator();
@@ -705,7 +707,7 @@ private:
BranchProbability getSuccProbability(const_succ_iterator Succ) const;
// Methods used to maintain doubly linked list of blocks...
- friend struct ilist_traits<MachineBasicBlock>;
+ friend struct ilist_callback_traits<MachineBasicBlock>;
// Machine-CFG mutators
@@ -739,31 +741,21 @@ struct MBB2NumberFunctor :
//
template <> struct GraphTraits<MachineBasicBlock *> {
- typedef MachineBasicBlock NodeType;
typedef MachineBasicBlock *NodeRef;
typedef MachineBasicBlock::succ_iterator ChildIteratorType;
- static NodeType *getEntryNode(MachineBasicBlock *BB) { return BB; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->succ_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->succ_end();
- }
+ static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
template <> struct GraphTraits<const MachineBasicBlock *> {
- typedef const MachineBasicBlock NodeType;
typedef const MachineBasicBlock *NodeRef;
typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
- static NodeType *getEntryNode(const MachineBasicBlock *BB) { return BB; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->succ_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->succ_end();
- }
+ static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
// Provide specializations of GraphTraits to be able to treat a
@@ -773,33 +765,23 @@ template <> struct GraphTraits<const MachineBasicBlock *> {
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<MachineBasicBlock*> > {
- typedef MachineBasicBlock NodeType;
typedef MachineBasicBlock *NodeRef;
typedef MachineBasicBlock::pred_iterator ChildIteratorType;
- static NodeType *getEntryNode(Inverse<MachineBasicBlock *> G) {
+ static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
return G.Graph;
}
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->pred_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->pred_end();
- }
+ static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
- typedef const MachineBasicBlock NodeType;
typedef const MachineBasicBlock *NodeRef;
typedef MachineBasicBlock::const_pred_iterator ChildIteratorType;
- static NodeType *getEntryNode(Inverse<const MachineBasicBlock*> G) {
+ static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
return G.Graph;
}
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->pred_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->pred_end();
- }
+ static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
@@ -827,6 +809,28 @@ public:
MachineBasicBlock::iterator getInitial() { return I; }
};
+/// Increment \p It until it points to a non-debug instruction or to \p End
+/// and return the resulting iterator. This function should only be used with
+/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
+/// const_instr_iterator} and the respective reverse iterators.
+template<typename IterT>
+inline IterT skipDebugInstructionsForward(IterT It, IterT End) {
+ while (It != End && It->isDebugValue())
+ It++;
+ return It;
+}
+
+/// Decrement \p It until it points to a non-debug instruction or to \p Begin
+/// and return the resulting iterator. This function should only be used with
+/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
+/// const_instr_iterator} and the respective reverse iterators.
+template<class IterT>
+inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
+ while (It != Begin && It->isDebugValue())
+ It--;
+ return It;
+}
+
} // End llvm namespace
#endif
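A sketch (not part of the diff) of the new debug-skipping helpers in a typical block walk:

#include "llvm/CodeGen/MachineBasicBlock.h"
using namespace llvm;

// Visit every non-debug instruction of MBB, skipping DBG_VALUEs explicitly.
static void visitRealInstrs(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
  while ((I = skipDebugInstructionsForward(I, E)) != E) {
    // ... process *I, which is guaranteed not to be a debug value ...
    ++I;
  }
}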
diff --git a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
index 7a236086ed09..bfa5bf6c2845 100644
--- a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
+++ b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -52,6 +52,7 @@ public:
BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
Optional<uint64_t> getBlockProfileCount(const MachineBasicBlock *MBB) const;
+ Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
const MachineFunction *getFunction() const;
const MachineBranchProbabilityInfo *getMBPI() const;
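A hedged sketch (not part of the diff) of the new query; BlockFrequency::getFrequency() is assumed from the existing API.

#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
using namespace llvm;

// Estimate the profile count of MBB from its block frequency; the result is
// None when no profile data is available.
static Optional<uint64_t> estimateCount(const MachineBlockFrequencyInfo &MBFI,
                                        const MachineBasicBlock &MBB) {
  return MBFI.getProfileCountFromFreq(MBFI.getBlockFreq(&MBB).getFrequency());
}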
diff --git a/include/llvm/CodeGen/MachineDominators.h b/include/llvm/CodeGen/MachineDominators.h
index ed7cc277e8b6..76e1df89169e 100644
--- a/include/llvm/CodeGen/MachineDominators.h
+++ b/include/llvm/CodeGen/MachineDominators.h
@@ -271,14 +271,12 @@ public:
template <class Node, class ChildIterator>
struct MachineDomTreeGraphTraitsBase {
- typedef Node NodeType;
+ typedef Node *NodeRef;
typedef ChildIterator ChildIteratorType;
- static NodeType *getEntryNode(NodeType *N) { return N; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) { return N->end(); }
+ static NodeRef getEntryNode(NodeRef N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
template <class T> struct GraphTraits;
@@ -296,7 +294,7 @@ struct GraphTraits<const MachineDomTreeNode *>
template <> struct GraphTraits<MachineDominatorTree*>
: public GraphTraits<MachineDomTreeNode *> {
- static NodeType *getEntryNode(MachineDominatorTree *DT) {
+ static NodeRef getEntryNode(MachineDominatorTree *DT) {
return DT->getRootNode();
}
};
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
index 59755674c69e..2fab8137564e 100644
--- a/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -103,7 +103,7 @@ class MachineFrameInfo {
/// If true, this stack slot is used to spill a value (could be deopt
/// and/or GC related) over a statepoint. We know that the address of the
- /// slot can't alias any LLVM IR value. This is very similiar to a Spill
+ /// slot can't alias any LLVM IR value. This is very similar to a Spill
 /// Slot, but is created by statepoint lowering in SelectionDAG, not the
/// register allocator.
bool isStatepointSpillSlot;
@@ -544,7 +544,8 @@ public:
/// Create a spill slot at a fixed location on the stack.
/// Returns an index with a negative value.
- int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset);
+ int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset,
+ bool Immutable = false);
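A minimal sketch of the extended signature; MFI and the Size/SPOffset values are assumptions for illustration only:

    // Reserve an 8-byte spill slot at SP-16 and mark it immutable.
    int FI = MFI.CreateFixedSpillStackObject(/*Size=*/8, /*SPOffset=*/-16,
                                             /*Immutable=*/true);
    // The returned index is negative, as for all fixed stack objects.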
/// Returns true if the specified index corresponds to a fixed stack object.
bool isFixedObjectIndex(int ObjectIdx) const {
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index 4aa9a92e6bee..0c21b3254631 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -20,10 +20,14 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/Compiler.h"
@@ -48,29 +52,18 @@ class TargetRegisterClass;
struct MachinePointerInfo;
struct WinEHFuncInfo;
-template <>
-struct ilist_traits<MachineBasicBlock>
- : public ilist_default_traits<MachineBasicBlock> {
- mutable ilist_half_node<MachineBasicBlock> Sentinel;
-public:
- // FIXME: This downcast is UB. See llvm.org/PR26753.
- LLVM_NO_SANITIZE("object-size")
- MachineBasicBlock *createSentinel() const {
- return static_cast<MachineBasicBlock*>(&Sentinel);
- }
- void destroySentinel(MachineBasicBlock *) const {}
-
- MachineBasicBlock *provideInitialHead() const { return createSentinel(); }
- MachineBasicBlock *ensureHead(MachineBasicBlock*) const {
- return createSentinel();
- }
- static void noteHead(MachineBasicBlock*, MachineBasicBlock*) {}
+template <> struct ilist_alloc_traits<MachineBasicBlock> {
+ void deleteNode(MachineBasicBlock *MBB);
+};
+template <> struct ilist_callback_traits<MachineBasicBlock> {
void addNodeToList(MachineBasicBlock* MBB);
void removeNodeFromList(MachineBasicBlock* MBB);
- void deleteNode(MachineBasicBlock *MBB);
-private:
- void createNode(const MachineBasicBlock &);
+
+ template <class Iterator>
+ void transferNodesFromList(ilist_callback_traits &OldList, Iterator, Iterator) {
+ llvm_unreachable("Never transfer between lists");
+ }
};
/// MachineFunctionInfo - This class can be derived from and used by targets to
@@ -94,8 +87,6 @@ struct MachineFunctionInfo {
/// Each of these has checking code in the MachineVerifier, and passes can
/// require that a property be set.
class MachineFunctionProperties {
- // TODO: Add MachineVerifier checks for AllVRegsAllocated
- // TODO: Add a way to print the properties and make more useful error messages
// Possible TODO: Allow targets to extend this (perhaps by allowing the
// constructor to specify the size of the bit vector)
// Possible TODO: Allow requiring the negative (e.g. VRegsAllocated could be
@@ -108,6 +99,7 @@ public:
// Property descriptions:
// IsSSA: True when the machine function is in SSA form and virtual registers
// have a single def.
+ // NoPHIs: The machine function does not contain any PHI instruction.
// TracksLiveness: True when tracking register liveness accurately.
// While this property is set, register liveness information in basic block
// live-in lists and machine instruction operands (e.g. kill flags, implicit
@@ -115,13 +107,31 @@ public:
// that affect the values in registers, for example by the register
// scavenger.
// When this property is clear, liveness is no longer reliable.
- // AllVRegsAllocated: All virtual registers have been allocated; i.e. all
- // register operands are physical registers.
+ // NoVRegs: The machine function does not use any virtual registers.
+ // Legalized: In GlobalISel: the MachineLegalizer ran and all pre-isel generic
+ // instructions have been legalized; i.e., all instructions are now one of:
+ // - generic and always legal (e.g., COPY)
+ // - target-specific
+ // - legal pre-isel generic instructions.
+ // RegBankSelected: In GlobalISel: the RegBankSelect pass ran and all generic
+ // virtual registers have been assigned to a register bank.
+ // Selected: In GlobalISel: the InstructionSelect pass ran and all pre-isel
+ // generic instructions have been eliminated; i.e., all instructions are now
+ // target-specific or non-pre-isel generic instructions (e.g., COPY).
+ // Since only pre-isel generic instructions can have generic virtual register
+ // operands, this also means that all generic virtual registers have been
+ // constrained to virtual registers (assigned to register classes) and that
+ // all sizes attached to them have been eliminated.
enum class Property : unsigned {
IsSSA,
+ NoPHIs,
TracksLiveness,
- AllVRegsAllocated,
- LastProperty,
+ NoVRegs,
+ FailedISel,
+ Legalized,
+ RegBankSelected,
+ Selected,
+ LastProperty = Selected,
};
bool hasProperty(Property P) const {
@@ -131,15 +141,20 @@ public:
Properties.set(static_cast<unsigned>(P));
return *this;
}
- MachineFunctionProperties &clear(Property P) {
+ MachineFunctionProperties &reset(Property P) {
Properties.reset(static_cast<unsigned>(P));
return *this;
}
+ /// Reset all the properties.
+ MachineFunctionProperties &reset() {
+ Properties.reset();
+ return *this;
+ }
MachineFunctionProperties &set(const MachineFunctionProperties &MFP) {
Properties |= MFP.Properties;
return *this;
}
- MachineFunctionProperties &clear(const MachineFunctionProperties &MFP) {
+ MachineFunctionProperties &reset(const MachineFunctionProperties &MFP) {
Properties.reset(MFP.Properties);
return *this;
}
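A hedged sketch of the renamed property interface (clear() becomes reset()); MF is an assumed MachineFunction in scope, and the property transition is illustrative only:

    MachineFunctionProperties &Props = MF.getProperties();
    if (Props.hasProperty(MachineFunctionProperties::Property::IsSSA)) {
      // Leaving SSA form: drop IsSSA and record that no PHIs remain.
      Props.reset(MachineFunctionProperties::Property::IsSSA);
      Props.set(MachineFunctionProperties::Property::NoPHIs);
    }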
@@ -149,13 +164,34 @@ public:
return !V.Properties.test(Properties);
}
- // Print the MachineFunctionProperties in human-readable form. If OnlySet is
- // true, only print the properties that are set.
- void print(raw_ostream &ROS, bool OnlySet=false) const;
+ /// Print the MachineFunctionProperties in human-readable form.
+ void print(raw_ostream &OS) const;
private:
BitVector Properties =
- BitVector(static_cast<unsigned>(Property::LastProperty));
+ BitVector(static_cast<unsigned>(Property::LastProperty)+1);
+};
+
+struct SEHHandler {
+ /// Filter or finally function. Null indicates a catch-all.
+ const Function *FilterOrFinally;
+
+ /// Address of block to recover at. Null for a finally handler.
+ const BlockAddress *RecoverBA;
+};
+
+
+/// This structure is used to retain landing pad info for the current function.
+struct LandingPadInfo {
+ MachineBasicBlock *LandingPadBlock; // Landing pad block.
+ SmallVector<MCSymbol *, 1> BeginLabels; // Labels prior to invoke.
+ SmallVector<MCSymbol *, 1> EndLabels; // Labels after invoke.
+ SmallVector<SEHHandler, 1> SEHHandlers; // SEH handlers active at this lpad.
+ MCSymbol *LandingPadLabel; // Label at beginning of landing pad.
+ std::vector<int> TypeIds; // List of type ids (filters negative).
+
+ explicit LandingPadInfo(MachineBasicBlock *MBB)
+ : LandingPadBlock(MBB), LandingPadLabel(nullptr) {}
};
class MachineFunction {
@@ -224,6 +260,9 @@ class MachineFunction {
/// True if the function includes any inline assembly.
bool HasInlineAsm = false;
+ /// True if any WinCFI instruction has been emitted in this function.
+ Optional<bool> HasWinCFI;
+
/// Current high-level properties of the IR of the function (e.g. is in SSA
/// form or whether registers have been allocated)
MachineFunctionProperties Properties;
@@ -231,13 +270,77 @@ class MachineFunction {
// Allocation management for pseudo source values.
std::unique_ptr<PseudoSourceValueManager> PSVManager;
+ /// List of moves done by a function's prolog. Used to construct frame maps
+ /// by debug and exception handling consumers.
+ std::vector<MCCFIInstruction> FrameInstructions;
+
+ /// \name Exception Handling
+ /// \{
+
+ /// List of LandingPadInfo describing the landing pad information.
+ std::vector<LandingPadInfo> LandingPads;
+
+ /// Map a landing pad's EH symbol to the call site indexes.
+ DenseMap<MCSymbol*, SmallVector<unsigned, 4> > LPadToCallSiteMap;
+
+ /// Map of invoke call site index values to associated begin EH_LABEL.
+ DenseMap<MCSymbol*, unsigned> CallSiteMap;
+
+ bool CallsEHReturn = false;
+ bool CallsUnwindInit = false;
+ bool HasEHFunclets = false;
+
+ /// List of C++ TypeInfo used.
+ std::vector<const GlobalValue *> TypeInfos;
+
+ /// List of typeids encoding filters used.
+ std::vector<unsigned> FilterIds;
+
+ /// List of the indices in FilterIds corresponding to filter terminators.
+ std::vector<unsigned> FilterEnds;
+
+ EHPersonality PersonalityTypeCache = EHPersonality::Unknown;
+
+ /// \}
+
MachineFunction(const MachineFunction &) = delete;
void operator=(const MachineFunction&) = delete;
+
+ /// Clear all the members of this MachineFunction, except the ones used
+ /// to initialize the MachineFunction again.
+ /// More specifically, this deallocates all the dynamically allocated
+ /// objects, gets rid of all the XXXInfo data structures, and keeps
+ /// the references to Fn, Target, MMI, and FunctionNumber unchanged.
+ void clear();
+ /// Allocate and initialize the different members.
+ /// In particular, the XXXInfo data structure.
+ /// \pre Fn, Target, MMI, and FunctionNumber are properly set.
+ void init();
public:
+
+ struct VariableDbgInfo {
+ const DILocalVariable *Var;
+ const DIExpression *Expr;
+ unsigned Slot;
+ const DILocation *Loc;
+
+ VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+ unsigned Slot, const DILocation *Loc)
+ : Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
+ };
+ typedef SmallVector<VariableDbgInfo, 4> VariableDbgInfoMapTy;
+ VariableDbgInfoMapTy VariableDbgInfos;
+
MachineFunction(const Function *Fn, const TargetMachine &TM,
unsigned FunctionNum, MachineModuleInfo &MMI);
~MachineFunction();
+ /// Reset the instance as if it was just created.
+ void reset() {
+ clear();
+ init();
+ }
+
MachineModuleInfo &getMMI() const { return MMI; }
MCContext &getContext() const { return Ctx; }
@@ -283,8 +386,8 @@ public:
/// This object contains information about objects allocated on the stack
/// frame of the current function in an abstract way.
///
- MachineFrameInfo *getFrameInfo() { return FrameInfo; }
- const MachineFrameInfo *getFrameInfo() const { return FrameInfo; }
+ MachineFrameInfo &getFrameInfo() { return *FrameInfo; }
+ const MachineFrameInfo &getFrameInfo() const { return *FrameInfo; }
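Since getFrameInfo() now returns a reference rather than a pointer, callers need a mechanical update; a hedged before/after sketch with MF assumed in scope (hypothetical, not part of the patch):

    // Before: const MachineFrameInfo *MFI = MF.getFrameInfo();
    // After:
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    bool HasCalls = MFI.hasCalls();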
/// getJumpTableInfo - Return the jump table info object for the current
/// function. This object contains information about jump tables in the
@@ -345,6 +448,12 @@ public:
HasInlineAsm = B;
}
+ bool hasWinCFI() const {
+ assert(HasWinCFI.hasValue() && "HasWinCFI not set yet!");
+ return *HasWinCFI;
+ }
+ void setHasWinCFI(bool v) { HasWinCFI = v; }
+
/// Get the function properties
const MachineFunctionProperties &getProperties() const { return Properties; }
MachineFunctionProperties &getProperties() { return Properties; }
@@ -422,8 +531,8 @@ public:
// Provide accessors for the MachineBasicBlock list...
typedef BasicBlockListType::iterator iterator;
typedef BasicBlockListType::const_iterator const_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
+ typedef BasicBlockListType::const_reverse_iterator const_reverse_iterator;
+ typedef BasicBlockListType::reverse_iterator reverse_iterator;
/// Support for MachineBasicBlock::getNextNode().
static BasicBlockListType MachineFunction::*
@@ -530,11 +639,13 @@ public:
/// getMachineMemOperand - Allocate a new MachineMemOperand.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
- MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
- MachineMemOperand::Flags f,
- uint64_t s, unsigned base_alignment,
- const AAMDNodes &AAInfo = AAMDNodes(),
- const MDNode *Ranges = nullptr);
+ MachineMemOperand *getMachineMemOperand(
+ MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
+ unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+ const MDNode *Ranges = nullptr,
+ SynchronizationScope SynchScope = CrossThread,
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+ AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.
@@ -601,8 +712,139 @@ public:
/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
/// base.
MCSymbol *getPICBaseSymbol() const;
+
+ /// Returns a reference to a list of CFI instructions in the function's
+ /// prologue. Used to construct frame maps for debug and exception handling
+ /// consumers.
+ const std::vector<MCCFIInstruction> &getFrameInstructions() const {
+ return FrameInstructions;
+ }
+
+ LLVM_NODISCARD unsigned addFrameInst(const MCCFIInstruction &Inst) {
+ FrameInstructions.push_back(Inst);
+ return FrameInstructions.size() - 1;
+ }
+
+ /// \name Exception Handling
+ /// \{
+
+ bool callsEHReturn() const { return CallsEHReturn; }
+ void setCallsEHReturn(bool b) { CallsEHReturn = b; }
+
+ bool callsUnwindInit() const { return CallsUnwindInit; }
+ void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
+
+ bool hasEHFunclets() const { return HasEHFunclets; }
+ void setHasEHFunclets(bool V) { HasEHFunclets = V; }
+
+ /// Find or create a LandingPadInfo for the specified MachineBasicBlock.
+ LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
+
+ /// Remap landing pad labels and remove any deleted landing pads.
+ void tidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
+
+ /// Return a reference to the landing pad info for the current function.
+ const std::vector<LandingPadInfo> &getLandingPads() const {
+ return LandingPads;
+ }
+
+ /// Provide the begin and end labels of an invoke style call and associate it
+ /// with a try landing pad block.
+ void addInvoke(MachineBasicBlock *LandingPad,
+ MCSymbol *BeginLabel, MCSymbol *EndLabel);
+
+ /// Add a new landing pad. Returns the label ID for the landing pad entry.
+ MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
+
+ /// Provide the catch typeinfo for a landing pad.
+ void addCatchTypeInfo(MachineBasicBlock *LandingPad,
+ ArrayRef<const GlobalValue *> TyInfo);
+
+ /// Provide the filter typeinfo for a landing pad.
+ void addFilterTypeInfo(MachineBasicBlock *LandingPad,
+ ArrayRef<const GlobalValue *> TyInfo);
+
+ /// Add a cleanup action for a landing pad.
+ void addCleanup(MachineBasicBlock *LandingPad);
+
+ void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
+ const BlockAddress *RecoverLabel);
+
+ void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
+ const Function *Cleanup);
+
+ /// Return the type id for the specified typeinfo. This is function wide.
+ unsigned getTypeIDFor(const GlobalValue *TI);
+
+ /// Return the id of the filter encoded by TyIds. This is function wide.
+ int getFilterIDFor(std::vector<unsigned> &TyIds);
+
+ /// Map the landing pad's EH symbol to the call site indexes.
+ void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
+
+ /// Get the call site indexes for a landing pad EH symbol.
+ SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
+ assert(hasCallSiteLandingPad(Sym) &&
+ "missing call site number for landing pad!");
+ return LPadToCallSiteMap[Sym];
+ }
+
+ /// Return true if the landing pad EH symbol has an associated call site.
+ bool hasCallSiteLandingPad(MCSymbol *Sym) {
+ return !LPadToCallSiteMap[Sym].empty();
+ }
+
+ /// Map the begin label for a call site.
+ void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
+ CallSiteMap[BeginLabel] = Site;
+ }
+
+ /// Get the call site number for a begin label.
+ unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) const {
+ assert(hasCallSiteBeginLabel(BeginLabel) &&
+ "Missing call site number for EH_LABEL!");
+ return CallSiteMap.lookup(BeginLabel);
+ }
+
+ /// Return true if the begin label has a call site number associated with it.
+ bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) const {
+ return CallSiteMap.count(BeginLabel);
+ }
+
+ /// Return a reference to the C++ typeinfo for the current function.
+ const std::vector<const GlobalValue *> &getTypeInfos() const {
+ return TypeInfos;
+ }
+
+ /// Return a reference to the typeids encoding filters used in the current
+ /// function.
+ const std::vector<unsigned> &getFilterIds() const {
+ return FilterIds;
+ }
+
+ /// \}
+
+ /// Collect information used to emit debugging information of a variable.
+ void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+ unsigned Slot, const DILocation *Loc) {
+ VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
+ }
+
+ VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
+ const VariableDbgInfoMapTy &getVariableDbgInfo() const {
+ return VariableDbgInfos;
+ }
};
+/// \name Exception Handling
+/// \{
+
+/// Extract the exception handling information from the landingpad instruction
+/// and add them to the specified machine module info.
+void addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB);
+
+/// \}
+
//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
@@ -614,29 +856,29 @@ public:
//
template <> struct GraphTraits<MachineFunction*> :
public GraphTraits<MachineBasicBlock*> {
- static NodeType *getEntryNode(MachineFunction *F) {
- return &F->front();
- }
+ static NodeRef getEntryNode(MachineFunction *F) { return &F->front(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef MachineFunction::iterator nodes_iterator;
- static nodes_iterator nodes_begin(MachineFunction *F) { return F->begin(); }
- static nodes_iterator nodes_end (MachineFunction *F) { return F->end(); }
+ typedef pointer_iterator<MachineFunction::iterator> nodes_iterator;
+ static nodes_iterator nodes_begin(MachineFunction *F) {
+ return nodes_iterator(F->begin());
+ }
+ static nodes_iterator nodes_end(MachineFunction *F) {
+ return nodes_iterator(F->end());
+ }
static unsigned size (MachineFunction *F) { return F->size(); }
};
template <> struct GraphTraits<const MachineFunction*> :
public GraphTraits<const MachineBasicBlock*> {
- static NodeType *getEntryNode(const MachineFunction *F) {
- return &F->front();
- }
+ static NodeRef getEntryNode(const MachineFunction *F) { return &F->front(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef MachineFunction::const_iterator nodes_iterator;
+ typedef pointer_iterator<MachineFunction::const_iterator> nodes_iterator;
static nodes_iterator nodes_begin(const MachineFunction *F) {
- return F->begin();
+ return nodes_iterator(F->begin());
}
static nodes_iterator nodes_end (const MachineFunction *F) {
- return F->end();
+ return nodes_iterator(F->end());
}
static unsigned size (const MachineFunction *F) {
return F->size();
@@ -651,13 +893,13 @@ template <> struct GraphTraits<const MachineFunction*> :
//
template <> struct GraphTraits<Inverse<MachineFunction*> > :
public GraphTraits<Inverse<MachineBasicBlock*> > {
- static NodeType *getEntryNode(Inverse<MachineFunction*> G) {
+ static NodeRef getEntryNode(Inverse<MachineFunction *> G) {
return &G.Graph->front();
}
};
template <> struct GraphTraits<Inverse<const MachineFunction*> > :
public GraphTraits<Inverse<const MachineBasicBlock*> > {
- static NodeType *getEntryNode(Inverse<const MachineFunction *> G) {
+ static NodeRef getEntryNode(Inverse<const MachineFunction *> G) {
return &G.Graph->front();
}
};
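The NodeRef-based GraphTraits keep the usual graph-iteration idioms working; a hedged sketch over an assumed MachineFunction MF (hypothetical, not part of the patch):

    // Walk the CFG in depth-first order via the GraphTraits above
    // (uses llvm/ADT/DepthFirstIterator.h).
    for (MachineBasicBlock *MBB : depth_first(&MF)) {
      // NodeRef is MachineBasicBlock*, so MBB binds directly.
      (void)MBB;
    }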
diff --git a/include/llvm/CodeGen/MachineFunctionAnalysis.h b/include/llvm/CodeGen/MachineFunctionAnalysis.h
deleted file mode 100644
index 4c0f5e63ea1d..000000000000
--- a/include/llvm/CodeGen/MachineFunctionAnalysis.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//===-- MachineFunctionAnalysis.h - Owner of MachineFunctions ----*-C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the MachineFunctionAnalysis class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H
-#define LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H
-
-#include "llvm/Pass.h"
-
-namespace llvm {
-
-class MachineFunction;
-class MachineFunctionInitializer;
-class TargetMachine;
-
-/// MachineFunctionAnalysis - This class is a Pass that manages a
-/// MachineFunction object.
-struct MachineFunctionAnalysis : public FunctionPass {
-private:
- const TargetMachine &TM;
- MachineFunction *MF;
- unsigned NextFnNum;
- MachineFunctionInitializer *MFInitializer;
-
-public:
- static char ID;
- explicit MachineFunctionAnalysis(const TargetMachine &tm,
- MachineFunctionInitializer *MFInitializer);
- ~MachineFunctionAnalysis() override;
-
- MachineFunction &getMF() const { return *MF; }
-
- const char* getPassName() const override {
- return "Machine Function Analysis";
- }
-
-private:
- bool doInitialization(Module &M) override;
- bool runOnFunction(Function &F) override;
- void releaseMemory() override;
- void getAnalysisUsage(AnalysisUsage &AU) const override;
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index 8f1cb9b6f659..bac93e5d3a4c 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -39,9 +39,6 @@ class DIExpression;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;
-#ifdef LLVM_BUILD_GLOBAL_ISEL
-class Type;
-#endif
class MachineFunction;
class MachineMemOperand;
@@ -53,7 +50,8 @@ class MachineMemOperand;
/// without having their destructor called.
///
class MachineInstr
- : public ilist_node_with_parent<MachineInstr, MachineBasicBlock> {
+ : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
+ ilist_sentinel_tracking<true>> {
public:
typedef MachineMemOperand **mmo_iterator;
@@ -62,7 +60,7 @@ public:
/// otherwise easily derivable from the IR text.
///
enum CommentFlag {
- ReloadReuse = 0x1
+ ReloadReuse = 0x1 // higher bits are reserved for target dep comments.
};
enum MIFlag {
@@ -106,13 +104,6 @@ private:
DebugLoc debugLoc; // Source line information.
-#ifdef LLVM_BUILD_GLOBAL_ISEL
- /// Type of the instruction in case of a generic opcode.
- /// \invariant This must be nullptr is getOpcode() is not
- /// in the range of generic opcodes.
- Type *Ty;
-#endif
-
MachineInstr(const MachineInstr&) = delete;
void operator=(const MachineInstr&) = delete;
// Use MachineFunction::DeleteMachineInstr() instead.
@@ -120,7 +111,7 @@ private:
// Intrusive list support
friend struct ilist_traits<MachineInstr>;
- friend struct ilist_traits<MachineBasicBlock>;
+ friend struct ilist_callback_traits<MachineBasicBlock>;
void setParent(MachineBasicBlock *P) { Parent = P; }
/// This constructor creates a copy of the given
@@ -152,8 +143,8 @@ public:
}
/// Set a flag for the AsmPrinter.
- void setAsmPrinterFlag(CommentFlag Flag) {
- AsmPrinterFlags |= (uint8_t)Flag;
+ void setAsmPrinterFlag(uint8_t Flag) {
+ AsmPrinterFlags |= Flag;
}
/// Clear specific AsmPrinter flags.
@@ -187,10 +178,6 @@ public:
Flags &= ~((uint8_t)Flag);
}
- /// Set the type of the instruction.
- /// \pre getOpcode() is in the range of the generic opcodes.
- void setType(Type *Ty);
- Type *getType() const;
/// Return true if MI is in a bundle (but not the first MI in a bundle).
///
@@ -404,10 +391,10 @@ public:
bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
// Inline the fast path for unbundled or bundle-internal instructions.
if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
- return getDesc().getFlags() & (1 << MCFlag);
+ return getDesc().getFlags() & (1ULL << MCFlag);
// If this is the first instruction in a bundle, take the slow path.
- return hasPropertyInBundle(1 << MCFlag, Type);
+ return hasPropertyInBundle(1ULL << MCFlag, Type);
}
/// Return true if this instruction can have a variable number of operands.
@@ -734,8 +721,11 @@ public:
IgnoreVRegDefs // Ignore virtual register definitions
};
- /// Return true if this instruction is identical to (same
- /// opcode and same operands as) the specified instruction.
+ /// Return true if this instruction is identical to \p Other.
+ /// Two instructions are identical if they have the same opcode and all their
+ /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
+ /// Note that this means liveness related flags (dead, undef, kill) do not
+ /// affect the notion of identical.
bool isIdenticalTo(const MachineInstr &Other,
MICheckType Check = CheckDefs) const;
@@ -1124,12 +1114,14 @@ public:
/// ordered or volatile memory references.
bool hasOrderedMemoryRef() const;
- /// Return true if this instruction is loading from a
- /// location whose value is invariant across the function. For example,
- /// loading a value from the constant pool or from the argument area of
- /// a function if it does not change. This should only return true of *all*
- /// loads the instruction does are invariant (if it does multiple loads).
- bool isInvariantLoad(AliasAnalysis *AA) const;
+ /// Return true if this load instruction never traps and points to a memory
+ /// location whose value doesn't change during the execution of this function.
+ ///
+ /// Examples include loading a value from the constant pool or from the
+ /// argument area of a function (if it does not change). If the instruction
+ /// does multiple loads, this returns true only if all of the loads are
+ /// dereferenceable and invariant.
+ bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
/// If the specified instruction is a PHI that always merges together the
/// same virtual register, return the register, otherwise return 0.
@@ -1157,10 +1149,11 @@ public:
//
// Debugging support
//
- void print(raw_ostream &OS, bool SkipOpers = false) const;
- void print(raw_ostream &OS, ModuleSlotTracker &MST,
- bool SkipOpers = false) const;
- void dump() const;
+ void print(raw_ostream &OS, bool SkipOpers = false,
+ const TargetInstrInfo *TII = nullptr) const;
+ void print(raw_ostream &OS, ModuleSlotTracker &MST, bool SkipOpers = false,
+ const TargetInstrInfo *TII = nullptr) const;
+ void dump(const TargetInstrInfo *TII = nullptr) const;
//===--------------------------------------------------------------------===//
// Accessors used to build up machine instructions.
diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h
index 37b67aa0cf5c..3c8a3626f364 100644
--- a/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -83,6 +83,21 @@ public:
return *this;
}
+ /// Add a virtual register definition operand.
+ const MachineInstrBuilder &addDef(unsigned RegNo, unsigned Flags = 0,
+ unsigned SubReg = 0) const {
+ return addReg(RegNo, Flags | RegState::Define, SubReg);
+ }
+
+ /// Add a virtual register use operand. It is an error for Flags to contain
+ /// `RegState::Define` when calling this function.
+ const MachineInstrBuilder &addUse(unsigned RegNo, unsigned Flags = 0,
+ unsigned SubReg = 0) const {
+ assert(!(Flags & RegState::Define) &&
+ "Misleading addUse defines register, use addReg instead.");
+ return addReg(RegNo, Flags, SubReg);
+ }
+
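A hedged sketch of the new addDef/addUse convenience builders; TII, DL, MBB, DstReg, and SrcReg are assumptions for illustration only:

    // Equivalent to addReg(DstReg, RegState::Define).addReg(SrcReg).
    BuildMI(MBB, MBB.end(), DL, TII->get(TargetOpcode::COPY))
        .addDef(DstReg)
        .addUse(SrcReg);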
/// Add a new immediate operand.
const MachineInstrBuilder &addImm(int64_t Val) const {
MI->addOperand(*MF, MachineOperand::CreateImm(Val));
@@ -190,6 +205,16 @@ public:
return *this;
}
+ const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
+ MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
+ MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
+ return *this;
+ }
+
const MachineInstrBuilder &addSym(MCSymbol *Sym,
unsigned char TargetFlags = 0) const {
MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
@@ -435,7 +460,8 @@ public:
/// Create an MIBundleBuilder representing an existing instruction or bundle
/// that has MI as its head.
explicit MIBundleBuilder(MachineInstr *MI)
- : MBB(*MI->getParent()), Begin(MI), End(getBundleEnd(*MI)) {}
+ : MBB(*MI->getParent()), Begin(MI),
+ End(getBundleEnd(MI->getIterator())) {}
/// Return a reference to the basic block containing this bundle.
MachineBasicBlock &getMBB() const { return MBB; }
diff --git a/include/llvm/CodeGen/MachineInstrBundle.h b/include/llvm/CodeGen/MachineInstrBundle.h
index c0033a5148cf..995c7001d928 100644
--- a/include/llvm/CodeGen/MachineInstrBundle.h
+++ b/include/llvm/CodeGen/MachineInstrBundle.h
@@ -41,34 +41,33 @@ MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
/// MachineFunction. Return true if any bundles are finalized.
bool finalizeBundles(MachineFunction &MF);
-/// getBundleStart - Returns the first instruction in the bundle containing MI.
-///
-inline MachineInstr &getBundleStart(MachineInstr &MI) {
- MachineBasicBlock::instr_iterator I(MI);
+/// Returns an iterator to the first instruction in the bundle containing \p I.
+inline MachineBasicBlock::instr_iterator getBundleStart(
+ MachineBasicBlock::instr_iterator I) {
while (I->isBundledWithPred())
--I;
- return *I;
+ return I;
}
-inline const MachineInstr &getBundleStart(const MachineInstr &MI) {
- MachineBasicBlock::const_instr_iterator I(MI);
+/// Returns an iterator to the first instruction in the bundle containing \p I.
+inline MachineBasicBlock::const_instr_iterator getBundleStart(
+ MachineBasicBlock::const_instr_iterator I) {
while (I->isBundledWithPred())
--I;
- return *I;
+ return I;
}
-/// Return an iterator pointing beyond the bundle containing MI.
-inline MachineBasicBlock::instr_iterator getBundleEnd(MachineInstr &MI) {
- MachineBasicBlock::instr_iterator I(MI);
+/// Returns an iterator pointing beyond the bundle containing \p I.
+inline MachineBasicBlock::instr_iterator getBundleEnd(
+ MachineBasicBlock::instr_iterator I) {
while (I->isBundledWithSucc())
++I;
return ++I;
}
-/// Return an iterator pointing beyond the bundle containing MI.
-inline MachineBasicBlock::const_instr_iterator
-getBundleEnd(const MachineInstr &MI) {
- MachineBasicBlock::const_instr_iterator I(MI);
+/// Returns an iterator pointing beyond the bundle containing \p I.
+inline MachineBasicBlock::const_instr_iterator getBundleEnd(
+ MachineBasicBlock::const_instr_iterator I) {
while (I->isBundledWithSucc())
++I;
return ++I;
@@ -115,7 +114,7 @@ protected:
///
explicit MachineOperandIteratorBase(MachineInstr &MI, bool WholeBundle) {
if (WholeBundle) {
- InstrI = getBundleStart(MI).getIterator();
+ InstrI = getBundleStart(MI.getIterator());
InstrE = MI.getParent()->instr_end();
} else {
InstrI = InstrE = MI.getIterator();
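With the iterator-based signatures, iterating over the bundle that contains an instruction MI could look roughly like this (a sketch under the assumption that MI is a MachineInstr in a bundle or standalone; not part of the patch):

    for (auto I = getBundleStart(MI.getIterator()),
              E = getBundleEnd(MI.getIterator());
         I != E; ++I) {
      // Visits MI and every instruction bundled with it.
    }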
diff --git a/include/llvm/CodeGen/MachineInstrBundleIterator.h b/include/llvm/CodeGen/MachineInstrBundleIterator.h
index 45a9a188f90e..2d77cfcae20f 100644
--- a/include/llvm/CodeGen/MachineInstrBundleIterator.h
+++ b/include/llvm/CodeGen/MachineInstrBundleIterator.h
@@ -19,23 +19,126 @@
namespace llvm {
+template <class T, bool IsReverse> struct MachineInstrBundleIteratorTraits;
+template <class T> struct MachineInstrBundleIteratorTraits<T, false> {
+ typedef simple_ilist<T, ilist_sentinel_tracking<true>> list_type;
+ typedef typename list_type::iterator instr_iterator;
+ typedef typename list_type::iterator nonconst_instr_iterator;
+ typedef typename list_type::const_iterator const_instr_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<T, true> {
+ typedef simple_ilist<T, ilist_sentinel_tracking<true>> list_type;
+ typedef typename list_type::reverse_iterator instr_iterator;
+ typedef typename list_type::reverse_iterator nonconst_instr_iterator;
+ typedef typename list_type::const_reverse_iterator const_instr_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<const T, false> {
+ typedef simple_ilist<T, ilist_sentinel_tracking<true>> list_type;
+ typedef typename list_type::const_iterator instr_iterator;
+ typedef typename list_type::iterator nonconst_instr_iterator;
+ typedef typename list_type::const_iterator const_instr_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<const T, true> {
+ typedef simple_ilist<T, ilist_sentinel_tracking<true>> list_type;
+ typedef typename list_type::const_reverse_iterator instr_iterator;
+ typedef typename list_type::reverse_iterator nonconst_instr_iterator;
+ typedef typename list_type::const_reverse_iterator const_instr_iterator;
+};
+
+template <bool IsReverse> struct MachineInstrBundleIteratorHelper;
+template <> struct MachineInstrBundleIteratorHelper<false> {
+ /// Get the beginning of the current bundle.
+ template <class Iterator> static Iterator getBundleBegin(Iterator I) {
+ if (!I.isEnd())
+ while (I->isBundledWithPred())
+ --I;
+ return I;
+ }
+
+ /// Get the final node of the current bundle.
+ template <class Iterator> static Iterator getBundleFinal(Iterator I) {
+ if (!I.isEnd())
+ while (I->isBundledWithSucc())
+ ++I;
+ return I;
+ }
+
+ /// Increment forward ilist iterator.
+ template <class Iterator> static void increment(Iterator &I) {
+ I = std::next(getBundleFinal(I));
+ }
+
+ /// Decrement forward ilist iterator.
+ template <class Iterator> static void decrement(Iterator &I) {
+ I = getBundleBegin(std::prev(I));
+ }
+};
+
+template <> struct MachineInstrBundleIteratorHelper<true> {
+ /// Get the beginning of the current bundle.
+ template <class Iterator> static Iterator getBundleBegin(Iterator I) {
+ return MachineInstrBundleIteratorHelper<false>::getBundleBegin(
+ I.getReverse())
+ .getReverse();
+ }
+
+ /// Get the final node of the current bundle.
+ template <class Iterator> static Iterator getBundleFinal(Iterator I) {
+ return MachineInstrBundleIteratorHelper<false>::getBundleFinal(
+ I.getReverse())
+ .getReverse();
+ }
+
+ /// Increment reverse ilist iterator.
+ template <class Iterator> static void increment(Iterator &I) {
+ I = getBundleBegin(std::next(I));
+ }
+
+ /// Decrement reverse ilist iterator.
+ template <class Iterator> static void decrement(Iterator &I) {
+ I = std::prev(getBundleFinal(I));
+ }
+};
+
/// MachineBasicBlock iterator that automatically skips over MIs that are
/// inside bundles (i.e. walk top level MIs only).
-template <typename Ty>
-class MachineInstrBundleIterator
- : public std::iterator<std::bidirectional_iterator_tag, Ty, ptrdiff_t> {
- typedef ilist_iterator<Ty> instr_iterator;
+template <typename Ty, bool IsReverse = false>
+class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> {
+ typedef MachineInstrBundleIteratorTraits<Ty, IsReverse> Traits;
+ typedef typename Traits::instr_iterator instr_iterator;
instr_iterator MII;
public:
- MachineInstrBundleIterator(instr_iterator MI) : MII(MI) {}
+ typedef typename instr_iterator::value_type value_type;
+ typedef typename instr_iterator::difference_type difference_type;
+ typedef typename instr_iterator::pointer pointer;
+ typedef typename instr_iterator::reference reference;
+ typedef std::bidirectional_iterator_tag iterator_category;
+
+ typedef typename instr_iterator::const_pointer const_pointer;
+ typedef typename instr_iterator::const_reference const_reference;
+
+private:
+ typedef typename Traits::nonconst_instr_iterator nonconst_instr_iterator;
+ typedef typename Traits::const_instr_iterator const_instr_iterator;
+ typedef MachineInstrBundleIterator<
+ typename nonconst_instr_iterator::value_type, IsReverse>
+ nonconst_iterator;
+ typedef MachineInstrBundleIterator<Ty, !IsReverse> reverse_iterator;
- MachineInstrBundleIterator(Ty &MI) : MII(MI) {
+public:
+ MachineInstrBundleIterator(instr_iterator MI) : MII(MI) {
+ assert((!MI.getNodePtr() || MI.isEnd() || !MI->isBundledWithPred()) &&
+ "It's not legal to initialize MachineInstrBundleIterator with a "
+ "bundled MI");
+ }
+
+ MachineInstrBundleIterator(reference MI) : MII(MI) {
assert(!MI.isBundledWithPred() && "It's not legal to initialize "
"MachineInstrBundleIterator with a "
"bundled MI");
}
- MachineInstrBundleIterator(Ty *MI) : MII(MI) {
+ MachineInstrBundleIterator(pointer MI) : MII(MI) {
// FIXME: This conversion should be explicit.
assert((!MI || !MI->isBundledWithPred()) && "It's not legal to initialize "
"MachineInstrBundleIterator "
@@ -43,34 +146,101 @@ public:
}
// Template allows conversion from const to nonconst.
template <class OtherTy>
- MachineInstrBundleIterator(const MachineInstrBundleIterator<OtherTy> &I)
+ MachineInstrBundleIterator(
+ const MachineInstrBundleIterator<OtherTy, IsReverse> &I,
+ typename std::enable_if<std::is_convertible<OtherTy *, Ty *>::value,
+ void *>::type = nullptr)
: MII(I.getInstrIterator()) {}
MachineInstrBundleIterator() : MII(nullptr) {}
- Ty &operator*() const { return *MII; }
- Ty *operator->() const { return &operator*(); }
+ /// Get the bundle iterator for the given instruction's bundle.
+ static MachineInstrBundleIterator getAtBundleBegin(instr_iterator MI) {
+ return MachineInstrBundleIteratorHelper<IsReverse>::getBundleBegin(MI);
+ }
+
+ reference operator*() const { return *MII; }
+ pointer operator->() const { return &operator*(); }
+
+ /// Check for null.
+ bool isValid() const { return MII.getNodePtr(); }
- // FIXME: This conversion should be explicit.
- operator Ty *() const { return MII.getNodePtrUnchecked(); }
+ friend bool operator==(const MachineInstrBundleIterator &L,
+ const MachineInstrBundleIterator &R) {
+ return L.MII == R.MII;
+ }
+ friend bool operator==(const MachineInstrBundleIterator &L,
+ const const_instr_iterator &R) {
+ return L.MII == R; // Avoid assertion about validity of R.
+ }
+ friend bool operator==(const const_instr_iterator &L,
+ const MachineInstrBundleIterator &R) {
+ return L == R.MII; // Avoid assertion about validity of L.
+ }
+ friend bool operator==(const MachineInstrBundleIterator &L,
+ const nonconst_instr_iterator &R) {
+ return L.MII == R; // Avoid assertion about validity of R.
+ }
+ friend bool operator==(const nonconst_instr_iterator &L,
+ const MachineInstrBundleIterator &R) {
+ return L == R.MII; // Avoid assertion about validity of L.
+ }
+ friend bool operator==(const MachineInstrBundleIterator &L, const_pointer R) {
+ return L == const_instr_iterator(R); // Avoid assertion about validity of R.
+ }
+ friend bool operator==(const_pointer L, const MachineInstrBundleIterator &R) {
+ return const_instr_iterator(L) == R; // Avoid assertion about validity of L.
+ }
+ friend bool operator==(const MachineInstrBundleIterator &L,
+ const_reference R) {
+ return L == &R; // Avoid assertion about validity of R.
+ }
+ friend bool operator==(const_reference L,
+ const MachineInstrBundleIterator &R) {
+ return &L == R; // Avoid assertion about validity of L.
+ }
- bool operator==(const MachineInstrBundleIterator &X) const {
- return MII == X.MII;
+ friend bool operator!=(const MachineInstrBundleIterator &L,
+ const MachineInstrBundleIterator &R) {
+ return !(L == R);
+ }
+ friend bool operator!=(const MachineInstrBundleIterator &L,
+ const const_instr_iterator &R) {
+ return !(L == R);
+ }
+ friend bool operator!=(const const_instr_iterator &L,
+ const MachineInstrBundleIterator &R) {
+ return !(L == R);
+ }
+ friend bool operator!=(const MachineInstrBundleIterator &L,
+ const nonconst_instr_iterator &R) {
+ return !(L == R);
}
- bool operator!=(const MachineInstrBundleIterator &X) const {
- return !operator==(X);
+ friend bool operator!=(const nonconst_instr_iterator &L,
+ const MachineInstrBundleIterator &R) {
+ return !(L == R);
+ }
+ friend bool operator!=(const MachineInstrBundleIterator &L, const_pointer R) {
+ return !(L == R);
+ }
+ friend bool operator!=(const_pointer L, const MachineInstrBundleIterator &R) {
+ return !(L == R);
+ }
+ friend bool operator!=(const MachineInstrBundleIterator &L,
+ const_reference R) {
+ return !(L == R);
+ }
+ friend bool operator!=(const_reference L,
+ const MachineInstrBundleIterator &R) {
+ return !(L == R);
}
// Increment and decrement operators...
MachineInstrBundleIterator &operator--() {
- do
- --MII;
- while (MII->isBundledWithPred());
+ this->decrement(MII);
return *this;
}
MachineInstrBundleIterator &operator++() {
- while (MII->isBundledWithSucc())
- ++MII;
- ++MII;
+ this->increment(MII);
return *this;
}
MachineInstrBundleIterator operator--(int) {
@@ -85,6 +255,10 @@ public:
}
instr_iterator getInstrIterator() const { return MII; }
+
+ nonconst_iterator getNonConstIterator() const { return MII.getNonConst(); }
+
+ reverse_iterator getReverse() const { return MII.getReverse(); }
};
} // end namespace llvm
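A hedged sketch of the new reverse support: converting between a forward bundle iterator and its reverse counterpart, with MBB assumed to be a non-empty MachineBasicBlock (hypothetical, not part of the patch):

    MachineBasicBlock::iterator It = MBB.begin();
    auto RIt = It.getReverse();    // reverse bundle iterator over the same MI
    auto Again = RIt.getReverse(); // round-trips back to a forward iterator
    // Again == It: getReverse() points at the same instruction, unlike
    // std::reverse_iterator, which is offset by one.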
diff --git a/include/llvm/CodeGen/MachineLoopInfo.h b/include/llvm/CodeGen/MachineLoopInfo.h
index 224a2a1aa59f..dc72ae1810ee 100644
--- a/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/include/llvm/CodeGen/MachineLoopInfo.h
@@ -54,6 +54,12 @@ public:
/// that contains the header.
MachineBasicBlock *getBottomBlock();
+ /// \brief Find the block that contains the loop control variable and the
+ /// loop test. This will return the latch block if it's one of the exiting
+ /// blocks. Otherwise, return the exiting block. Return 'null' when
+ /// multiple exiting blocks are present.
+ MachineBasicBlock *findLoopControlBlock();
+
void dump() const;
private:
@@ -81,6 +87,14 @@ public:
LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
+ /// \brief Find the block that either is the loop preheader, or could
+ /// speculatively be used as the preheader. This is e.g. useful to place
+ /// loop setup code. Code that cannot be speculated should not be placed
+ /// here. SpeculativePreheader controls whether it also tries to
+ /// find the speculative preheader if the regular preheader is not present.
+ MachineBasicBlock *findLoopPreheader(MachineLoop *L,
+ bool SpeculativePreheader = false) const;
+
/// The iterator interface to the top-level loops in the current function.
typedef LoopInfoBase<MachineBasicBlock, MachineLoop>::iterator iterator;
inline iterator begin() const { return LI.begin(); }
@@ -148,29 +162,21 @@ public:
// Allow clients to walk the list of nested loops...
template <> struct GraphTraits<const MachineLoop*> {
- typedef const MachineLoop NodeType;
+ typedef const MachineLoop *NodeRef;
typedef MachineLoopInfo::iterator ChildIteratorType;
- static NodeType *getEntryNode(const MachineLoop *L) { return L; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->end();
- }
+ static NodeRef getEntryNode(const MachineLoop *L) { return L; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
template <> struct GraphTraits<MachineLoop*> {
- typedef MachineLoop NodeType;
+ typedef MachineLoop *NodeRef;
typedef MachineLoopInfo::iterator ChildIteratorType;
- static NodeType *getEntryNode(MachineLoop *L) { return L; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->end();
- }
+ static NodeRef getEntryNode(MachineLoop *L) { return L; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
} // End llvm namespace
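A hedged sketch of the new preheader query; MLI and L are assumed to be a MachineLoopInfo and MachineLoop in scope (hypothetical, not part of the patch):

    if (MachineBasicBlock *Preheader =
            MLI->findLoopPreheader(L, /*SpeculativePreheader=*/true)) {
      // Loop setup code that is safe to speculate could be placed here.
      (void)Preheader;
    }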
diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h
index 5fa7058733b3..a311124a35ba 100644
--- a/include/llvm/CodeGen/MachineMemOperand.h
+++ b/include/llvm/CodeGen/MachineMemOperand.h
@@ -19,8 +19,10 @@
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
+#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -101,32 +103,53 @@ public:
MOVolatile = 1u << 2,
/// The memory access is non-temporal.
MONonTemporal = 1u << 3,
- /// The memory access is invariant.
- MOInvariant = 1u << 4,
+ /// The memory access is dereferenceable (i.e., doesn't trap).
+ MODereferenceable = 1u << 4,
+ /// The memory access always returns the same value (or traps).
+ MOInvariant = 1u << 5,
// Reserved for use by target-specific passes.
- MOTargetFlag1 = 1u << 5,
- MOTargetFlag2 = 1u << 6,
- MOTargetFlag3 = 1u << 7,
+ MOTargetFlag1 = 1u << 6,
+ MOTargetFlag2 = 1u << 7,
+ MOTargetFlag3 = 1u << 8,
LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
};
private:
+ /// Atomic information for this memory operation.
+ struct MachineAtomicInfo {
+ /// Synchronization scope for this memory operation.
+ unsigned SynchScope : 1; // enum SynchronizationScope
+ /// Atomic ordering requirements for this memory operation. For cmpxchg
+ /// atomic operations, atomic ordering requirements when store occurs.
+ unsigned Ordering : 4; // enum AtomicOrdering
+ /// For cmpxchg atomic operations, atomic ordering requirements when store
+ /// does not occur.
+ unsigned FailureOrdering : 4; // enum AtomicOrdering
+ };
+
MachinePointerInfo PtrInfo;
uint64_t Size;
Flags FlagVals;
uint16_t BaseAlignLog2; // log_2(base_alignment) + 1
+ MachineAtomicInfo AtomicInfo;
AAMDNodes AAInfo;
const MDNode *Ranges;
public:
/// Construct a MachineMemOperand object with the specified PtrInfo, flags,
- /// size, and base alignment.
+ /// size, and base alignment. For atomic operations the synchronization scope
+ /// and atomic ordering requirements must also be specified. For cmpxchg
+ /// atomic operations the atomic ordering requirements when store does not
+ /// occur must also be specified.
MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
unsigned base_alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
- const MDNode *Ranges = nullptr);
+ const MDNode *Ranges = nullptr,
+ SynchronizationScope SynchScope = CrossThread,
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+ AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
@@ -174,12 +197,35 @@ public:
/// Return the range tag for the memory reference.
const MDNode *getRanges() const { return Ranges; }
+ /// Return the synchronization scope for this memory operation.
+ SynchronizationScope getSynchScope() const {
+ return static_cast<SynchronizationScope>(AtomicInfo.SynchScope);
+ }
+
+ /// Return the atomic ordering requirements for this memory operation. For
+ /// cmpxchg atomic operations, return the atomic ordering requirements when
+ /// store occurs.
+ AtomicOrdering getOrdering() const {
+ return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
+ }
+
+ /// For cmpxchg atomic operations, return the atomic ordering requirements
+ /// when store does not occur.
+ AtomicOrdering getFailureOrdering() const {
+ return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
+ }
+
bool isLoad() const { return FlagVals & MOLoad; }
bool isStore() const { return FlagVals & MOStore; }
bool isVolatile() const { return FlagVals & MOVolatile; }
bool isNonTemporal() const { return FlagVals & MONonTemporal; }
+ bool isDereferenceable() const { return FlagVals & MODereferenceable; }
bool isInvariant() const { return FlagVals & MOInvariant; }
+ /// Returns true if this operation has an atomic ordering requirement of
+ /// unordered or higher, false otherwise.
+ bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }
+
/// Returns true if this memory operation doesn't have any ordering
/// constraints other than normal aliasing. Volatile and atomic memory
/// operations can't be reordered.
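A hedged sketch of constructing a MachineMemOperand that carries the new atomic information; MF and PtrInfo are assumptions, and the flag/ordering choices are illustrative only (not part of the patch):

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo, MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
        /*s=*/4, /*base_alignment=*/4, AAMDNodes(), /*Ranges=*/nullptr,
        CrossThread, AtomicOrdering::SequentiallyConsistent,
        /*FailureOrdering=*/AtomicOrdering::Monotonic);
    // MMO->isAtomic() is now true, and getFailureOrdering() reports Monotonic.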
diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h
index 77571124a1b8..182d23ef3c90 100644
--- a/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/include/llvm/CodeGen/MachineModuleInfo.h
@@ -35,62 +35,38 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/IR/DebugLoc.h"
-#include "llvm/IR/Metadata.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/Dwarf.h"
namespace llvm {
//===----------------------------------------------------------------------===//
// Forward declarations.
+class BlockAddress;
+class CallInst;
class Constant;
class GlobalVariable;
-class BlockAddress;
+class LandingPadInst;
class MDNode;
class MMIAddrLabelMap;
class MachineBasicBlock;
class MachineFunction;
+class MachineFunctionInitializer;
class Module;
class PointerType;
class StructType;
-struct SEHHandler {
- // Filter or finally function. Null indicates a catch-all.
- const Function *FilterOrFinally;
-
- // Address of block to recover at. Null for a finally handler.
- const BlockAddress *RecoverBA;
-};
-
//===----------------------------------------------------------------------===//
-/// LandingPadInfo - This structure is used to retain landing pad info for
-/// the current function.
+/// This class can be derived from and used by targets to hold private
+/// target-specific information for each Module. Objects of this type are
+/// accessed/created with MMI::getInfo and destroyed when the MachineModuleInfo
+/// is destroyed.
///
-struct LandingPadInfo {
- MachineBasicBlock *LandingPadBlock; // Landing pad block.
- SmallVector<MCSymbol *, 1> BeginLabels; // Labels prior to invoke.
- SmallVector<MCSymbol *, 1> EndLabels; // Labels after invoke.
- SmallVector<SEHHandler, 1> SEHHandlers; // SEH handlers active at this lpad.
- MCSymbol *LandingPadLabel; // Label at beginning of landing pad.
- std::vector<int> TypeIds; // List of type ids (filters negative).
-
- explicit LandingPadInfo(MachineBasicBlock *MBB)
- : LandingPadBlock(MBB), LandingPadLabel(nullptr) {}
-};
-
-//===----------------------------------------------------------------------===//
-/// MachineModuleInfoImpl - This class can be derived from and used by targets
-/// to hold private target-specific information for each Module. Objects of
-/// type are accessed/created with MMI::getInfo and destroyed when the
-/// MachineModuleInfo is destroyed.
-///
class MachineModuleInfoImpl {
public:
typedef PointerIntPair<MCSymbol*, 1, bool> StubValueTy;
@@ -104,127 +80,99 @@ protected:
};
//===----------------------------------------------------------------------===//
-/// MachineModuleInfo - This class contains meta information specific to a
-/// module. Queries can be made by different debugging and exception handling
-/// schemes and reformated for specific use.
+/// This class contains meta information specific to a module. Queries can be
+/// made by different debugging and exception handling schemes and reformatted
+/// for specific use.
///
class MachineModuleInfo : public ImmutablePass {
- /// Context - This is the MCContext used for the entire code generator.
+ const TargetMachine &TM;
+
+ /// This is the MCContext used for the entire code generator.
MCContext Context;
- /// TheModule - This is the LLVM Module being worked on.
+ /// This is the LLVM Module being worked on.
const Module *TheModule;
- /// ObjFileMMI - This is the object-file-format-specific implementation of
+ /// This is the object-file-format-specific implementation of
/// MachineModuleInfoImpl, which lets targets accumulate whatever info they
/// want.
MachineModuleInfoImpl *ObjFileMMI;
- /// List of moves done by a function's prolog. Used to construct frame maps
- /// by debug and exception handling consumers.
- std::vector<MCCFIInstruction> FrameInstructions;
+ /// \name Exception Handling
+ /// \{
- /// LandingPads - List of LandingPadInfo describing the landing pad
- /// information in the current function.
- std::vector<LandingPadInfo> LandingPads;
-
- /// LPadToCallSiteMap - Map a landing pad's EH symbol to the call site
- /// indexes.
- DenseMap<MCSymbol*, SmallVector<unsigned, 4> > LPadToCallSiteMap;
-
- /// CallSiteMap - Map of invoke call site index values to associated begin
- /// EH_LABEL for the current function.
- DenseMap<MCSymbol*, unsigned> CallSiteMap;
+ /// Vector of all personality functions ever seen. Used to emit common EH
+ /// frames.
+ std::vector<const Function *> Personalities;
- /// CurCallSite - The current call site index being processed, if any. 0 if
- /// none.
+ /// The current call site index being processed, if any. 0 if none.
unsigned CurCallSite;
- /// TypeInfos - List of C++ TypeInfo used in the current function.
- std::vector<const GlobalValue *> TypeInfos;
-
- /// FilterIds - List of typeids encoding filters used in the current function.
- std::vector<unsigned> FilterIds;
-
- /// FilterEnds - List of the indices in FilterIds corresponding to filter
- /// terminators.
- std::vector<unsigned> FilterEnds;
-
- /// Personalities - Vector of all personality functions ever seen. Used to
- /// emit common EH frames.
- std::vector<const Function *> Personalities;
+ /// \}
- /// AddrLabelSymbols - This map keeps track of which symbol is being used for
- /// the specified basic block's address of label.
+ /// This map keeps track of which symbol is being used for the specified
+ /// basic block's address of label.
MMIAddrLabelMap *AddrLabelSymbols;
- bool CallsEHReturn;
- bool CallsUnwindInit;
- bool HasEHFunclets;
-
// TODO: Ideally, what we'd like is to have a switch that allows emitting
// synchronous (precise at call-sites only) CFA into .eh_frame. However,
// even under this switch, we'd like .debug_frame to be precise when using.
// -g. At this moment, there's no way to specify that some CFI directives
// go into .eh_frame only, while others go into .debug_frame only.
- /// DbgInfoAvailable - True if debugging information is available
- /// in this module.
+ /// True if debugging information is available in this module.
bool DbgInfoAvailable;
- /// UsesVAFloatArgument - True if this module calls VarArg function with
- /// floating-point arguments. This is used to emit an undefined reference
- /// to _fltused on Windows targets.
+ /// True if this module calls VarArg function with floating-point arguments.
+ /// This is used to emit an undefined reference to _fltused on Windows
+ /// targets.
bool UsesVAFloatArgument;
- /// UsesMorestackAddr - True if the module calls the __morestack function
- /// indirectly, as is required under the large code model on x86. This is used
- /// to emit a definition of a symbol, __morestack_addr, containing the
- /// address. See comments in lib/Target/X86/X86FrameLowering.cpp for more
- /// details.
+ /// True if the module calls the __morestack function indirectly, as is
+ /// required under the large code model on x86. This is used to emit
+ /// a definition of a symbol, __morestack_addr, containing the address. See
+ /// comments in lib/Target/X86/X86FrameLowering.cpp for more details.
bool UsesMorestackAddr;
- EHPersonality PersonalityTypeCache;
+ MachineFunctionInitializer *MFInitializer;
+ /// Maps IR Functions to their corresponding MachineFunctions.
+ DenseMap<const Function*, std::unique_ptr<MachineFunction>> MachineFunctions;
+ /// Next unique number available for a MachineFunction.
+ unsigned NextFnNum = 0;
+ const Function *LastRequest = nullptr; ///< Used for shortcut/cache.
+ MachineFunction *LastResult = nullptr; ///< Used for shortcut/cache.
public:
static char ID; // Pass identification, replacement for typeid
- struct VariableDbgInfo {
- const DILocalVariable *Var;
- const DIExpression *Expr;
- unsigned Slot;
- const DILocation *Loc;
-
- VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
- unsigned Slot, const DILocation *Loc)
- : Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
- };
- typedef SmallVector<VariableDbgInfo, 4> VariableDbgInfoMapTy;
- VariableDbgInfoMapTy VariableDbgInfos;
-
- MachineModuleInfo(); // DUMMY CONSTRUCTOR, DO NOT CALL.
- // Real constructor.
- MachineModuleInfo(const MCAsmInfo &MAI, const MCRegisterInfo &MRI,
- const MCObjectFileInfo *MOFI);
+ explicit MachineModuleInfo(const TargetMachine *TM = nullptr);
~MachineModuleInfo() override;
// Initialization and Finalization
bool doInitialization(Module &) override;
bool doFinalization(Module &) override;
- /// EndFunction - Discard function meta information.
- ///
- void EndFunction();
-
const MCContext &getContext() const { return Context; }
MCContext &getContext() { return Context; }
void setModule(const Module *M) { TheModule = M; }
const Module *getModule() const { return TheModule; }
- /// getInfo - Keep track of various per-function pieces of information for
- /// backends that would like to do so.
- ///
+ void setMachineFunctionInitializer(MachineFunctionInitializer *MFInit) {
+ MFInitializer = MFInit;
+ }
+
+ /// Returns the MachineFunction constructed for the IR function \p F.
+ /// If none exists yet, creates a new MachineFunction and runs the
+ /// MachineFunctionInitializer on it.
+ MachineFunction &getMachineFunction(const Function &F);
+
+ /// Delete the MachineFunction constructed for the IR function \p F and
+ /// reset its entry in the IR Function to MachineFunction map.
+ void deleteMachineFunctionFor(Function &F);
+
+ /// Keep track of various per-function pieces of information for backends
+ /// that would like to do so.
template<typename Ty>
Ty &getObjFileInfo() {
if (ObjFileMMI == nullptr)
@@ -237,20 +185,10 @@ public:
return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
}
- /// hasDebugInfo - Returns true if valid debug info is present.
- ///
+ /// Returns true if valid debug info is present.
bool hasDebugInfo() const { return DbgInfoAvailable; }
void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
- bool callsEHReturn() const { return CallsEHReturn; }
- void setCallsEHReturn(bool b) { CallsEHReturn = b; }
-
- bool callsUnwindInit() const { return CallsUnwindInit; }
- void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
-
- bool hasEHFunclets() const { return HasEHFunclets; }
- void setHasEHFunclets(bool V) { HasEHFunclets = V; }
-
bool usesVAFloatArgument() const {
return UsesVAFloatArgument;
}
@@ -267,166 +205,52 @@ public:
UsesMorestackAddr = b;
}
- /// \brief Returns a reference to a list of cfi instructions in the current
- /// function's prologue. Used to construct frame maps for debug and exception
- /// handling comsumers.
- const std::vector<MCCFIInstruction> &getFrameInstructions() const {
- return FrameInstructions;
- }
-
- unsigned LLVM_ATTRIBUTE_UNUSED_RESULT
- addFrameInst(const MCCFIInstruction &Inst) {
- FrameInstructions.push_back(Inst);
- return FrameInstructions.size() - 1;
- }
-
- /// getAddrLabelSymbol - Return the symbol to be used for the specified basic
- /// block when its address is taken. This cannot be its normal LBB label
- /// because the block may be accessed outside its containing function.
+ /// Return the symbol to be used for the specified basic block when its
+ /// address is taken. This cannot be its normal LBB label because the block
+ /// may be accessed outside its containing function.
MCSymbol *getAddrLabelSymbol(const BasicBlock *BB) {
return getAddrLabelSymbolToEmit(BB).front();
}
- /// getAddrLabelSymbolToEmit - Return the symbol to be used for the specified
- /// basic block when its address is taken. If other blocks were RAUW'd to
- /// this one, we may have to emit them as well, return the whole set.
+ /// Return the symbol to be used for the specified basic block when its
+ /// address is taken. If other blocks were RAUW'd to this one, we may have
+ /// to emit them as well, so the whole set is returned.
ArrayRef<MCSymbol *> getAddrLabelSymbolToEmit(const BasicBlock *BB);
- /// takeDeletedSymbolsForFunction - If the specified function has had any
- /// references to address-taken blocks generated, but the block got deleted,
- /// return the symbol now so we can emit it. This prevents emitting a
- /// reference to a symbol that has no definition.
+ /// If the specified function has had any references to address-taken blocks
+ /// generated, but the block got deleted, return the symbol now so we can
+ /// emit it. This prevents emitting a reference to a symbol that has no
+ /// definition.
void takeDeletedSymbolsForFunction(const Function *F,
std::vector<MCSymbol*> &Result);
+ /// \name Exception Handling
+ /// \{
- //===- EH ---------------------------------------------------------------===//
-
- /// getOrCreateLandingPadInfo - Find or create an LandingPadInfo for the
- /// specified MachineBasicBlock.
- LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
-
- /// addInvoke - Provide the begin and end labels of an invoke style call and
- /// associate it with a try landing pad block.
- void addInvoke(MachineBasicBlock *LandingPad,
- MCSymbol *BeginLabel, MCSymbol *EndLabel);
+ /// Set the call site currently being processed.
+ void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }
- /// addLandingPad - Add a new panding pad. Returns the label ID for the
- /// landing pad entry.
- MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
+ /// Get the call site currently being processed, if any. Returns zero if
+ /// none.
+ unsigned getCurrentCallSite() { return CurCallSite; }
- /// addPersonality - Provide the personality function for the exception
- /// information.
+ /// Provide the personality function for the exception information.
void addPersonality(const Function *Personality);
- /// getPersonalities - Return array of personality functions ever seen.
+ /// Return array of personality functions ever seen.
const std::vector<const Function *>& getPersonalities() const {
return Personalities;
}
+ /// \}
+}; // End class MachineModuleInfo
- /// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
- ///
- void addCatchTypeInfo(MachineBasicBlock *LandingPad,
- ArrayRef<const GlobalValue *> TyInfo);
-
- /// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
- ///
- void addFilterTypeInfo(MachineBasicBlock *LandingPad,
- ArrayRef<const GlobalValue *> TyInfo);
-
- /// addCleanup - Add a cleanup action for a landing pad.
- ///
- void addCleanup(MachineBasicBlock *LandingPad);
-
- void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
- const BlockAddress *RecoverLabel);
-
- void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
- const Function *Cleanup);
-
- /// getTypeIDFor - Return the type id for the specified typeinfo. This is
- /// function wide.
- unsigned getTypeIDFor(const GlobalValue *TI);
-
- /// getFilterIDFor - Return the id of the filter encoded by TyIds. This is
- /// function wide.
- int getFilterIDFor(std::vector<unsigned> &TyIds);
-
- /// TidyLandingPads - Remap landing pad labels and remove any deleted landing
- /// pads.
- void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
-
- /// getLandingPads - Return a reference to the landing pad info for the
- /// current function.
- const std::vector<LandingPadInfo> &getLandingPads() const {
- return LandingPads;
- }
-
- /// setCallSiteLandingPad - Map the landing pad's EH symbol to the call
- /// site indexes.
- void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
-
- /// getCallSiteLandingPad - Get the call site indexes for a landing pad EH
- /// symbol.
- SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
- assert(hasCallSiteLandingPad(Sym) &&
- "missing call site number for landing pad!");
- return LPadToCallSiteMap[Sym];
- }
-
- /// hasCallSiteLandingPad - Return true if the landing pad Eh symbol has an
- /// associated call site.
- bool hasCallSiteLandingPad(MCSymbol *Sym) {
- return !LPadToCallSiteMap[Sym].empty();
- }
-
- /// setCallSiteBeginLabel - Map the begin label for a call site.
- void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
- CallSiteMap[BeginLabel] = Site;
- }
-
- /// getCallSiteBeginLabel - Get the call site number for a begin label.
- unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) {
- assert(hasCallSiteBeginLabel(BeginLabel) &&
- "Missing call site number for EH_LABEL!");
- return CallSiteMap[BeginLabel];
- }
-
- /// hasCallSiteBeginLabel - Return true if the begin label has a call site
- /// number associated with it.
- bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) {
- return CallSiteMap[BeginLabel] != 0;
- }
-
- /// setCurrentCallSite - Set the call site currently being processed.
- void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }
-
- /// getCurrentCallSite - Get the call site currently being processed, if any.
- /// return zero if none.
- unsigned getCurrentCallSite() { return CurCallSite; }
-
- /// getTypeInfos - Return a reference to the C++ typeinfo for the current
- /// function.
- const std::vector<const GlobalValue *> &getTypeInfos() const {
- return TypeInfos;
- }
-
- /// getFilterIds - Return a reference to the typeids encoding filters used in
- /// the current function.
- const std::vector<unsigned> &getFilterIds() const {
- return FilterIds;
- }
-
- /// setVariableDbgInfo - Collect information used to emit debugging
- /// information of a variable.
- void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
- unsigned Slot, const DILocation *Loc) {
- VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
- }
-
- VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
+//===- MMI building helpers -----------------------------------------------===//
-}; // End class MachineModuleInfo
+/// Determine if any floating-point values are being passed to this variadic
+/// function, and set the MachineModuleInfo's usesVAFloatArgument flag if so.
+/// This flag is used to emit an undefined reference to _fltused on Windows,
+/// which will link in MSVCRT's floating-point support.
+void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI);
} // End llvm namespace
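
A hedged sketch (not part of the patch) of how a caller might drive the per-module MachineFunction ownership introduced above; the function name and loop body are illustrative only.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

void emitAllFunctions(llvm::Module &M, llvm::MachineModuleInfo &MMI) {
  for (llvm::Function &F : M) {
    if (F.isDeclaration())
      continue;
    // Lazily constructs the MachineFunction (and runs the registered
    // MachineFunctionInitializer, if any) on first request.
    llvm::MachineFunction &MF = MMI.getMachineFunction(F);
    (void)MF; // ... run code generation over MF ...
    // Once the function has been emitted, its machine-level state can be
    // released and the Function -> MachineFunction link reset.
    MMI.deleteMachineFunctionFor(F);
  }
}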
diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h
index ee0a9cf11e6a..5df99a6c807e 100644
--- a/include/llvm/CodeGen/MachineOperand.h
+++ b/include/llvm/CodeGen/MachineOperand.h
@@ -15,6 +15,7 @@
#define LLVM_CODEGEN_MACHINEOPERAND_H
#include "llvm/Support/DataTypes.h"
+#include "llvm/IR/Intrinsics.h"
#include <cassert>
namespace llvm {
@@ -29,6 +30,7 @@ class MachineRegisterInfo;
class MDNode;
class ModuleSlotTracker;
class TargetMachine;
+class TargetIntrinsicInfo;
class TargetRegisterInfo;
class hash_code;
class raw_ostream;
@@ -60,7 +62,9 @@ public:
MO_RegisterLiveOut, ///< Mask of live-out registers.
MO_Metadata, ///< Metadata reference (for debug info)
MO_MCSymbol, ///< MCSymbol reference (for debug/eh info)
- MO_CFIIndex ///< MCCFIInstruction index.
+ MO_CFIIndex, ///< MCCFIInstruction index.
+ MO_IntrinsicID, ///< Intrinsic ID for ISel
+ MO_Predicate, ///< Generic predicate for ISel
};
private:
@@ -160,6 +164,8 @@ private:
const MDNode *MD; // For MO_Metadata.
MCSymbol *Sym; // For MO_MCSymbol.
unsigned CFIIndex; // For MO_CFI.
+ Intrinsic::ID IntrinsicID; // For MO_IntrinsicID.
+ unsigned Pred; // For MO_Predicate
struct { // For MO_Register.
// Register number is in SmallContents.RegNo.
@@ -218,9 +224,12 @@ public:
///
void clearParent() { ParentMI = nullptr; }
- void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr) const;
+ void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr,
+ const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;
void print(raw_ostream &os, ModuleSlotTracker &MST,
- const TargetRegisterInfo *TRI = nullptr) const;
+ const TargetRegisterInfo *TRI = nullptr,
+ const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;
+ LLVM_DUMP_METHOD void dump() const;
//===--------------------------------------------------------------------===//
// Accessors that tell you what kind of MachineOperand you're looking at.
@@ -258,7 +267,8 @@ public:
bool isMetadata() const { return OpKind == MO_Metadata; }
bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
bool isCFIIndex() const { return OpKind == MO_CFIIndex; }
-
+ bool isIntrinsicID() const { return OpKind == MO_IntrinsicID; }
+ bool isPredicate() const { return OpKind == MO_Predicate; }
//===--------------------------------------------------------------------===//
// Accessors for Register Operands
//===--------------------------------------------------------------------===//
@@ -453,6 +463,16 @@ public:
return Contents.CFIIndex;
}
+ Intrinsic::ID getIntrinsicID() const {
+ assert(isIntrinsicID() && "Wrong MachineOperand accessor");
+ return Contents.IntrinsicID;
+ }
+
+ unsigned getPredicate() const {
+ assert(isPredicate() && "Wrong MachineOperand accessor");
+ return Contents.Pred;
+ }
+
/// Return the offset from the symbol in this operand. This always returns 0
/// for ExternalSymbol operands.
int64_t getOffset() const {
@@ -547,8 +567,8 @@ public:
// Other methods.
//===--------------------------------------------------------------------===//
- /// isIdenticalTo - Return true if this operand is identical to the specified
- /// operand. Note: This method ignores isKill and isDead properties.
+ /// Returns true if this operand is identical to the specified operand except
+ /// for liveness related flags (isKill, isUndef and isDead).
bool isIdenticalTo(const MachineOperand &Other) const;
/// \brief MachineOperand hash_value overload.
@@ -574,6 +594,9 @@ public:
/// ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
void ChangeToMCSymbol(MCSymbol *Sym);
+ /// Replace this operand with a frame index.
+ void ChangeToFrameIndex(int Idx);
+
/// ChangeToRegister - Replace this operand with a new register operand of
/// the specified value. If an operand is known to be an register already,
/// the setReg method should be used.
@@ -732,6 +755,18 @@ public:
return Op;
}
+ static MachineOperand CreateIntrinsicID(Intrinsic::ID ID) {
+ MachineOperand Op(MachineOperand::MO_IntrinsicID);
+ Op.Contents.IntrinsicID = ID;
+ return Op;
+ }
+
+ static MachineOperand CreatePredicate(unsigned Pred) {
+ MachineOperand Op(MachineOperand::MO_Predicate);
+ Op.Contents.Pred = Pred;
+ return Op;
+ }
+
friend class MachineInstr;
friend class MachineRegisterInfo;
private:
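
A minimal sketch of the two new operand kinds declared above. Interpreting the predicate payload as a CmpInst predicate is an assumption about how generic instruction selection uses it.

#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>

void buildISelOperands() {
  // An intrinsic-ID operand for instruction selection.
  llvm::MachineOperand IntrinOp =
      llvm::MachineOperand::CreateIntrinsicID(llvm::Intrinsic::memcpy);
  assert(IntrinOp.isIntrinsicID() &&
         IntrinOp.getIntrinsicID() == llvm::Intrinsic::memcpy);

  // A generic predicate operand; the unsigned payload here carries a
  // CmpInst predicate value (an assumption for this example).
  llvm::MachineOperand PredOp =
      llvm::MachineOperand::CreatePredicate(llvm::CmpInst::ICMP_EQ);
  assert(PredOp.isPredicate() &&
         PredOp.getPredicate() == llvm::CmpInst::ICMP_EQ);
  (void)IntrinOp;
  (void)PredOp;
}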
diff --git a/include/llvm/CodeGen/MachinePassRegistry.h b/include/llvm/CodeGen/MachinePassRegistry.h
index 6731983c5874..db914b1f8bc7 100644
--- a/include/llvm/CodeGen/MachinePassRegistry.h
+++ b/include/llvm/CodeGen/MachinePassRegistry.h
@@ -37,8 +37,8 @@ class MachinePassRegistryListener {
public:
MachinePassRegistryListener() {}
virtual ~MachinePassRegistryListener() {}
- virtual void NotifyAdd(const char *N, MachinePassCtor C, const char *D) = 0;
- virtual void NotifyRemove(const char *N) = 0;
+ virtual void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) = 0;
+ virtual void NotifyRemove(StringRef N) = 0;
};
@@ -52,8 +52,8 @@ class MachinePassRegistryNode {
private:
MachinePassRegistryNode *Next; // Next function pass in list.
- const char *Name; // Name of function pass.
- const char *Description; // Description string.
+ StringRef Name; // Name of function pass.
+ StringRef Description; // Description string.
MachinePassCtor Ctor; // Function pass creator.
public:
@@ -68,8 +68,8 @@ public:
// Accessors
MachinePassRegistryNode *getNext() const { return Next; }
MachinePassRegistryNode **getNextAddress() { return &Next; }
- const char *getName() const { return Name; }
- const char *getDescription() const { return Description; }
+ StringRef getName() const { return Name; }
+ StringRef getDescription() const { return Description; }
MachinePassCtor getCtor() const { return Ctor; }
void setNext(MachinePassRegistryNode *N) { Next = N; }
@@ -143,10 +143,10 @@ public:
// Implement the MachinePassRegistryListener callbacks.
//
- void NotifyAdd(const char *N, MachinePassCtor C, const char *D) override {
+ void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) override {
this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
}
- void NotifyRemove(const char *N) override {
+ void NotifyRemove(StringRef N) override {
this->removeLiteralOption(N);
}
};
diff --git a/include/llvm/CodeGen/MachineRegionInfo.h b/include/llvm/CodeGen/MachineRegionInfo.h
index df9823f741dc..21f847c7e5ba 100644
--- a/include/llvm/CodeGen/MachineRegionInfo.h
+++ b/include/llvm/CodeGen/MachineRegionInfo.h
@@ -142,10 +142,11 @@ RegionGraphTraits(const MachineRegion, const MachineRegionNode);
template <> struct GraphTraits<MachineRegionInfo*>
: public GraphTraits<FlatIt<MachineRegionNode*> > {
- typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
- GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+ typedef df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
+ GraphTraits<FlatIt<NodeRef>>>
+ nodes_iterator;
- static NodeType *getEntryNode(MachineRegionInfo *RI) {
+ static NodeRef getEntryNode(MachineRegionInfo *RI) {
return GraphTraits<FlatIt<MachineRegion*> >::getEntryNode(RI->getTopLevelRegion());
}
static nodes_iterator nodes_begin(MachineRegionInfo* RI) {
@@ -158,10 +159,11 @@ template <> struct GraphTraits<MachineRegionInfo*>
template <> struct GraphTraits<MachineRegionInfoPass*>
: public GraphTraits<MachineRegionInfo *> {
- typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
- GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+ typedef df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
+ GraphTraits<FlatIt<NodeRef>>>
+ nodes_iterator;
- static NodeType *getEntryNode(MachineRegionInfoPass *RI) {
+ static NodeRef getEntryNode(MachineRegionInfoPass *RI) {
return GraphTraits<MachineRegionInfo*>::getEntryNode(&RI->getRegionInfo());
}
static nodes_iterator nodes_begin(MachineRegionInfoPass* RI) {
diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h
index 07d2d016f274..c599caf7535d 100644
--- a/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/iterator_range.h"
// PointerUnion needs to have access to the full RegisterBank type.
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -51,7 +52,7 @@ private:
Delegate *TheDelegate;
/// True if subregister liveness is tracked.
- bool TracksSubRegLiveness;
+ const bool TracksSubRegLiveness;
/// VRegInfo - Information we keep for each virtual register.
///
@@ -104,17 +105,9 @@ private:
/// started.
BitVector ReservedRegs;
- typedef DenseMap<unsigned, unsigned> VRegToSizeMap;
+ typedef DenseMap<unsigned, LLT> VRegToTypeMap;
/// Map generic virtual registers to their actual size.
- mutable std::unique_ptr<VRegToSizeMap> VRegToSize;
-
- /// Accessor for VRegToSize. This accessor should only be used
- /// by global-isel related work.
- VRegToSizeMap &getVRegToSize() const {
- if (!VRegToSize)
- VRegToSize.reset(new VRegToSizeMap);
- return *VRegToSize.get();
- }
+ mutable std::unique_ptr<VRegToTypeMap> VRegToType;
/// Keep track of the physical registers that are live in to the function.
/// Live in values are typically arguments in registers. LiveIn values are
@@ -166,7 +159,7 @@ public:
// leaveSSA - Indicates that the machine function is no longer in SSA form.
void leaveSSA() {
- MF->getProperties().clear(MachineFunctionProperties::Property::IsSSA);
+ MF->getProperties().reset(MachineFunctionProperties::Property::IsSSA);
}
/// tracksLiveness - Returns true when tracking register liveness accurately.
@@ -182,7 +175,7 @@ public:
/// This should be called by late passes that invalidate the liveness
/// information.
void invalidateLiveness() {
- MF->getProperties().clear(
+ MF->getProperties().reset(
MachineFunctionProperties::Property::TracksLiveness);
}
@@ -199,10 +192,6 @@ public:
return TracksSubRegLiveness;
}
- void enableSubRegLiveness(bool Enable = true) {
- TracksSubRegLiveness = Enable;
- }
-
//===--------------------------------------------------------------------===//
// Register Info
//===--------------------------------------------------------------------===//
@@ -553,10 +542,9 @@ public:
void dumpUses(unsigned RegNo) const;
#endif
- /// isConstantPhysReg - Returns true if PhysReg is unallocatable and constant
- /// throughout the function. It is safe to move instructions that read such
- /// a physreg.
- bool isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const;
+ /// Returns true if PhysReg is unallocatable and constant throughout the
+ /// function. Writing to a constant register has no effect.
+ bool isConstantPhysReg(unsigned PhysReg) const;
/// Get an iterator over the pressure sets affected by the given physical or
/// virtual register. If RegUnit is physical, it must be a register unit (from
@@ -645,18 +633,35 @@ public:
///
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
- /// Get the size in bits of \p VReg or 0 if VReg is not a generic
+ /// Accessor for VRegToType. This accessor should only be used
+ /// by global-isel related work.
+ VRegToTypeMap &getVRegToType() const {
+ if (!VRegToType)
+ VRegToType.reset(new VRegToTypeMap);
+ return *VRegToType.get();
+ }
+
+ /// Get the low-level type of \p VReg or LLT{} if VReg is not a generic
/// (target independent) virtual register.
- unsigned getSize(unsigned VReg) const;
+ LLT getType(unsigned VReg) const;
+
+ /// Set the low-level type of \p VReg to \p Ty.
+ void setType(unsigned VReg, LLT Ty);
+
+ /// Create and return a new generic virtual register with low-level
+ /// type \p Ty.
+ unsigned createGenericVirtualRegister(LLT Ty);
- /// Set the size in bits of \p VReg to \p Size.
- /// Although the size should be set at build time, mir infrastructure
- /// is not yet able to do it.
- void setSize(unsigned VReg, unsigned Size);
+ /// Remove all types associated to virtual registers (after instruction
+ /// selection and constraining of all generic virtual registers).
+ void clearVirtRegTypes();
- /// Create and return a new generic virtual register with a size of \p Size.
- /// \pre Size > 0.
- unsigned createGenericVirtualRegister(unsigned Size);
+ /// Creates a new virtual register that has no register class, register bank
+ /// or size assigned yet. This is only allowed to be used
+ /// temporarily while constructing machine instructions. Most operations are
+ /// undefined on an incomplete register until one of setRegClass(),
+ /// setRegBank() or setSize() has been called on it.
+ unsigned createIncompleteVirtualRegister();
/// getNumVirtRegs - Return the number of virtual registers created.
///
@@ -892,10 +897,11 @@ public:
advance();
} while (Op && Op->getParent() == P);
} else if (ByBundle) {
- MachineInstr &P = getBundleStart(*Op->getParent());
+ MachineBasicBlock::instr_iterator P =
+ getBundleStart(Op->getParent()->getIterator());
do {
advance();
- } while (Op && &getBundleStart(*Op->getParent()) == &P);
+ } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
}
return *this;
@@ -994,10 +1000,11 @@ public:
advance();
} while (Op && Op->getParent() == P);
} else if (ByBundle) {
- MachineInstr &P = getBundleStart(*Op->getParent());
+ MachineBasicBlock::instr_iterator P =
+ getBundleStart(Op->getParent()->getIterator());
do {
advance();
- } while (Op && &getBundleStart(*Op->getParent()) == &P);
+ } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
}
return *this;
@@ -1010,7 +1017,7 @@ public:
MachineInstr &operator*() const {
assert(Op && "Cannot dereference end iterator!");
if (ByBundle)
- return getBundleStart(*Op->getParent());
+ return *getBundleStart(Op->getParent()->getIterator());
return *Op->getParent();
}
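
A short sketch of the LLT-based generic virtual register interface added above, replacing the old size-based one; the helper name is hypothetical.

#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include <cassert>

unsigned createGenericS32(llvm::MachineRegisterInfo &MRI) {
  // A 32-bit scalar generic virtual register; with the previous interface
  // this would have been createGenericVirtualRegister(32) / getSize(VReg).
  llvm::LLT S32 = llvm::LLT::scalar(32);
  unsigned VReg = MRI.createGenericVirtualRegister(S32);
  assert(MRI.getType(VReg) == S32 && "type should round-trip");
  return VReg;
}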
diff --git a/include/llvm/CodeGen/MachineScheduler.h b/include/llvm/CodeGen/MachineScheduler.h
index 06e992179031..81b8741fea27 100644
--- a/include/llvm/CodeGen/MachineScheduler.h
+++ b/include/llvm/CodeGen/MachineScheduler.h
@@ -42,8 +42,8 @@
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
-// ScheduleDAGMI *DAG = new ScheduleDAGMI(C, CustomStrategy(C));
-// DAG->addMutation(new CustomDependencies(DAG->TII, DAG->TRI));
+// ScheduleDAGMI *DAG = createGenericSchedLive(C);
+// DAG->addMutation(new CustomDAGMutation(...));
// return DAG;
// }
//
@@ -75,12 +75,27 @@
#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
#define LLVM_CODEGEN_MACHINESCHEDULER_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
#include <memory>
+#include <string>
+#include <vector>
namespace llvm {
@@ -91,7 +106,6 @@ class LiveIntervals;
class MachineDominatorTree;
class MachineLoopInfo;
class RegisterClassInfo;
-class ScheduleDAGInstrs;
class SchedDFSResult;
class ScheduleHazardRecognizer;
@@ -126,6 +140,7 @@ public:
: MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
Registry.Add(this);
}
+
~MachineSchedRegistry() { Registry.Remove(this); }
// Accessors.
@@ -133,9 +148,11 @@ public:
MachineSchedRegistry *getNext() const {
return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
}
+
static MachineSchedRegistry *getList() {
return (MachineSchedRegistry *)Registry.getList();
}
+
static void setListener(MachinePassRegistryListener *L) {
Registry.setListener(L);
}
@@ -173,8 +190,9 @@ struct MachineSchedPolicy {
/// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
class MachineSchedStrategy {
virtual void anchor();
+
public:
- virtual ~MachineSchedStrategy() {}
+ virtual ~MachineSchedStrategy() = default;
/// Optionally override the per-region scheduling policy.
virtual void initPolicy(MachineBasicBlock::iterator Begin,
@@ -256,8 +274,7 @@ public:
bool RemoveKillFlags)
: ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
LIS(C->LIS), SchedImpl(std::move(S)), Topo(SUnits, &ExitSU),
- CurrentTop(), CurrentBottom(), NextClusterPred(nullptr),
- NextClusterSucc(nullptr) {
+ NextClusterPred(nullptr), NextClusterSucc(nullptr) {
#ifndef NDEBUG
NumInstrsScheduled = 0;
#endif
@@ -278,7 +295,8 @@ public:
///
/// ScheduleDAGMI takes ownership of the Mutation object.
void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
- Mutations.push_back(std::move(Mutation));
+ if (Mutation)
+ Mutations.push_back(std::move(Mutation));
}
/// \brief True if an edge can be added from PredSU to SuccSU without creating
@@ -362,6 +380,9 @@ protected:
MachineBasicBlock::iterator LiveRegionEnd;
+ /// Maps vregs to the SUnits of their uses in the current scheduling region.
+ VReg2SUnitMultiMap VRegUses;
+
// Map each SU to its summary of pressure changes. This array is updated for
// liveness during bottom-up scheduling. Top-down scheduling may proceed but
// has no effect on the pressure diffs.
@@ -474,6 +495,8 @@ protected:
void updateScheduledPressure(const SUnit *SU,
const std::vector<unsigned> &NewMaxPressure);
+
+ void collectVRegUses(SUnit &SU);
};
//===----------------------------------------------------------------------===//
@@ -518,9 +541,7 @@ public:
ArrayRef<SUnit*> elements() { return Queue; }
- iterator find(SUnit *SU) {
- return std::find(Queue.begin(), Queue.end(), SU);
- }
+ iterator find(SUnit *SU) { return llvm::find(Queue, SU); }
void push(SUnit *SU) {
Queue.push_back(SU);
@@ -591,10 +612,6 @@ private:
/// instruction.
bool CheckPending;
- // For heuristics, keep a list of the nodes that immediately depend on the
- // most recently scheduled node.
- SmallPtrSet<const SUnit*, 8> NextSUs;
-
/// Number of cycles it takes to issue the instructions scheduled in this
/// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
/// See getStalls().
@@ -671,10 +688,6 @@ public:
/// Micro-ops issued in the current cycle
unsigned getCurrMOps() const { return CurrMOps; }
- /// Return true if the given SU is used by the most recently scheduled
- /// instruction.
- bool isNextSU(const SUnit *SU) const { return NextSUs.count(SU); }
-
// The latency of dependence chains leading into this zone.
unsigned getDependentLatency() const { return DependentLatency; }
@@ -728,10 +741,6 @@ public:
void releaseNode(SUnit *SU, unsigned ReadyCycle);
- void releaseTopNode(SUnit *SU);
-
- void releaseBottomNode(SUnit *SU);
-
void bumpCycle(unsigned NextCycle);
void incExecutedResources(unsigned PIdx, unsigned Count);
@@ -861,7 +870,7 @@ protected:
const TargetRegisterInfo *TRI;
SchedRemainder Rem;
-protected:
+
GenericSchedulerBase(const MachineSchedContext *C):
Context(C), SchedModel(nullptr), TRI(nullptr) {}
@@ -876,18 +885,6 @@ protected:
/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
- ScheduleDAGMILive *DAG;
-
- // State of the top and bottom scheduled instruction boundaries.
- SchedBoundary Top;
- SchedBoundary Bot;
-
- /// Candidate last picked from Top boundary.
- SchedCandidate TopCand;
- /// Candidate last picked from Bot boundary.
- SchedCandidate BotCand;
-
- MachineSchedPolicy RegionPolicy;
public:
GenericScheduler(const MachineSchedContext *C):
GenericSchedulerBase(C), DAG(nullptr), Top(SchedBoundary::TopQID, "TopQ"),
@@ -914,18 +911,37 @@ public:
void schedNode(SUnit *SU, bool IsTopNode) override;
void releaseTopNode(SUnit *SU) override {
- Top.releaseTopNode(SU);
+ if (SU->isScheduled)
+ return;
+
+ Top.releaseNode(SU, SU->TopReadyCycle);
TopCand.SU = nullptr;
}
void releaseBottomNode(SUnit *SU) override {
- Bot.releaseBottomNode(SU);
+ if (SU->isScheduled)
+ return;
+
+ Bot.releaseNode(SU, SU->BotReadyCycle);
BotCand.SU = nullptr;
}
void registerRoots() override;
protected:
+ ScheduleDAGMILive *DAG;
+
+ MachineSchedPolicy RegionPolicy;
+
+ // State of the top and bottom scheduled instruction boundaries.
+ SchedBoundary Top;
+ SchedBoundary Bot;
+
+ /// Candidate last picked from Top boundary.
+ SchedCandidate TopCand;
+ /// Candidate last picked from Bot boundary.
+ SchedCandidate BotCand;
+
void checkAcyclicLatency();
void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
@@ -955,11 +971,12 @@ class PostGenericScheduler : public GenericSchedulerBase {
ScheduleDAGMI *DAG;
SchedBoundary Top;
SmallVector<SUnit*, 8> BotRoots;
+
public:
PostGenericScheduler(const MachineSchedContext *C):
GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}
- ~PostGenericScheduler() override {}
+ ~PostGenericScheduler() override = default;
void initPolicy(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
@@ -983,7 +1000,9 @@ public:
void schedNode(SUnit *SU, bool IsTopNode) override;
void releaseTopNode(SUnit *SU) override {
- Top.releaseTopNode(SU);
+ if (SU->isScheduled)
+ return;
+ Top.releaseNode(SU, SU->TopReadyCycle);
}
// Only called for roots.
@@ -997,6 +1016,29 @@ protected:
void pickNodeFromQueue(SchedCandidate &Cand);
};
-} // namespace llvm
+/// Create the standard converging machine scheduler. This will be used as the
+/// default scheduler if the target does not set a default.
+/// Adds default DAG mutations.
+ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);
-#endif
+/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
+ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);
+
+std::unique_ptr<ScheduleDAGMutation>
+createLoadClusterDAGMutation(const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI);
+
+std::unique_ptr<ScheduleDAGMutation>
+createStoreClusterDAGMutation(const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI);
+
+std::unique_ptr<ScheduleDAGMutation>
+createMacroFusionDAGMutation(const TargetInstrInfo *TII);
+
+std::unique_ptr<ScheduleDAGMutation>
+createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINESCHEDULER_H
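
A sketch of the factory-function pattern the updated header comment describes: start from the standard converging scheduler and layer extra DAG mutations on top. A target would typically return such a DAG from its createMachineScheduler() hook.

#include "llvm/CodeGen/MachineScheduler.h"

llvm::ScheduleDAGInstrs *
buildClusteringScheduler(llvm::MachineSchedContext *C) {
  // createGenericSchedLive() already adds the default mutations; additional
  // ones can be layered on. addMutation() now ignores null mutations, so a
  // factory may return nullptr when its mutation does not apply.
  llvm::ScheduleDAGMILive *DAG = llvm::createGenericSchedLive(C);
  DAG->addMutation(llvm::createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(llvm::createMacroFusionDAGMutation(DAG->TII));
  return DAG;
}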
diff --git a/include/llvm/CodeGen/MachineValueType.h b/include/llvm/CodeGen/MachineValueType.h
index 0bb53d1a5374..de7064f07c3e 100644
--- a/include/llvm/CodeGen/MachineValueType.h
+++ b/include/llvm/CodeGen/MachineValueType.h
@@ -178,8 +178,8 @@ class MVT {
SimpleValueType SimpleTy;
- LLVM_CONSTEXPR MVT() : SimpleTy(INVALID_SIMPLE_VALUE_TYPE) {}
- LLVM_CONSTEXPR MVT(SimpleValueType SVT) : SimpleTy(SVT) { }
+ constexpr MVT() : SimpleTy(INVALID_SIMPLE_VALUE_TYPE) {}
+ constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {}
bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
diff --git a/include/llvm/CodeGen/PBQP/Graph.h b/include/llvm/CodeGen/PBQP/Graph.h
index 8301ca4d8536..83487e6a808a 100644
--- a/include/llvm/CodeGen/PBQP/Graph.h
+++ b/include/llvm/CodeGen/PBQP/Graph.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
#define LLVM_CODEGEN_PBQP_GRAPH_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
@@ -109,13 +110,6 @@ namespace PBQP {
ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
}
- void invalidate() {
- NIds[0] = NIds[1] = Graph::invalidNodeId();
- ThisEdgeAdjIdxs[0] = ThisEdgeAdjIdxs[1] =
- NodeEntry::getInvalidAdjEdgeIdx();
- Costs = nullptr;
- }
-
void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
"Edge already connected to NIds[NIdx].");
@@ -123,15 +117,6 @@ namespace PBQP {
ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
}
- void connectTo(Graph &G, EdgeId ThisEdgeId, NodeId NId) {
- if (NId == NIds[0])
- connectToN(G, ThisEdgeId, 0);
- else {
- assert(NId == NIds[1] && "Edge does not connect NId.");
- connectToN(G, ThisEdgeId, 1);
- }
- }
-
void connect(Graph &G, EdgeId ThisEdgeId) {
connectToN(G, ThisEdgeId, 0);
connectToN(G, ThisEdgeId, 1);
@@ -262,9 +247,7 @@ namespace PBQP {
private:
NodeId findNextInUse(NodeId NId) const {
- while (NId < EndNId &&
- std::find(FreeNodeIds.begin(), FreeNodeIds.end(), NId) !=
- FreeNodeIds.end()) {
+ while (NId < EndNId && is_contained(FreeNodeIds, NId)) {
++NId;
}
return NId;
@@ -288,9 +271,7 @@ namespace PBQP {
private:
EdgeId findNextInUse(EdgeId EId) const {
- while (EId < EndEId &&
- std::find(FreeEdgeIds.begin(), FreeEdgeIds.end(), EId) !=
- FreeEdgeIds.end()) {
+ while (EId < EndEId && is_contained(FreeEdgeIds, EId)) {
++EId;
}
return EId;
diff --git a/include/llvm/CodeGen/PBQP/Math.h b/include/llvm/CodeGen/PBQP/Math.h
index 2792608e29cc..278787550a43 100644
--- a/include/llvm/CodeGen/PBQP/Math.h
+++ b/include/llvm/CodeGen/PBQP/Math.h
@@ -27,121 +27,78 @@ public:
/// \brief Construct a PBQP vector of the given size.
explicit Vector(unsigned Length)
- : Length(Length), Data(new PBQPNum[Length]) {
- // llvm::dbgs() << "Constructing PBQP::Vector "
- // << this << " (length " << Length << ")\n";
- }
+ : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {}
/// \brief Construct a PBQP vector with initializer.
Vector(unsigned Length, PBQPNum InitVal)
- : Length(Length), Data(new PBQPNum[Length]) {
- // llvm::dbgs() << "Constructing PBQP::Vector "
- // << this << " (length " << Length << ", fill "
- // << InitVal << ")\n";
- std::fill(Data, Data + Length, InitVal);
+ : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
+ std::fill(Data.get(), Data.get() + Length, InitVal);
}
/// \brief Copy construct a PBQP vector.
Vector(const Vector &V)
- : Length(V.Length), Data(new PBQPNum[Length]) {
- // llvm::dbgs() << "Copy-constructing PBQP::Vector " << this
- // << " from PBQP::Vector " << &V << "\n";
- std::copy(V.Data, V.Data + Length, Data);
+ : Length(V.Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
+ std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
}
/// \brief Move construct a PBQP vector.
Vector(Vector &&V)
- : Length(V.Length), Data(V.Data) {
- V.Length = 0;
- V.Data = nullptr;
- }
-
- /// \brief Destroy this vector, return its memory.
- ~Vector() {
- // llvm::dbgs() << "Deleting PBQP::Vector " << this << "\n";
- delete[] Data;
- }
-
- /// \brief Copy-assignment operator.
- Vector& operator=(const Vector &V) {
- // llvm::dbgs() << "Assigning to PBQP::Vector " << this
- // << " from PBQP::Vector " << &V << "\n";
- delete[] Data;
- Length = V.Length;
- Data = new PBQPNum[Length];
- std::copy(V.Data, V.Data + Length, Data);
- return *this;
- }
-
- /// \brief Move-assignment operator.
- Vector& operator=(Vector &&V) {
- delete[] Data;
- Length = V.Length;
- Data = V.Data;
+ : Length(V.Length), Data(std::move(V.Data)) {
V.Length = 0;
- V.Data = nullptr;
- return *this;
}
/// \brief Comparison operator.
bool operator==(const Vector &V) const {
- assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Length != 0 && Data && "Invalid vector");
if (Length != V.Length)
return false;
- return std::equal(Data, Data + Length, V.Data);
+ return std::equal(Data.get(), Data.get() + Length, V.Data.get());
}
/// \brief Return the length of the vector
unsigned getLength() const {
- assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Length != 0 && Data && "Invalid vector");
return Length;
}
/// \brief Element access.
PBQPNum& operator[](unsigned Index) {
- assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Length != 0 && Data && "Invalid vector");
assert(Index < Length && "Vector element access out of bounds.");
return Data[Index];
}
/// \brief Const element access.
const PBQPNum& operator[](unsigned Index) const {
- assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Length != 0 && Data && "Invalid vector");
assert(Index < Length && "Vector element access out of bounds.");
return Data[Index];
}
/// \brief Add another vector to this one.
Vector& operator+=(const Vector &V) {
- assert(Length != 0 && Data != nullptr && "Invalid vector");
- assert(Length == V.Length && "Vector length mismatch.");
- std::transform(Data, Data + Length, V.Data, Data, std::plus<PBQPNum>());
- return *this;
- }
-
- /// \brief Subtract another vector from this one.
- Vector& operator-=(const Vector &V) {
- assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Length != 0 && Data && "Invalid vector");
assert(Length == V.Length && "Vector length mismatch.");
- std::transform(Data, Data + Length, V.Data, Data, std::minus<PBQPNum>());
+ std::transform(Data.get(), Data.get() + Length, V.Data.get(), Data.get(),
+ std::plus<PBQPNum>());
return *this;
}
/// \brief Returns the index of the minimum value in this vector
unsigned minIndex() const {
- assert(Length != 0 && Data != nullptr && "Invalid vector");
- return std::min_element(Data, Data + Length) - Data;
+ assert(Length != 0 && Data && "Invalid vector");
+ return std::min_element(Data.get(), Data.get() + Length) - Data.get();
}
private:
unsigned Length;
- PBQPNum *Data;
+ std::unique_ptr<PBQPNum []> Data;
};
/// \brief Return a hash_value for the given vector.
inline hash_code hash_value(const Vector &V) {
- unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data);
- unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data + V.Length);
+ unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
+ unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
}
@@ -167,89 +124,67 @@ public:
/// \brief Construct a PBQP Matrix with the given dimensions.
Matrix(unsigned Rows, unsigned Cols) :
- Rows(Rows), Cols(Cols), Data(new PBQPNum[Rows * Cols]) {
+ Rows(Rows), Cols(Cols), Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
}
/// \brief Construct a PBQP Matrix with the given dimensions and initial
/// value.
Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
- : Rows(Rows), Cols(Cols), Data(new PBQPNum[Rows * Cols]) {
- std::fill(Data, Data + (Rows * Cols), InitVal);
+ : Rows(Rows), Cols(Cols),
+ Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+ std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
}
/// \brief Copy construct a PBQP matrix.
Matrix(const Matrix &M)
- : Rows(M.Rows), Cols(M.Cols), Data(new PBQPNum[Rows * Cols]) {
- std::copy(M.Data, M.Data + (Rows * Cols), Data);
+ : Rows(M.Rows), Cols(M.Cols),
+ Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+ std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
}
/// \brief Move construct a PBQP matrix.
Matrix(Matrix &&M)
- : Rows(M.Rows), Cols(M.Cols), Data(M.Data) {
+ : Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
M.Rows = M.Cols = 0;
- M.Data = nullptr;
- }
-
- /// \brief Destroy this matrix, return its memory.
- ~Matrix() { delete[] Data; }
-
- /// \brief Copy-assignment operator.
- Matrix& operator=(const Matrix &M) {
- delete[] Data;
- Rows = M.Rows; Cols = M.Cols;
- Data = new PBQPNum[Rows * Cols];
- std::copy(M.Data, M.Data + (Rows * Cols), Data);
- return *this;
- }
-
- /// \brief Move-assignment operator.
- Matrix& operator=(Matrix &&M) {
- delete[] Data;
- Rows = M.Rows;
- Cols = M.Cols;
- Data = M.Data;
- M.Rows = M.Cols = 0;
- M.Data = nullptr;
- return *this;
}
/// \brief Comparison operator.
bool operator==(const Matrix &M) const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
if (Rows != M.Rows || Cols != M.Cols)
return false;
- return std::equal(Data, Data + (Rows * Cols), M.Data);
+ return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
}
/// \brief Return the number of rows in this matrix.
unsigned getRows() const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
return Rows;
}
/// \brief Return the number of cols in this matrix.
unsigned getCols() const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
return Cols;
}
/// \brief Matrix element access.
PBQPNum* operator[](unsigned R) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
assert(R < Rows && "Row out of bounds.");
- return Data + (R * Cols);
+ return Data.get() + (R * Cols);
}
/// \brief Matrix element access.
const PBQPNum* operator[](unsigned R) const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
assert(R < Rows && "Row out of bounds.");
- return Data + (R * Cols);
+ return Data.get() + (R * Cols);
}
/// \brief Returns the given row as a vector.
Vector getRowAsVector(unsigned R) const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
Vector V(Cols);
for (unsigned C = 0; C < Cols; ++C)
V[C] = (*this)[R][C];
@@ -258,40 +193,16 @@ public:
/// \brief Returns the given column as a vector.
Vector getColAsVector(unsigned C) const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
Vector V(Rows);
for (unsigned R = 0; R < Rows; ++R)
V[R] = (*this)[R][C];
return V;
}
- /// \brief Reset the matrix to the given value.
- Matrix& reset(PBQPNum Val = 0) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- std::fill(Data, Data + (Rows * Cols), Val);
- return *this;
- }
-
- /// \brief Set a single row of this matrix to the given value.
- Matrix& setRow(unsigned R, PBQPNum Val) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- assert(R < Rows && "Row out of bounds.");
- std::fill(Data + (R * Cols), Data + ((R + 1) * Cols), Val);
- return *this;
- }
-
- /// \brief Set a single column of this matrix to the given value.
- Matrix& setCol(unsigned C, PBQPNum Val) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- assert(C < Cols && "Column out of bounds.");
- for (unsigned R = 0; R < Rows; ++R)
- (*this)[R][C] = Val;
- return *this;
- }
-
/// \brief Matrix transpose.
Matrix transpose() const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
Matrix M(Cols, Rows);
for (unsigned r = 0; r < Rows; ++r)
for (unsigned c = 0; c < Cols; ++c)
@@ -299,87 +210,33 @@ public:
return M;
}
- /// \brief Returns the diagonal of the matrix as a vector.
- ///
- /// Matrix must be square.
- Vector diagonalize() const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- assert(Rows == Cols && "Attempt to diagonalize non-square matrix.");
- Vector V(Rows);
- for (unsigned r = 0; r < Rows; ++r)
- V[r] = (*this)[r][r];
- return V;
- }
-
/// \brief Add the given matrix to this one.
Matrix& operator+=(const Matrix &M) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
assert(Rows == M.Rows && Cols == M.Cols &&
"Matrix dimensions mismatch.");
- std::transform(Data, Data + (Rows * Cols), M.Data, Data,
- std::plus<PBQPNum>());
+ std::transform(Data.get(), Data.get() + (Rows * Cols), M.Data.get(),
+ Data.get(), std::plus<PBQPNum>());
return *this;
}
Matrix operator+(const Matrix &M) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
Matrix Tmp(*this);
Tmp += M;
return Tmp;
}
- /// \brief Returns the minimum of the given row
- PBQPNum getRowMin(unsigned R) const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- assert(R < Rows && "Row out of bounds");
- return *std::min_element(Data + (R * Cols), Data + ((R + 1) * Cols));
- }
-
- /// \brief Returns the minimum of the given column
- PBQPNum getColMin(unsigned C) const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- PBQPNum MinElem = (*this)[0][C];
- for (unsigned R = 1; R < Rows; ++R)
- if ((*this)[R][C] < MinElem)
- MinElem = (*this)[R][C];
- return MinElem;
- }
-
- /// \brief Subtracts the given scalar from the elements of the given row.
- Matrix& subFromRow(unsigned R, PBQPNum Val) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- assert(R < Rows && "Row out of bounds");
- std::transform(Data + (R * Cols), Data + ((R + 1) * Cols),
- Data + (R * Cols),
- std::bind2nd(std::minus<PBQPNum>(), Val));
- return *this;
- }
-
- /// \brief Subtracts the given scalar from the elements of the given column.
- Matrix& subFromCol(unsigned C, PBQPNum Val) {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- for (unsigned R = 0; R < Rows; ++R)
- (*this)[R][C] -= Val;
- return *this;
- }
-
- /// \brief Returns true if this is a zero matrix.
- bool isZero() const {
- assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
- return find_if(Data, Data + (Rows * Cols),
- std::bind2nd(std::not_equal_to<PBQPNum>(), 0)) ==
- Data + (Rows * Cols);
- }
-
private:
unsigned Rows, Cols;
- PBQPNum *Data;
+ std::unique_ptr<PBQPNum []> Data;
};
/// \brief Return a hash_code for the given matrix.
inline hash_code hash_value(const Matrix &M) {
- unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data);
- unsigned *MEnd = reinterpret_cast<unsigned*>(M.Data + (M.Rows * M.Cols));
+ unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
+ unsigned *MEnd =
+ reinterpret_cast<unsigned*>(M.Data.get() + (M.Rows * M.Cols));
return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
}
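
A small usage sketch for the PBQP containers after the switch to std::unique_ptr storage: copy construction is preserved and moves are now cheap, so value-style code keeps working. The helper is illustrative only.

#include "llvm/CodeGen/PBQP/Math.h"

llvm::PBQP::Vector sumOfRows(const llvm::PBQP::Matrix &M) {
  // Accumulate every row of M into a single vector and return it by value
  // (copy elision or the defaulted move constructor applies).
  llvm::PBQP::Vector Sum(M.getCols(), 0);
  for (unsigned R = 0; R < M.getRows(); ++R)
    Sum += M.getRowAsVector(R);
  return Sum;
}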
diff --git a/include/llvm/CodeGen/PBQP/Solution.h b/include/llvm/CodeGen/PBQP/Solution.h
index a3bfaeb7e6c7..bd74805a2397 100644
--- a/include/llvm/CodeGen/PBQP/Solution.h
+++ b/include/llvm/CodeGen/PBQP/Solution.h
@@ -38,38 +38,6 @@ namespace PBQP {
Solution()
: r0Reductions(0), r1Reductions(0), r2Reductions(0), rNReductions(0) {}
- /// \brief Number of nodes for which selections have been made.
- /// @return Number of nodes for which selections have been made.
- unsigned numNodes() const { return selections.size(); }
-
- /// \brief Records a reduction via the R0 rule. Should be called from the
- /// solver only.
- void recordR0() { ++r0Reductions; }
-
- /// \brief Returns the number of R0 reductions applied to solve the problem.
- unsigned numR0Reductions() const { return r0Reductions; }
-
- /// \brief Records a reduction via the R1 rule. Should be called from the
- /// solver only.
- void recordR1() { ++r1Reductions; }
-
- /// \brief Returns the number of R1 reductions applied to solve the problem.
- unsigned numR1Reductions() const { return r1Reductions; }
-
- /// \brief Records a reduction via the R2 rule. Should be called from the
- /// solver only.
- void recordR2() { ++r2Reductions; }
-
- /// \brief Returns the number of R2 reductions applied to solve the problem.
- unsigned numR2Reductions() const { return r2Reductions; }
-
- /// \brief Records a reduction via the RN rule. Should be called from the
- /// solver only.
- void recordRN() { ++ rNReductions; }
-
- /// \brief Returns the number of RN reductions applied to solve the problem.
- unsigned numRNReductions() const { return rNReductions; }
-
/// \brief Set the selection for a given node.
/// @param nodeId Node id.
/// @param selection Selection for nodeId.
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
index ae9e5dfe2d65..a9fd301691d6 100644
--- a/include/llvm/CodeGen/Passes.h
+++ b/include/llvm/CodeGen/Passes.h
@@ -20,8 +20,8 @@
namespace llvm {
-class Function;
class FunctionPass;
+class MachineFunction;
class MachineFunctionPass;
class ModulePass;
class Pass;
@@ -43,6 +43,9 @@ namespace llvm {
/// the entry block.
FunctionPass *createUnreachableBlockEliminationPass();
+ /// Insert mcount-like function calls.
+ FunctionPass *createCountingFunctionInserterPass();
+
/// MachineFunctionPrinter pass - This pass prints out the machine function to
/// the given stream as a debugging tool.
MachineFunctionPass *
@@ -53,6 +56,12 @@ namespace llvm {
/// using the MIR serialization format.
MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
+ /// This pass resets a MachineFunction when it has the FailedISel property
+ /// as if it was just created.
+ /// If EmitFallbackDiag is true, the pass will emit a
+ /// DiagnosticInfoISelFallback for every MachineFunction it resets.
+ MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag);
+
/// createCodeGenPreparePass - Transform the code to expose more pattern
/// matching during instruction selection.
FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
@@ -115,6 +124,9 @@ namespace llvm {
// instruction and update the MachineFunctionInfo with that information.
extern char &ShrinkWrapID;
+ /// Greedy register allocator.
+ extern char &RAGreedyID;
+
/// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
/// assigned in VirtRegMap.
extern char &VirtRegRewriterID;
@@ -172,6 +184,10 @@ namespace llvm {
/// branches.
extern char &BranchFolderPassID;
+ /// BranchRelaxation - This pass replaces branches that need to jump further
+ /// than is supported by a branch instruction.
+ extern char &BranchRelaxationPassID;
+
/// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
extern char &MachineFunctionPrinterPassID;
@@ -202,7 +218,8 @@ namespace llvm {
/// IfConverter - This pass performs machine code if conversion.
extern char &IfConverterID;
- FunctionPass *createIfConverter(std::function<bool(const Function &)> Ftor);
+ FunctionPass *createIfConverter(
+ std::function<bool(const MachineFunction &)> Ftor);
/// MachineBlockPlacement - This pass places basic blocks based on branch
/// probabilities.
@@ -313,7 +330,7 @@ namespace llvm {
extern char &UnpackMachineBundlesID;
FunctionPass *
- createUnpackMachineBundles(std::function<bool(const Function &)> Ftor);
+ createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
/// FinalizeMachineBundles - This pass finalize machine instruction
/// bundles (created earlier, e.g. during pre-RA scheduling).
@@ -374,6 +391,12 @@ namespace llvm {
/// and propagates register usage information of callee to caller
/// if available with PhysicalRegisterUsageInfo pass.
FunctionPass *createRegUsageInfoPropPass();
+
+ /// This pass performs software pipelining on machine instructions.
+ extern char &MachinePipelinerID;
+
+ /// This pass frees the memory occupied by the MachineFunction.
+ FunctionPass *createFreeMachineFunctionPass();
} // End llvm namespace
/// Target machine pass initializer for passes with dependencies. Use with
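
A hedged sketch of how the new pass factories above might be invoked; where these passes sit in a real codegen pipeline is target-dependent, and the helper name is made up for illustration.

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/LegacyPassManager.h"

void addIselFallbackAndCleanup(llvm::legacy::PassManagerBase &PM) {
  // Reset any MachineFunction carrying the FailedISel property (emitting a
  // DiagnosticInfoISelFallback for it), then free MachineFunction memory
  // once codegen no longer needs it.
  PM.add(llvm::createResetMachineFunctionPass(/*EmitFallbackDiag=*/true));
  PM.add(llvm::createFreeMachineFunctionPass());
}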
diff --git a/include/llvm/CodeGen/PseudoSourceValue.h b/include/llvm/CodeGen/PseudoSourceValue.h
index c3f6fde9fb3f..681ccb4b997c 100644
--- a/include/llvm/CodeGen/PseudoSourceValue.h
+++ b/include/llvm/CodeGen/PseudoSourceValue.h
@@ -42,7 +42,8 @@ public:
ConstantPool,
FixedStack,
GlobalValueCallEntry,
- ExternalSymbolCallEntry
+ ExternalSymbolCallEntry,
+ TargetCustom
};
private:
@@ -67,6 +68,9 @@ public:
bool isGOT() const { return Kind == GOT; }
bool isConstantPool() const { return Kind == ConstantPool; }
bool isJumpTable() const { return Kind == JumpTable; }
+ unsigned getTargetCustom() const {
+ return (Kind >= TargetCustom) ? ((Kind+1) - TargetCustom) : 0;
+ }
/// Test whether the memory pointed to by this PseudoSourceValue has a
/// constant value.
diff --git a/include/llvm/CodeGen/RegAllocPBQP.h b/include/llvm/CodeGen/RegAllocPBQP.h
index 21952272ffdb..2cad90bbb703 100644
--- a/include/llvm/CodeGen/RegAllocPBQP.h
+++ b/include/llvm/CodeGen/RegAllocPBQP.h
@@ -89,26 +89,7 @@ public:
std::copy(OptVec.begin(), OptVec.end(), Opts.get());
}
- AllowedRegVector(const AllowedRegVector &Other)
- : NumOpts(Other.NumOpts), Opts(new unsigned[NumOpts]) {
- std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
- }
-
- AllowedRegVector(AllowedRegVector &&Other)
- : NumOpts(std::move(Other.NumOpts)), Opts(std::move(Other.Opts)) {}
-
- AllowedRegVector& operator=(const AllowedRegVector &Other) {
- NumOpts = Other.NumOpts;
- Opts.reset(new unsigned[NumOpts]);
- std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
- return *this;
- }
-
- AllowedRegVector& operator=(AllowedRegVector &&Other) {
- NumOpts = std::move(Other.NumOpts);
- Opts = std::move(Other.Opts);
- return *this;
- }
+ AllowedRegVector(AllowedRegVector &&) = default;
unsigned size() const { return NumOpts; }
unsigned operator[](size_t I) const { return Opts[I]; }
@@ -163,10 +144,6 @@ public:
return VRegItr->second;
}
- void eraseNodeIdForVReg(unsigned VReg) {
- VRegToNodeId.erase(VReg);
- }
-
AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
return AllowedRegVecs.getValue(std::move(Allowed));
}
@@ -199,8 +176,6 @@ public:
#endif
{}
- // FIXME: Re-implementing default behavior to work around MSVC. Remove once
- // MSVC synthesizes move constructors properly.
NodeMetadata(const NodeMetadata &Other)
: RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
@@ -215,48 +190,9 @@ public:
}
}
- // FIXME: Re-implementing default behavior to work around MSVC. Remove once
- // MSVC synthesizes move constructors properly.
- NodeMetadata(NodeMetadata &&Other)
- : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
- OptUnsafeEdges(std::move(Other.OptUnsafeEdges)), VReg(Other.VReg),
- AllowedRegs(std::move(Other.AllowedRegs))
-#ifndef NDEBUG
- , everConservativelyAllocatable(Other.everConservativelyAllocatable)
-#endif
- {}
-
- // FIXME: Re-implementing default behavior to work around MSVC. Remove once
- // MSVC synthesizes move constructors properly.
- NodeMetadata& operator=(const NodeMetadata &Other) {
- RS = Other.RS;
- NumOpts = Other.NumOpts;
- DeniedOpts = Other.DeniedOpts;
- OptUnsafeEdges.reset(new unsigned[NumOpts]);
- std::copy(Other.OptUnsafeEdges.get(), Other.OptUnsafeEdges.get() + NumOpts,
- OptUnsafeEdges.get());
- VReg = Other.VReg;
- AllowedRegs = Other.AllowedRegs;
-#ifndef NDEBUG
- everConservativelyAllocatable = Other.everConservativelyAllocatable;
-#endif
- return *this;
- }
+ NodeMetadata(NodeMetadata &&Other) = default;
- // FIXME: Re-implementing default behavior to work around MSVC. Remove once
- // MSVC synthesizes move constructors properly.
- NodeMetadata& operator=(NodeMetadata &&Other) {
- RS = Other.RS;
- NumOpts = Other.NumOpts;
- DeniedOpts = Other.DeniedOpts;
- OptUnsafeEdges = std::move(Other.OptUnsafeEdges);
- VReg = Other.VReg;
- AllowedRegs = std::move(Other.AllowedRegs);
-#ifndef NDEBUG
- everConservativelyAllocatable = Other.everConservativelyAllocatable;
-#endif
- return *this;
- }
+ NodeMetadata& operator=(NodeMetadata &&Other) = default;
void setVReg(unsigned VReg) { this->VReg = VReg; }
unsigned getVReg() const { return VReg; }
@@ -284,7 +220,6 @@ public:
#endif
}
-
void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
const bool* UnsafeOpts =
@@ -369,11 +304,6 @@ public:
handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
}
- void handleRemoveEdge(EdgeId EId) {
- handleDisconnectEdge(EId, G.getEdgeNode1Id(EId));
- handleDisconnectEdge(EId, G.getEdgeNode2Id(EId));
- }
-
void handleDisconnectEdge(EdgeId EId, NodeId NId) {
NodeMetadata& NMd = G.getNodeMetadata(NId);
const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
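The deletions above drop hand-written copy/move members that only re-implemented what the compiler now generates correctly for std::unique_ptr-owning classes. A standalone miniature of that pattern (the class name and layout are invented for illustration):

#include <algorithm>
#include <cstddef>
#include <memory>

// Hypothetical miniature of AllowedRegVector: a count plus a heap array owned
// through std::unique_ptr.
class OptVector {
  unsigned NumOpts = 0;
  std::unique_ptr<unsigned[]> Opts;

public:
  OptVector() = default;
  OptVector(const unsigned *Begin, const unsigned *End)
      : NumOpts(End - Begin), Opts(new unsigned[NumOpts]) {
    std::copy(Begin, End, Opts.get());
  }

  // The defaulted moves do exactly what the deleted hand-written versions did:
  // move the unique_ptr and copy the count. No copy operations are declared,
  // so accidental deep copies fail to compile instead of silently allocating.
  OptVector(OptVector &&) = default;
  OptVector &operator=(OptVector &&) = default;

  unsigned size() const { return NumOpts; }
  unsigned operator[](std::size_t I) const { return Opts[I]; }
};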
diff --git a/include/llvm/CodeGen/RegisterPressure.h b/include/llvm/CodeGen/RegisterPressure.h
index aaddac40ca76..313be355e7d7 100644
--- a/include/llvm/CodeGen/RegisterPressure.h
+++ b/include/llvm/CodeGen/RegisterPressure.h
@@ -278,7 +278,7 @@ public:
unsigned SparseIndex = getSparseIndexFromReg(Reg);
RegSet::const_iterator I = Regs.find(SparseIndex);
if (I == Regs.end())
- return 0;
+ return LaneBitmask::getNone();
return I->LaneMask;
}
@@ -288,11 +288,11 @@ public:
unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
auto InsertRes = Regs.insert(IndexMaskPair(SparseIndex, Pair.LaneMask));
if (!InsertRes.second) {
- unsigned PrevMask = InsertRes.first->LaneMask;
+ LaneBitmask PrevMask = InsertRes.first->LaneMask;
InsertRes.first->LaneMask |= Pair.LaneMask;
return PrevMask;
}
- return 0;
+ return LaneBitmask::getNone();
}
/// Clears the \p Pair.LaneMask lanes of \p Pair.Reg (mark them as dead).
@@ -301,8 +301,8 @@ public:
unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
RegSet::iterator I = Regs.find(SparseIndex);
if (I == Regs.end())
- return 0;
- unsigned PrevMask = I->LaneMask;
+ return LaneBitmask::getNone();
+ LaneBitmask PrevMask = I->LaneMask;
I->LaneMask &= ~Pair.LaneMask;
return PrevMask;
}
@@ -315,7 +315,7 @@ public:
void appendTo(ContainerT &To) const {
for (const IndexMaskPair &P : Regs) {
unsigned Reg = getRegFromSparseIndex(P.Index);
- if (P.LaneMask != 0)
+ if (P.LaneMask.any())
To.push_back(RegisterMaskPair(Reg, P.LaneMask));
}
}
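The RegisterPressure changes above are part of moving lane masks from bare unsigned values to the LaneBitmask type, which is why 0 becomes getNone() and != 0 becomes any(). Below is a standalone miniature of such a wrapper to show the idiom change; it is not the real llvm/MC/LaneBitmask.h.

#include <cassert>
#include <cstdint>

// Miniature lane-mask wrapper: a strong type around the raw bits so lane
// masks cannot be mixed up with unrelated unsigned values.
class MiniLaneBitmask {
  uint32_t Mask = 0;
  constexpr explicit MiniLaneBitmask(uint32_t M) : Mask(M) {}

public:
  constexpr MiniLaneBitmask() = default;
  static constexpr MiniLaneBitmask getNone() { return MiniLaneBitmask(0); }
  static constexpr MiniLaneBitmask getAll() { return MiniLaneBitmask(~0u); }
  static constexpr MiniLaneBitmask getLane(unsigned L) { return MiniLaneBitmask(1u << L); }

  constexpr bool any() const { return Mask != 0; }   // replaces `Mask != 0`
  constexpr bool none() const { return Mask == 0; }  // replaces `Mask == 0`
  constexpr MiniLaneBitmask operator|(MiniLaneBitmask O) const { return MiniLaneBitmask(Mask | O.Mask); }
  constexpr MiniLaneBitmask operator&(MiniLaneBitmask O) const { return MiniLaneBitmask(Mask & O.Mask); }
};

int main() {
  MiniLaneBitmask M = MiniLaneBitmask::getNone();   // old code: unsigned M = 0;
  M = M | MiniLaneBitmask::getLane(1);
  assert(M.any());                                  // old code: M != 0
  assert((M & MiniLaneBitmask::getLane(0)).none());
  return 0;
}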
diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h
index efe1a3c6d0f7..1b9232b90193 100644
--- a/include/llvm/CodeGen/RegisterScavenging.h
+++ b/include/llvm/CodeGen/RegisterScavenging.h
@@ -75,6 +75,12 @@ public:
/// Start tracking liveness from the begin of basic block \p MBB.
void enterBasicBlock(MachineBasicBlock &MBB);
+ /// Start tracking liveness from the end of basic block \p MBB.
+ /// Use backward() to move towards the beginning of the block. This is
+ /// preferred to enterBasicBlock() and forward() because it does not depend
+ /// on the presence of kill flags.
+ void enterBasicBlockEnd(MachineBasicBlock &MBB);
+
/// Move the internal MBB iterator and update register states.
void forward();
@@ -94,6 +100,17 @@ public:
while (MBBI != I) unprocess();
}
+ /// Update internal register state and move MBB iterator backwards.
+ /// Contrary to unprocess() this method gives precise results even in the
+ /// absence of kill flags.
+ void backward();
+
+ /// Call backward() as long as the internal iterator does not point to \p I.
+ void backward(MachineBasicBlock::iterator I) {
+ while (MBBI != I)
+ backward();
+ }
+
/// Move the internal MBB iterator but do not update register states.
void skipTo(MachineBasicBlock::iterator I) {
if (I == MachineBasicBlock::iterator(nullptr))
@@ -147,7 +164,7 @@ public:
}
/// Tell the scavenger a register is used.
- void setRegUsed(unsigned Reg, LaneBitmask LaneMask = ~0u);
+ void setRegUsed(unsigned Reg, LaneBitmask LaneMask = LaneBitmask::getAll());
private:
/// Returns true if a register is reserved. It is never "unused".
bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); }
@@ -168,6 +185,9 @@ private:
/// Add all Reg Units that Reg contains to BV.
void addRegUnits(BitVector &BV, unsigned Reg);
+ /// Remove all Reg Units that \p Reg contains from \p BV.
+ void removeRegUnits(BitVector &BV, unsigned Reg);
+
/// Return the candidate register that is unused for the longest after
/// StartMI. UseMI is set to the instruction where the search stopped.
///
@@ -177,9 +197,11 @@ private:
unsigned InstrLimit,
MachineBasicBlock::iterator &UseMI);
- /// Allow resetting register state info for multiple
- /// passes over/within the same function.
- void initRegState();
+ /// Initialize RegisterScavenger.
+ void init(MachineBasicBlock &MBB);
+
+ /// Mark live-in registers of basic block as used.
+ void setLiveInsUsed(const MachineBasicBlock &MBB);
};
} // End llvm namespace
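A rough usage sketch for the backward-scavenging entry points declared above. The loop structure and the per-instruction queries are assumptions, not something this header prescribes.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

// Sketch: walk a block bottom-up. Seeding the scavenger from the block end
// means its state comes from live-outs rather than from kill flags.
static void scanBlockBottomUp(RegScavenger &RS, MachineBasicBlock &MBB) {
  RS.enterBasicBlockEnd(MBB);
  for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin();) {
    --I;
    RS.backward(I);  // bring the register state to the point of I
    // ... query the scavenger here, e.g. RS.isRegUsed(SomeReg), where SomeReg
    // is a placeholder ...
  }
}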
diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h
index 16d305c7297f..ddfabb0c44d6 100644
--- a/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -333,6 +333,13 @@ namespace RTLIB {
MEMSET,
MEMMOVE,
+ // ELEMENT-WISE ATOMIC MEMORY
+ MEMCPY_ELEMENT_ATOMIC_1,
+ MEMCPY_ELEMENT_ATOMIC_2,
+ MEMCPY_ELEMENT_ATOMIC_4,
+ MEMCPY_ELEMENT_ATOMIC_8,
+ MEMCPY_ELEMENT_ATOMIC_16,
+
// EXCEPTION HANDLING
UNWIND_RESUME,
@@ -503,6 +510,10 @@ namespace RTLIB {
/// Return the SYNC_FETCH_AND_* value for the given opcode and type, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getSYNC(unsigned Opc, MVT VT);
+
+ /// getMEMCPY_ELEMENT_ATOMIC - Return MEMCPY_ELEMENT_ATOMIC_* value for the
+ /// given element size or UNKNOWN_LIBCALL if there is none.
+ Libcall getMEMCPY_ELEMENT_ATOMIC(uint64_t ElementSize);
}
}
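The natural implementation of the new lookup is a switch over the element size; the body below is an assumption about the shape of the .cpp side, not a copy of it, but it only uses enum values the header above declares.

#include "llvm/CodeGen/RuntimeLibcalls.h"

namespace llvm {
namespace RTLIB {
// Assumed shape of the implementation (a sketch, not the actual .cpp body).
Libcall getMEMCPY_ELEMENT_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:  return MEMCPY_ELEMENT_ATOMIC_1;
  case 2:  return MEMCPY_ELEMENT_ATOMIC_2;
  case 4:  return MEMCPY_ELEMENT_ATOMIC_4;
  case 8:  return MEMCPY_ELEMENT_ATOMIC_8;
  case 16: return MEMCPY_ELEMENT_ATOMIC_16;
  default: return UNKNOWN_LIBCALL;
  }
}
} // end namespace RTLIB
} // end namespace llvm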
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
index 6469cabd3de1..ed4e0bc8a4a1 100644
--- a/include/llvm/CodeGen/ScheduleDAG.h
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -679,24 +679,24 @@ namespace llvm {
};
template <> struct GraphTraits<SUnit*> {
- typedef SUnit NodeType;
+ typedef SUnit *NodeRef;
typedef SUnitIterator ChildIteratorType;
- static inline NodeType *getEntryNode(SUnit *N) { return N; }
- static inline ChildIteratorType child_begin(NodeType *N) {
+ static NodeRef getEntryNode(SUnit *N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) {
return SUnitIterator::begin(N);
}
- static inline ChildIteratorType child_end(NodeType *N) {
+ static ChildIteratorType child_end(NodeRef N) {
return SUnitIterator::end(N);
}
};
template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
- typedef std::vector<SUnit>::iterator nodes_iterator;
+ typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
static nodes_iterator nodes_begin(ScheduleDAG *G) {
- return G->SUnits.begin();
+ return nodes_iterator(G->SUnits.begin());
}
static nodes_iterator nodes_end(ScheduleDAG *G) {
- return G->SUnits.end();
+ return nodes_iterator(G->SUnits.end());
}
};
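With the GraphTraits specializations above migrated to NodeRef, the generic graph utilities keep working over the scheduling DAG. A small sketch (the traversal body is a placeholder):

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/ScheduleDAG.h"

using namespace llvm;

// Sketch: depth_first() picks up GraphTraits<SUnit*>, whose node handle is
// now the NodeRef typedef (SUnit*) instead of the old NodeType.
static unsigned countReachableSUnits(SUnit *Root) {
  unsigned Count = 0;
  for (SUnit *SU : depth_first(Root)) {
    (void)SU;  // a real client would inspect SU->Succs, SU->NodeNum, etc.
    ++Count;
  }
  return Count;
}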
diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 12124ecc4b3e..2746765f6e45 100644
--- a/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -100,7 +100,7 @@ namespace llvm {
class ScheduleDAGInstrs : public ScheduleDAG {
protected:
const MachineLoopInfo *MLI;
- const MachineFrameInfo *MFI;
+ const MachineFrameInfo &MFI;
/// TargetSchedModel provides an interface to the machine model.
TargetSchedModel SchedModel;
@@ -138,11 +138,6 @@ namespace llvm {
/// scheduling region is mapped to an SUnit.
DenseMap<MachineInstr*, SUnit*> MISUnitMap;
- /// After calling BuildSchedGraph, each vreg used in the scheduling region
- /// is mapped to a set of SUnits. These include all local vreg uses, not
- /// just the uses for a singly defined vreg.
- VReg2SUnitMultiMap VRegUses;
-
/// State internal to DAG building.
/// -------------------------------
@@ -333,8 +328,6 @@ namespace llvm {
/// Returns a mask for which lanes get read/written by the given (register)
/// machine operand.
LaneBitmask getLaneMaskForMO(const MachineOperand &MO) const;
-
- void collectVRegUses(SUnit *SU);
};
/// newSUnit - Creates a new SUnit and return a ptr to it.
@@ -345,7 +338,6 @@ namespace llvm {
SUnits.emplace_back(MI, (unsigned)SUnits.size());
assert((Addr == nullptr || Addr == &SUnits[0]) &&
"SUnits std::vector reallocated on the fly!");
- SUnits.back().OrigNode = &SUnits.back();
return &SUnits.back();
}
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index 29cce873c2f3..7927982e782d 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -81,24 +81,10 @@ template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTLis
}
};
-template<> struct ilist_traits<SDNode> : public ilist_default_traits<SDNode> {
-private:
- mutable ilist_half_node<SDNode> Sentinel;
-public:
- SDNode *createSentinel() const {
- return static_cast<SDNode*>(&Sentinel);
- }
- static void destroySentinel(SDNode *) {}
-
- SDNode *provideInitialHead() const { return createSentinel(); }
- SDNode *ensureHead(SDNode*) const { return createSentinel(); }
- static void noteHead(SDNode*, SDNode*) {}
-
+template <> struct ilist_alloc_traits<SDNode> {
static void deleteNode(SDNode *) {
llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
}
-private:
- static void createNode(const SDNode &);
};
/// Keeps track of dbg_value information through SDISel. We do
@@ -197,8 +183,8 @@ class SelectionDAG {
/// The AllocatorType for allocating SDNodes. We use
/// pool allocation with recycling.
typedef RecyclingAllocator<BumpPtrAllocator, SDNode, sizeof(LargestSDNode),
- AlignOf<MostAlignedSDNode>::Alignment>
- NodeAllocatorType;
+ alignof(MostAlignedSDNode)>
+ NodeAllocatorType;
/// Pool allocation for nodes.
NodeAllocatorType NodeAllocator;
@@ -284,6 +270,22 @@ private:
SDNodeT(std::forward<ArgTypes>(Args)...);
}
+ /// Build a synthetic SDNodeT with the given args and extract its subclass
+ /// data as an integer (e.g. for use in a folding set).
+ ///
+ /// The args to this function are the same as the args to SDNodeT's
+ /// constructor, except the second arg (assumed to be a const DebugLoc&) is
+ /// omitted.
+ template <typename SDNodeT, typename... ArgTypes>
+ static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
+ ArgTypes &&... Args) {
+ // The compiler can reduce this expression to a constant iff we pass an
+ // empty DebugLoc. Thankfully, the debug location doesn't have any bearing
+ // on the subclass data.
+ return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
+ .getRawSubclassData();
+ }
+
void createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
assert(!Node->OperandList && "Node already has operands");
SDUse *Ops = OperandRecycler.allocate(
@@ -663,11 +665,6 @@ public:
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
- /// Return a splat ISD::BUILD_VECTOR node, but with Op's SDLoc.
- SDValue getSplatBuildVector(EVT VT, SDValue Op) {
- return getSplatBuildVector(VT, SDLoc(Op), Op);
- }
-
/// \brief Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
/// the shuffle node in input but with swapped operands.
///
@@ -859,10 +856,7 @@ public:
SynchronizationScope SynchScope);
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTs, SDValue Chain, SDValue Ptr,
- SDValue Cmp, SDValue Swp, MachineMemOperand *MMO,
- AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
/// Gets a node for an atomic op, produces result (if relevant)
/// and chain and takes 2 operands.
@@ -871,26 +865,18 @@ public:
unsigned Alignment, AtomicOrdering Ordering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
- SDValue Ptr, SDValue Val, MachineMemOperand *MMO,
- AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
/// Gets a node for an atomic op, produces result and chain and
/// takes 1 operand.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
- SDValue Chain, SDValue Ptr, MachineMemOperand *MMO,
- AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
/// Gets a node for an atomic op, produces result and chain and takes N
/// operands.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTList, ArrayRef<SDValue> Ops,
- MachineMemOperand *MMO, AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
- SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
- SDVTList VTList, ArrayRef<SDValue> Ops,
- MachineMemOperand *MMO, AtomicOrdering Ordering,
- SynchronizationScope SynchScope);
+ MachineMemOperand *MMO);
/// Creates a MemIntrinsicNode that may produce a
/// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
@@ -968,14 +954,24 @@ public:
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
SDValue Mask, SDValue Src0, EVT MemVT,
- MachineMemOperand *MMO, ISD::LoadExtType);
+ MachineMemOperand *MMO, ISD::LoadExtType,
+ bool IsExpanding = false);
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, SDValue Mask, EVT MemVT,
- MachineMemOperand *MMO, bool IsTrunc);
+ MachineMemOperand *MMO, bool IsTruncating = false,
+ bool IsCompressing = false);
SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
+
+ /// Return a target-specific memory node, reusing an existing equivalent
+ /// node if one exists. TargetMemSDNode must be a subclass of MemSDNode.
+ template <class TargetMemSDNode>
+ SDValue getTargetMemSDNode(SDVTList VTs, ArrayRef<SDValue> Ops,
+ const SDLoc &dl, EVT MemVT,
+ MachineMemOperand *MMO);
+
/// Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);
@@ -1033,16 +1029,10 @@ public:
EVT VT2, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
- SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
- EVT VT2, EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, SDValue Op1);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
- EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
- SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
- EVT VT2, EVT VT3, SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
ArrayRef<SDValue> Ops);
@@ -1067,10 +1057,6 @@ public:
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
- EVT VT2);
- MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
- EVT VT2, SDValue Op1);
- MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
@@ -1083,9 +1069,6 @@ public:
SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
- MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
- EVT VT2, EVT VT3, EVT VT4,
- ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
@@ -1205,12 +1188,12 @@ public:
static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
switch (VT.getScalarType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown FP format");
- case MVT::f16: return APFloat::IEEEhalf;
- case MVT::f32: return APFloat::IEEEsingle;
- case MVT::f64: return APFloat::IEEEdouble;
- case MVT::f80: return APFloat::x87DoubleExtended;
- case MVT::f128: return APFloat::IEEEquad;
- case MVT::ppcf128: return APFloat::PPCDoubleDouble;
+ case MVT::f16: return APFloat::IEEEhalf();
+ case MVT::f32: return APFloat::IEEEsingle();
+ case MVT::f64: return APFloat::IEEEdouble();
+ case MVT::f80: return APFloat::x87DoubleExtended();
+ case MVT::f128: return APFloat::IEEEquad();
+ case MVT::ppcf128: return APFloat::PPCDoubleDouble();
}
}
@@ -1281,12 +1264,22 @@ public:
const;
/// Determine which bits of Op are known to be either zero or one and return
- /// them in the KnownZero/KnownOne bitsets. Targets can implement the
- /// computeKnownBitsForTargetNode method in the TargetLowering class to allow
- /// target nodes to be understood.
+ /// them in the KnownZero/KnownOne bitsets. For vectors, the known bits are
+ /// those that are shared by every vector element.
+ /// Targets can implement the computeKnownBitsForTargetNode method in the
+ /// TargetLowering class to allow target nodes to be understood.
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
unsigned Depth = 0) const;
+ /// Determine which bits of Op are known to be either zero or one and return
+ /// them in the KnownZero/KnownOne bitsets. The DemandedElts argument allows
+ /// us to only collect the known bits that are shared by the requested vector
+ /// elements.
+ /// Targets can implement the computeKnownBitsForTargetNode method in the
+ /// TargetLowering class to allow target nodes to be understood.
+ void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
+ const APInt &DemandedElts, unsigned Depth = 0) const;
+
/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit
/// is set.
@@ -1377,6 +1370,16 @@ public:
/// Test whether the given value is a constant int or similar node.
SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N);
+ /// Test whether the given value is a constant FP or similar node.
+ SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N);
+
+ /// \returns true if \p N is any kind of constant or build_vector of
+ /// constants, int or float. If a vector, it may not necessarily be a splat.
+ inline bool isConstantValueOfAnyType(SDValue N) {
+ return isConstantIntBuildVectorOrConstantInt(N) ||
+ isConstantFPBuildVectorOrConstantFP(N);
+ }
+
private:
void InsertNode(SDNode *N);
bool RemoveNodeFromCSEMaps(SDNode *N);
@@ -1386,7 +1389,7 @@ private:
void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
void *&InsertPos);
- SDNode *UpdadeSDLocOnMergedSDNode(SDNode *N, const SDLoc &loc);
+ SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
void DeleteNodeNotInCSEMaps(SDNode *N);
void DeallocateNode(SDNode *N);
@@ -1424,15 +1427,51 @@ private:
};
template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
- typedef SelectionDAG::allnodes_iterator nodes_iterator;
+ typedef pointer_iterator<SelectionDAG::allnodes_iterator> nodes_iterator;
static nodes_iterator nodes_begin(SelectionDAG *G) {
- return G->allnodes_begin();
+ return nodes_iterator(G->allnodes_begin());
}
static nodes_iterator nodes_end(SelectionDAG *G) {
- return G->allnodes_end();
+ return nodes_iterator(G->allnodes_end());
}
};
+template <class TargetMemSDNode>
+SDValue SelectionDAG::getTargetMemSDNode(SDVTList VTs,
+ ArrayRef<SDValue> Ops,
+ const SDLoc &dl, EVT MemVT,
+ MachineMemOperand *MMO) {
+
+ /// Compose node ID and try to find an existing node.
+ FoldingSetNodeID ID;
+ unsigned Opcode =
+ TargetMemSDNode(dl.getIROrder(), DebugLoc(), VTs, MemVT, MMO).getOpcode();
+ ID.AddInteger(Opcode);
+ ID.AddPointer(VTs.VTs);
+ for (auto& Op : Ops) {
+ ID.AddPointer(Op.getNode());
+ ID.AddInteger(Op.getResNo());
+ }
+ ID.AddInteger(MemVT.getRawBits());
+ ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
+ ID.AddInteger(getSyntheticNodeSubclassData<TargetMemSDNode>(
+ dl.getIROrder(), VTs, MemVT, MMO));
+
+ void *IP = nullptr;
+ if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
+ cast<TargetMemSDNode>(E)->refineAlignment(MMO);
+ return SDValue(E, 0);
+ }
+
+ /// Existing node was not found. Create a new one.
+ auto *N = newSDNode<TargetMemSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
+ MemVT, MMO);
+ createOperands(N, Ops);
+ CSEMap.InsertNode(N, IP);
+ InsertNode(N);
+ return SDValue(N, 0);
+}
+
} // end namespace llvm
#endif
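A sketch of how the per-element known-bits overload added above might be used from a combine: ask about a single vector element instead of only the bits common to all elements. The helper name and the surrounding context are assumptions.

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Sketch (hypothetical helper): is every bit of element 0 known to be zero?
static bool isFirstElementKnownZero(SelectionDAG &DAG, SDValue Op) {
  EVT VT = Op.getValueType();
  if (!VT.isVector())
    return false;
  APInt KnownZero, KnownOne;
  APInt DemandedElts = APInt::getOneBitSet(VT.getVectorNumElements(), 0);
  DAG.computeKnownBits(Op, KnownZero, KnownOne, DemandedElts);
  return KnownZero.isAllOnesValue();  // all bits of element 0 are known zero
}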
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index cfcc4117f93b..d4b7170eac3c 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -19,23 +19,37 @@
#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
#define LLVM_CODEGEN_SELECTIONDAGNODES_H
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <tuple>
namespace llvm {
@@ -44,12 +58,9 @@ class GlobalValue;
class MachineBasicBlock;
class MachineConstantPoolValue;
class SDNode;
-class HandleSDNode;
class Value;
class MCSymbol;
template <typename T> struct DenseMapInfo;
-template <typename T> struct simplify_type;
-template <typename T> struct ilist_traits;
void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
bool force = false);
@@ -64,6 +75,7 @@ struct SDVTList {
};
namespace ISD {
+
/// Node predicates
/// If N is a BUILD_VECTOR node whose elements are all the same constant or
@@ -89,7 +101,8 @@ namespace ISD {
/// Return true if the node has at least one operand and all operands of the
/// specified node are ISD::UNDEF.
bool allOperandsUndef(const SDNode *N);
-} // end llvm:ISD namespace
+
+} // end namespace ISD
//===----------------------------------------------------------------------===//
/// Unlike LLVM values, Selection DAG nodes may return multiple
@@ -107,6 +120,7 @@ class SDValue {
SDNode *Node; // The node defining the value we are using.
unsigned ResNo; // Which return value of the node we are using.
+
public:
SDValue() : Node(nullptr), ResNo(0) {}
SDValue(SDNode *node, unsigned resno);
@@ -188,29 +202,30 @@ public:
inline bool hasOneUse() const;
};
-
template<> struct DenseMapInfo<SDValue> {
static inline SDValue getEmptyKey() {
SDValue V;
V.ResNo = -1U;
return V;
}
+
static inline SDValue getTombstoneKey() {
SDValue V;
V.ResNo = -2U;
return V;
}
+
static unsigned getHashValue(const SDValue &Val) {
return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
(unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
}
+
static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
return LHS == RHS;
}
};
template <> struct isPodLike<SDValue> { static const bool value = true; };
-
/// Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
@@ -244,7 +259,7 @@ class SDUse {
void operator=(const SDUse &U) = delete;
public:
- SDUse() : Val(), User(nullptr), Prev(nullptr), Next(nullptr) {}
+ SDUse() : User(nullptr), Prev(nullptr), Next(nullptr) {}
/// Normally SDUse will just implicitly convert to an SDValue that it holds.
operator const SDValue&() const { return Val; }
@@ -372,14 +387,6 @@ public:
bool hasAllowReciprocal() const { return AllowReciprocal; }
bool hasVectorReduction() const { return VectorReduction; }
- /// Return a raw encoding of the flags.
- /// This function should only be used to add data to the NodeID value.
- unsigned getRawFlags() const {
- return (NoUnsignedWrap << 0) | (NoSignedWrap << 1) | (Exact << 2) |
- (UnsafeAlgebra << 3) | (NoNaNs << 4) | (NoInfs << 5) |
- (NoSignedZeros << 6) | (AllowReciprocal << 7);
- }
-
/// Clear any flags in this flag set that aren't also set in Flags.
void intersectWith(const SDNodeFlags *Flags) {
NoUnsignedWrap &= Flags->NoUnsignedWrap;
@@ -400,15 +407,90 @@ private:
/// The operation that this node performs.
int16_t NodeType;
- /// This tracks whether this node has one or more dbg_value
- /// nodes corresponding to it.
- uint16_t HasDebugValue : 1;
-
protected:
- /// This member is defined by this class, but is not used for
- /// anything. Subclasses can use it to hold whatever state they find useful.
- /// This field is initialized to zero by the ctor.
- uint16_t SubclassData : 15;
+ // We define a set of mini-helper classes to help us interpret the bits in our
+ // SubclassData. These are designed to fit within a uint16_t so they pack
+ // with NodeType.
+
+ class SDNodeBitfields {
+ friend class SDNode;
+ friend class MemIntrinsicSDNode;
+ friend class MemSDNode;
+
+ uint16_t HasDebugValue : 1;
+ uint16_t IsMemIntrinsic : 1;
+ };
+ enum { NumSDNodeBits = 2 };
+
+ class ConstantSDNodeBitfields {
+ friend class ConstantSDNode;
+
+ uint16_t : NumSDNodeBits;
+
+ uint16_t IsOpaque : 1;
+ };
+
+ class MemSDNodeBitfields {
+ friend class MemSDNode;
+ friend class MemIntrinsicSDNode;
+ friend class AtomicSDNode;
+
+ uint16_t : NumSDNodeBits;
+
+ uint16_t IsVolatile : 1;
+ uint16_t IsNonTemporal : 1;
+ uint16_t IsDereferenceable : 1;
+ uint16_t IsInvariant : 1;
+ };
+ enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
+
+ class LSBaseSDNodeBitfields {
+ friend class LSBaseSDNode;
+ uint16_t : NumMemSDNodeBits;
+
+ uint16_t AddressingMode : 3; // enum ISD::MemIndexedMode
+ };
+ enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
+
+ class LoadSDNodeBitfields {
+ friend class LoadSDNode;
+ friend class MaskedLoadSDNode;
+
+ uint16_t : NumLSBaseSDNodeBits;
+
+ uint16_t ExtTy : 2; // enum ISD::LoadExtType
+ uint16_t IsExpanding : 1;
+ };
+
+ class StoreSDNodeBitfields {
+ friend class StoreSDNode;
+ friend class MaskedStoreSDNode;
+
+ uint16_t : NumLSBaseSDNodeBits;
+
+ uint16_t IsTruncating : 1;
+ uint16_t IsCompressing : 1;
+ };
+
+ union {
+ char RawSDNodeBits[sizeof(uint16_t)];
+ SDNodeBitfields SDNodeBits;
+ ConstantSDNodeBitfields ConstantSDNodeBits;
+ MemSDNodeBitfields MemSDNodeBits;
+ LSBaseSDNodeBitfields LSBaseSDNodeBits;
+ LoadSDNodeBitfields LoadSDNodeBits;
+ StoreSDNodeBitfields StoreSDNodeBits;
+ };
+
+ // RawSDNodeBits must cover the entirety of the union. This means that all of
+ // the union's members must have size <= RawSDNodeBits. We write the RHS as
+ // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
+ static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
+ static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
+ static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
+ static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
+ static_assert(sizeof(LoadSDNodeBitfields) <= 4, "field too wide");
+ static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
private:
/// Unique id per SDNode in the DAG.
@@ -441,7 +523,6 @@ private:
static const EVT *getValueTypeList(EVT VT);
friend class SelectionDAG;
- friend struct ilist_traits<SDNode>;
// TODO: unfriend HandleSDNode once we fix its operand handling.
friend class HandleSDNode;
@@ -481,7 +562,8 @@ public:
/// proper classof relationship.
bool isMemIntrinsic() const {
return (NodeType == ISD::INTRINSIC_W_CHAIN ||
- NodeType == ISD::INTRINSIC_VOID) && ((SubclassData >> 13) & 1);
+ NodeType == ISD::INTRINSIC_VOID) &&
+ SDNodeBits.IsMemIntrinsic;
}
/// Test if this node has a post-isel opcode, directly
@@ -496,11 +578,8 @@ public:
return ~NodeType;
}
- /// Get this bit.
- bool getHasDebugValue() const { return HasDebugValue; }
-
- /// Set this bit.
- void setHasDebugValue(bool b) { HasDebugValue = b; }
+ bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
+ void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
/// Return true if there are no uses of this node.
bool use_empty() const { return UseList == nullptr; }
@@ -538,9 +617,11 @@ public:
class use_iterator
: public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
SDUse *Op;
- explicit use_iterator(SDUse *op) : Op(op) {
- }
+
friend class SDNode;
+
+ explicit use_iterator(SDUse *op) : Op(op) {}
+
public:
typedef std::iterator<std::forward_iterator_tag,
SDUse, ptrdiff_t>::reference reference;
@@ -668,6 +749,7 @@ public:
}
typedef SDUse* op_iterator;
+
op_iterator op_begin() const { return OperandList; }
op_iterator op_end() const { return OperandList+NumOperands; }
ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
@@ -767,7 +849,6 @@ public:
void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
unsigned depth = 100) const;
-
/// Dump this node, for debugging.
void dump() const;
@@ -815,10 +896,10 @@ protected:
/// SDNodes are created without any operands, and never own the operand
/// storage. To add operands, see SelectionDAG::createOperands.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
- : NodeType(Opc), HasDebugValue(false), SubclassData(0), NodeId(-1),
- OperandList(nullptr), ValueList(VTs.VTs), UseList(nullptr),
- NumOperands(0), NumValues(VTs.NumVTs), IROrder(Order),
+ : NodeType(Opc), NodeId(-1), OperandList(nullptr), ValueList(VTs.VTs),
+ UseList(nullptr), NumOperands(0), NumValues(VTs.NumVTs), IROrder(Order),
debugLoc(std::move(dl)) {
+ memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
assert(NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!");
@@ -851,16 +932,19 @@ public:
if (I)
DL = I->getDebugLoc();
}
+
unsigned getIROrder() const { return IROrder; }
const DebugLoc &getDebugLoc() const { return DL; }
};
-
// Define inline functions from the SDValue class.
inline SDValue::SDValue(SDNode *node, unsigned resno)
: Node(node), ResNo(resno) {
- assert((!Node || ResNo < Node->getNumValues()) &&
+ // Explicitly check for !ResNo to avoid use-after-free, because there are
+ // callers that use SDValue(N, 0) with a deleted N to indicate successful
+ // combines.
+ assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!");
assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
}
@@ -868,48 +952,63 @@ inline SDValue::SDValue(SDNode *node, unsigned resno)
inline unsigned SDValue::getOpcode() const {
return Node->getOpcode();
}
+
inline EVT SDValue::getValueType() const {
return Node->getValueType(ResNo);
}
+
inline unsigned SDValue::getNumOperands() const {
return Node->getNumOperands();
}
+
inline const SDValue &SDValue::getOperand(unsigned i) const {
return Node->getOperand(i);
}
+
inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
return Node->getConstantOperandVal(i);
}
+
inline bool SDValue::isTargetOpcode() const {
return Node->isTargetOpcode();
}
+
inline bool SDValue::isTargetMemoryOpcode() const {
return Node->isTargetMemoryOpcode();
}
+
inline bool SDValue::isMachineOpcode() const {
return Node->isMachineOpcode();
}
+
inline unsigned SDValue::getMachineOpcode() const {
return Node->getMachineOpcode();
}
+
inline bool SDValue::isUndef() const {
return Node->isUndef();
}
+
inline bool SDValue::use_empty() const {
return !Node->hasAnyUseOfValue(ResNo);
}
+
inline bool SDValue::hasOneUse() const {
return Node->hasNUsesOfValue(1, ResNo);
}
+
inline const DebugLoc &SDValue::getDebugLoc() const {
return Node->getDebugLoc();
}
+
inline void SDValue::dump() const {
return Node->dump();
}
+
inline void SDValue::dumpr() const {
return Node->dumpr();
}
+
// Define inline functions from the SDUse class.
inline void SDUse::set(const SDValue &V) {
@@ -956,9 +1055,11 @@ static bool isBinOpWithFlags(unsigned Opcode) {
class BinaryWithFlagsSDNode : public SDNode {
public:
SDNodeFlags Flags;
+
BinaryWithFlagsSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
SDVTList VTs, const SDNodeFlags &NodeFlags)
: SDNode(Opc, Order, dl, VTs), Flags(NodeFlags) {}
+
static bool classof(const SDNode *N) {
return isBinOpWithFlags(N->getOpcode());
}
@@ -970,6 +1071,7 @@ public:
/// the AllNodes list.
class HandleSDNode : public SDNode {
SDUse Op;
+
public:
explicit HandleSDNode(SDValue X)
: SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
@@ -987,6 +1089,7 @@ public:
OperandList = &Op;
}
~HandleSDNode();
+
const SDValue &getValue() const { return Op; }
};
@@ -1032,25 +1135,27 @@ public:
return MMO->getAlignment();
}
- /// Return the SubclassData value, which contains an
+ /// Return the SubclassData value, without HasDebugValue. This contains an
/// encoding of the volatile flag, as well as bits used by subclasses. This
/// function should only be used to compute a FoldingSetNodeID value.
+ /// The HasDebugValue bit is masked out because the CSE map needs to treat
+ /// nodes with and without debug info as equivalent.
unsigned getRawSubclassData() const {
- return SubclassData;
- }
-
- // We access subclass data here so that we can check consistency
- // with MachineMemOperand information.
- bool isVolatile() const { return (SubclassData >> 5) & 1; }
- bool isNonTemporal() const { return (SubclassData >> 6) & 1; }
- bool isInvariant() const { return (SubclassData >> 7) & 1; }
-
- AtomicOrdering getOrdering() const {
- return AtomicOrdering((SubclassData >> 8) & 15);
- }
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((SubclassData >> 12) & 1);
- }
+ uint16_t Data;
+ union {
+ char RawSDNodeBits[sizeof(uint16_t)];
+ SDNodeBitfields SDNodeBits;
+ };
+ memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
+ SDNodeBits.HasDebugValue = 0;
+ memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
+ return Data;
+ }
+
+ bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
+ bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
+ bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
+ bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
// Returns the offset from the location of the access.
int64_t getSrcValueOffset() const { return MMO->getOffset(); }
@@ -1061,6 +1166,14 @@ public:
/// Returns the Ranges that describes the dereference.
const MDNode *getRanges() const { return MMO->getRanges(); }
+ /// Return the synchronization scope for this memory operation.
+ SynchronizationScope getSynchScope() const { return MMO->getSynchScope(); }
+
+ /// Return the atomic ordering requirements for this memory operation. For
+ /// cmpxchg atomic operations, return the atomic ordering requirements when
+ /// store occurs.
+ AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
+
/// Return the type of the in-memory value.
EVT getMemoryVT() const { return MemoryVT; }
@@ -1123,57 +1236,27 @@ public:
/// This is an SDNode representing atomic operations.
class AtomicSDNode : public MemSDNode {
- /// For cmpxchg instructions, the ordering requirements when a store does not
- /// occur.
- AtomicOrdering FailureOrdering;
-
- void InitAtomic(AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
- // This must match encodeMemSDNodeFlags() in SelectionDAG.cpp.
- assert((AtomicOrdering)((unsigned)SuccessOrdering & 15) ==
- SuccessOrdering &&
- "Ordering may not require more than 4 bits!");
- assert((AtomicOrdering)((unsigned)FailureOrdering & 15) ==
- FailureOrdering &&
- "Ordering may not require more than 4 bits!");
- assert((SynchScope & 1) == SynchScope &&
- "SynchScope may not require more than 1 bit!");
- SubclassData |= (unsigned)SuccessOrdering << 8;
- SubclassData |= SynchScope << 12;
- this->FailureOrdering = FailureOrdering;
- assert(getSuccessOrdering() == SuccessOrdering &&
- "Ordering encoding error!");
- assert(getFailureOrdering() == FailureOrdering &&
- "Ordering encoding error!");
- assert(getSynchScope() == SynchScope && "Synch-scope encoding error!");
- }
-
public:
AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
- EVT MemVT, MachineMemOperand *MMO,
- AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope)
- : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
- InitAtomic(SuccessOrdering, FailureOrdering, SynchScope);
- }
+ EVT MemVT, MachineMemOperand *MMO)
+ : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {}
const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getVal() const { return getOperand(2); }
- AtomicOrdering getSuccessOrdering() const {
- return getOrdering();
+ /// Returns true if this SDNode represents cmpxchg atomic operation, false
+ /// otherwise.
+ bool isCompareAndSwap() const {
+ unsigned Op = getOpcode();
+ return Op == ISD::ATOMIC_CMP_SWAP ||
+ Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
}
- // Not quite enough room in SubclassData for everything, so failure gets its
- // own field.
+ /// For cmpxchg atomic operations, return the atomic ordering requirements
+ /// when store does not occur.
AtomicOrdering getFailureOrdering() const {
- return FailureOrdering;
- }
-
- bool isCompareAndSwap() const {
- unsigned Op = getOpcode();
- return Op == ISD::ATOMIC_CMP_SWAP || Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
+ assert(isCompareAndSwap() && "Must be cmpxchg operation");
+ return MMO->getFailureOrdering();
}
// Methods to support isa and dyn_cast
@@ -1205,7 +1288,7 @@ public:
MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
: MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
- SubclassData |= 1u << 13;
+ SDNodeBits.IsMemIntrinsic = true;
}
// Methods to support isa and dyn_cast
@@ -1230,8 +1313,10 @@ class ShuffleVectorSDNode : public SDNode {
// The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
// is freed when the SelectionDAG object is destroyed.
const int *Mask;
+
protected:
friend class SelectionDAG;
+
ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
: SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
@@ -1240,12 +1325,14 @@ public:
EVT VT = getValueType(0);
return makeArrayRef(Mask, VT.getVectorNumElements());
}
+
int getMaskElt(unsigned Idx) const {
assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
return Mask[Idx];
}
bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
+
int getSplatIndex() const {
assert(isSplat() && "Cannot get splat index for non-splat!");
EVT VT = getValueType(0);
@@ -1255,6 +1342,7 @@ public:
}
llvm_unreachable("Splat with all undef indices?");
}
+
static bool isSplatMask(const int *Mask, EVT VT);
/// Change values in a shuffle permute mask assuming
@@ -1279,16 +1367,18 @@ public:
class ConstantSDNode : public SDNode {
const ConstantInt *Value;
+
friend class SelectionDAG;
+
ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val,
const DebugLoc &DL, EVT VT)
: SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DL,
getSDVTList(VT)),
Value(val) {
- SubclassData |= (uint16_t)isOpaque;
+ ConstantSDNodeBits.IsOpaque = isOpaque;
}
-public:
+public:
const ConstantInt *getConstantIntValue() const { return Value; }
const APInt &getAPIntValue() const { return Value->getValue(); }
uint64_t getZExtValue() const { return Value->getZExtValue(); }
@@ -1298,7 +1388,7 @@ public:
bool isNullValue() const { return Value->isNullValue(); }
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
- bool isOpaque() const { return SubclassData & 1; }
+ bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::Constant ||
@@ -1308,7 +1398,9 @@ public:
class ConstantFPSDNode : public SDNode {
const ConstantFP *Value;
+
friend class SelectionDAG;
+
ConstantFPSDNode(bool isTarget, const ConstantFP *val, const DebugLoc &DL,
EVT VT)
: SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0, DL,
@@ -1316,7 +1408,6 @@ class ConstantFPSDNode : public SDNode {
Value(val) {}
public:
-
const APFloat& getValueAPF() const { return Value->getValueAPF(); }
const ConstantFP *getConstantFPValue() const { return Value; }
@@ -1359,16 +1450,26 @@ public:
/// Returns true if \p V is a constant integer zero.
bool isNullConstant(SDValue V);
+
/// Returns true if \p V is an FP constant with a value of positive zero.
bool isNullFPConstant(SDValue V);
+
/// Returns true if \p V is an integer constant with all bits set.
bool isAllOnesConstant(SDValue V);
+
/// Returns true if \p V is a constant integer one.
bool isOneConstant(SDValue V);
+
/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
bool isBitwiseNot(SDValue V);
+/// Returns the SDNode if it is a constant splat BuildVector or constant int.
+ConstantSDNode *isConstOrConstSplat(SDValue V);
+
+/// Returns the SDNode if it is a constant splat BuildVector or constant float.
+ConstantFPSDNode *isConstOrConstSplatFP(SDValue V);
+
class GlobalAddressSDNode : public SDNode {
const GlobalValue *TheGlobal;
int64_t Offset;
@@ -1379,7 +1480,6 @@ class GlobalAddressSDNode : public SDNode {
unsigned char TargetFlags);
public:
-
const GlobalValue *getGlobal() const { return TheGlobal; }
int64_t getOffset() const { return Offset; }
unsigned char getTargetFlags() const { return TargetFlags; }
@@ -1396,13 +1496,15 @@ public:
class FrameIndexSDNode : public SDNode {
int FI;
+
friend class SelectionDAG;
+
FrameIndexSDNode(int fi, EVT VT, bool isTarg)
: SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
0, DebugLoc(), getSDVTList(VT)), FI(fi) {
}
-public:
+public:
int getIndex() const { return FI; }
static bool classof(const SDNode *N) {
@@ -1414,13 +1516,15 @@ public:
class JumpTableSDNode : public SDNode {
int JTI;
unsigned char TargetFlags;
+
friend class SelectionDAG;
+
JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned char TF)
: SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
}
-public:
+public:
int getIndex() const { return JTI; }
unsigned char getTargetFlags() const { return TargetFlags; }
@@ -1438,7 +1542,9 @@ class ConstantPoolSDNode : public SDNode {
int Offset; // It's a MachineConstantPoolValue if top bit is set.
unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
unsigned char TargetFlags;
+
friend class SelectionDAG;
+
ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
unsigned Align, unsigned char TF)
: SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
@@ -1447,6 +1553,7 @@ class ConstantPoolSDNode : public SDNode {
assert(Offset >= 0 && "Offset is too large");
Val.ConstVal = c;
}
+
ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
EVT VT, int o, unsigned Align, unsigned char TF)
: SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
@@ -1456,8 +1563,8 @@ class ConstantPoolSDNode : public SDNode {
Val.MachineCPVal = v;
Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
}
-public:
+public:
bool isMachineConstantPoolEntry() const {
return Offset < 0;
}
@@ -1494,13 +1601,13 @@ class TargetIndexSDNode : public SDNode {
unsigned char TargetFlags;
int Index;
int64_t Offset;
+
friend class SelectionDAG;
-public:
+public:
TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned char TF)
: SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
TargetFlags(TF), Index(Idx), Offset(Ofs) {}
-public:
unsigned char getTargetFlags() const { return TargetFlags; }
int getIndex() const { return Index; }
@@ -1513,15 +1620,17 @@ public:
class BasicBlockSDNode : public SDNode {
MachineBasicBlock *MBB;
+
friend class SelectionDAG;
+
/// Debug info is meaningful and potentially useful here, but we create
/// blocks out of order when they're jumped to, which makes it a bit
/// harder. Let's see if we need it first.
explicit BasicBlockSDNode(MachineBasicBlock *mbb)
: SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
{}
-public:
+public:
MachineBasicBlock *getBasicBlock() const { return MBB; }
static bool classof(const SDNode *N) {
@@ -1533,6 +1642,7 @@ public:
class BuildVectorSDNode : public SDNode {
// These are constructed as SDNodes and then cast to BuildVectorSDNodes.
explicit BuildVectorSDNode() = delete;
+
public:
/// Check if this is a constant splat, and if so, find the
/// smallest element size that splats the vector. If MinSplatBits is
@@ -1591,7 +1701,9 @@ public:
///
class SrcValueSDNode : public SDNode {
const Value *V;
+
friend class SelectionDAG;
+
/// Create a SrcValue for a general value.
explicit SrcValueSDNode(const Value *v)
: SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
@@ -1607,12 +1719,14 @@ public:
class MDNodeSDNode : public SDNode {
const MDNode *MD;
+
friend class SelectionDAG;
+
explicit MDNodeSDNode(const MDNode *md)
: SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
{}
-public:
+public:
const MDNode *getMD() const { return MD; }
static bool classof(const SDNode *N) {
@@ -1622,12 +1736,13 @@ public:
class RegisterSDNode : public SDNode {
unsigned Reg;
+
friend class SelectionDAG;
+
RegisterSDNode(unsigned reg, EVT VT)
- : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {
- }
-public:
+ : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
+public:
unsigned getReg() const { return Reg; }
static bool classof(const SDNode *N) {
@@ -1638,12 +1753,14 @@ public:
class RegisterMaskSDNode : public SDNode {
// The memory for RegMask is not owned by the node.
const uint32_t *RegMask;
+
friend class SelectionDAG;
+
RegisterMaskSDNode(const uint32_t *mask)
: SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
RegMask(mask) {}
-public:
+public:
const uint32_t *getRegMask() const { return RegMask; }
static bool classof(const SDNode *N) {
@@ -1655,12 +1772,15 @@ class BlockAddressSDNode : public SDNode {
const BlockAddress *BA;
int64_t Offset;
unsigned char TargetFlags;
+
friend class SelectionDAG;
+
BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
int64_t o, unsigned char Flags)
: SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
BA(ba), Offset(o), TargetFlags(Flags) {
}
+
public:
const BlockAddress *getBlockAddress() const { return BA; }
int64_t getOffset() const { return Offset; }
@@ -1674,7 +1794,9 @@ public:
class EHLabelSDNode : public SDNode {
MCSymbol *Label;
+
friend class SelectionDAG;
+
EHLabelSDNode(unsigned Order, const DebugLoc &dl, MCSymbol *L)
: SDNode(ISD::EH_LABEL, Order, dl, getSDVTList(MVT::Other)), Label(L) {}
@@ -1691,12 +1813,12 @@ class ExternalSymbolSDNode : public SDNode {
unsigned char TargetFlags;
friend class SelectionDAG;
+
ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned char TF, EVT VT)
: SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol,
- 0, DebugLoc(), getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {
- }
-public:
+ 0, DebugLoc(), getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {}
+public:
const char *getSymbol() const { return Symbol; }
unsigned char getTargetFlags() const { return TargetFlags; }
@@ -1723,13 +1845,14 @@ public:
class CondCodeSDNode : public SDNode {
ISD::CondCode Condition;
+
friend class SelectionDAG;
+
explicit CondCodeSDNode(ISD::CondCode Cond)
: SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
- Condition(Cond) {
- }
-public:
+ Condition(Cond) {}
+public:
ISD::CondCode get() const { return Condition; }
static bool classof(const SDNode *N) {
@@ -1741,7 +1864,9 @@ public:
/// future and most targets don't support it.
class CvtRndSatSDNode : public SDNode {
ISD::CvtCode CvtCode;
+
friend class SelectionDAG;
+
explicit CvtRndSatSDNode(EVT VT, unsigned Order, const DebugLoc &dl,
ISD::CvtCode Code)
: SDNode(ISD::CONVERT_RNDSAT, Order, dl, getSDVTList(VT)), CvtCode(Code) {
@@ -1759,13 +1884,14 @@ public:
/// to parameterize some operations.
class VTSDNode : public SDNode {
EVT ValueType;
+
friend class SelectionDAG;
+
explicit VTSDNode(EVT VT)
: SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
- ValueType(VT) {
- }
-public:
+ ValueType(VT) {}
+public:
EVT getVT() const { return ValueType; }
static bool classof(const SDNode *N) {
@@ -1780,8 +1906,8 @@ public:
SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
MachineMemOperand *MMO)
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
- SubclassData |= AM << 2;
- assert(getAddressingMode() == AM && "MemIndexedMode encoding error!");
+ LSBaseSDNodeBits.AddressingMode = AM;
+ assert(getAddressingMode() == AM && "Value truncated");
}
const SDValue &getOffset() const {
@@ -1791,7 +1917,7 @@ public:
/// Return the addressing mode for this load or store:
/// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
ISD::MemIndexedMode getAddressingMode() const {
- return ISD::MemIndexedMode((SubclassData >> 2) & 7);
+ return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
}
/// Return true if this is a pre/post inc/dec load/store.
@@ -1809,21 +1935,21 @@ public:
/// This class is used to represent ISD::LOAD nodes.
class LoadSDNode : public LSBaseSDNode {
friend class SelectionDAG;
+
LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
- SubclassData |= (unsigned short)ETy;
- assert(getExtensionType() == ETy && "LoadExtType encoding error!");
+ LoadSDNodeBits.ExtTy = ETy;
assert(readMem() && "Load MachineMemOperand is not a load!");
assert(!writeMem() && "Load MachineMemOperand is a store!");
}
-public:
+public:
/// Return whether this is a plain node,
/// or one of the varieties of value-extending loads.
ISD::LoadExtType getExtensionType() const {
- return ISD::LoadExtType(SubclassData & 3);
+ return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
}
const SDValue &getBasePtr() const { return getOperand(1); }
@@ -1837,21 +1963,21 @@ public:
/// This class is used to represent ISD::STORE nodes.
class StoreSDNode : public LSBaseSDNode {
friend class SelectionDAG;
+
StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
MachineMemOperand *MMO)
: LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
- SubclassData |= (unsigned short)isTrunc;
- assert(isTruncatingStore() == isTrunc && "isTrunc encoding error!");
+ StoreSDNodeBits.IsTruncating = isTrunc;
assert(!readMem() && "Store MachineMemOperand is a load!");
assert(writeMem() && "Store MachineMemOperand is not a store!");
}
-public:
+public:
/// Return true if the op does a truncation before store.
/// For integers this is the same as doing a TRUNCATE and storing the result.
/// For floats, it is the same as doing an FP_ROUND and storing the result.
- bool isTruncatingStore() const { return SubclassData & 1; }
+ bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
const SDValue &getValue() const { return getOperand(1); }
const SDValue &getBasePtr() const { return getOperand(2); }
@@ -1866,6 +1992,7 @@ public:
class MaskedLoadStoreSDNode : public MemSDNode {
public:
friend class SelectionDAG;
+
MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
const DebugLoc &dl, SDVTList VTs, EVT MemVT,
MachineMemOperand *MMO)
@@ -1889,34 +2016,48 @@ class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
public:
friend class SelectionDAG;
MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
- ISD::LoadExtType ETy, EVT MemVT, MachineMemOperand *MMO)
+ ISD::LoadExtType ETy, bool IsExpanding, EVT MemVT,
+ MachineMemOperand *MMO)
: MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, MemVT, MMO) {
- SubclassData |= (unsigned short)ETy;
+ LoadSDNodeBits.ExtTy = ETy;
+ LoadSDNodeBits.IsExpanding = IsExpanding;
}
ISD::LoadExtType getExtensionType() const {
- return ISD::LoadExtType(SubclassData & 3);
+ return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
}
+
const SDValue &getSrc0() const { return getOperand(3); }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MLOAD;
}
+
+ bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
};
/// This class is used to represent an MSTORE node
class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
-
public:
friend class SelectionDAG;
+
MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
- bool isTrunc, EVT MemVT, MachineMemOperand *MMO)
+ bool isTrunc, bool isCompressing, EVT MemVT,
+ MachineMemOperand *MMO)
: MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, MemVT, MMO) {
- SubclassData |= (unsigned short)isTrunc;
+ StoreSDNodeBits.IsTruncating = isTrunc;
+ StoreSDNodeBits.IsCompressing = isCompressing;
}
+
/// Return true if the op does a truncation before store.
/// For integers this is the same as doing a TRUNCATE and storing the result.
/// For floats, it is the same as doing an FP_ROUND and storing the result.
- bool isTruncatingStore() const { return SubclassData & 1; }
+ bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
+
+ /// Returns true if the op does a compression to the vector before storing.
+ /// The node contiguously stores the active elements (integers or floats)
+ /// in src (those with their respective bit set in writemask k) to unaligned
+ /// memory at base_addr.
+ bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
const SDValue &getValue() const { return getOperand(3); }
@@ -1931,6 +2072,7 @@ public:
class MaskedGatherScatterSDNode : public MemSDNode {
public:
friend class SelectionDAG;
+
MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
const DebugLoc &dl, SDVTList VTs, EVT MemVT,
MachineMemOperand *MMO)
@@ -1956,6 +2098,7 @@ public:
class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
public:
friend class SelectionDAG;
+
MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO) {}
@@ -1968,9 +2111,9 @@ public:
/// This class is used to represent an MSCATTER node
///
class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
-
public:
friend class SelectionDAG;
+
MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
EVT MemVT, MachineMemOperand *MMO)
: MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO) {}
@@ -1989,6 +2132,7 @@ public:
private:
friend class SelectionDAG;
+
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
: SDNode(Opc, Order, DL, VTs), MemRefs(nullptr), MemRefsEnd(nullptr) {}
@@ -2021,6 +2165,7 @@ class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
unsigned Operand;
SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
+
public:
bool operator==(const SDNodeIterator& x) const {
return Operand == x.Operand;
@@ -2055,13 +2200,14 @@ public:
};
template <> struct GraphTraits<SDNode*> {
- typedef SDNode NodeType;
+ typedef SDNode *NodeRef;
typedef SDNodeIterator ChildIteratorType;
- static inline NodeType *getEntryNode(SDNode *N) { return N; }
- static inline ChildIteratorType child_begin(NodeType *N) {
+
+ static NodeRef getEntryNode(SDNode *N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) {
return SDNodeIterator::begin(N);
}
- static inline ChildIteratorType child_end(NodeType *N) {
+ static ChildIteratorType child_end(NodeRef N) {
return SDNodeIterator::end(N);
}
};
@@ -2078,6 +2224,7 @@ typedef AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
typedef GlobalAddressSDNode MostAlignedSDNode;
namespace ISD {
+
/// Returns true if the specified node is a non-extending and unindexed load.
inline bool isNormalLoad(const SDNode *N) {
const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
@@ -2138,8 +2285,9 @@ namespace ISD {
return isa<StoreSDNode>(N) &&
cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
-}
-} // end llvm namespace
+} // end namespace ISD
+
+} // end namespace llvm
-#endif
+#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H
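
To make the refactored subclass-data bits and the NodeRef-based GraphTraits specialization above concrete, here is a small hedged sketch (not part of the patch) that walks an SDNode's operands through GraphTraits and queries the new masked-load/store predicates; the surrounding SelectionDAG is assumed to exist elsewhere.

  #include "llvm/CodeGen/SelectionDAGNodes.h"

  // Counts expanding masked loads and compressing masked stores among the
  // operands of N, using the accessors introduced in this revision.
  static void countMaskedOps(llvm::SDNode *N, unsigned &Expanding,
                             unsigned &Compressing) {
    using GT = llvm::GraphTraits<llvm::SDNode *>;
    for (auto It = GT::child_begin(N), End = GT::child_end(N); It != End; ++It) {
      llvm::SDNode *Op = *It; // SDNodeIterator yields the operand's node
      if (const auto *MLd = llvm::dyn_cast<llvm::MaskedLoadSDNode>(Op))
        Expanding += MLd->isExpandingLoad();
      else if (const auto *MSt = llvm::dyn_cast<llvm::MaskedStoreSDNode>(Op))
        Compressing += MSt->isCompressingStore();
    }
  }
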
diff --git a/include/llvm/CodeGen/SlotIndexes.h b/include/llvm/CodeGen/SlotIndexes.h
index afb0288b024f..2ac3b3d86cb6 100644
--- a/include/llvm/CodeGen/SlotIndexes.h
+++ b/include/llvm/CodeGen/SlotIndexes.h
@@ -69,23 +69,8 @@ namespace llvm {
};
template <>
- struct ilist_traits<IndexListEntry> : public ilist_default_traits<IndexListEntry> {
- private:
- mutable ilist_half_node<IndexListEntry> Sentinel;
- public:
- IndexListEntry *createSentinel() const {
- return static_cast<IndexListEntry*>(&Sentinel);
- }
- void destroySentinel(IndexListEntry *) const {}
-
- IndexListEntry *provideInitialHead() const { return createSentinel(); }
- IndexListEntry *ensureHead(IndexListEntry*) const { return createSentinel(); }
- static void noteHead(IndexListEntry*, IndexListEntry*) {}
- void deleteNode(IndexListEntry *N) {}
-
- private:
- void createNode(const IndexListEntry &);
- };
+ struct ilist_alloc_traits<IndexListEntry>
+ : public ilist_noalloc_traits<IndexListEntry> {};
/// SlotIndex - An opaque wrapper around machine indexes.
class SlotIndex {
@@ -361,9 +346,8 @@ namespace llvm {
IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
IndexListEntry *entry =
- static_cast<IndexListEntry*>(
- ileAllocator.Allocate(sizeof(IndexListEntry),
- alignOf<IndexListEntry>()));
+ static_cast<IndexListEntry *>(ileAllocator.Allocate(
+ sizeof(IndexListEntry), alignof(IndexListEntry)));
new (entry) IndexListEntry(mi, index);
@@ -421,7 +405,8 @@ namespace llvm {
/// Returns the base index for the given instruction.
SlotIndex getInstructionIndex(const MachineInstr &MI) const {
// Instructions inside a bundle have the same number as the bundle itself.
- Mi2IndexMap::const_iterator itr = mi2iMap.find(&getBundleStart(MI));
+ const MachineInstr &BundleStart = *getBundleStart(MI.getIterator());
+ Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleStart);
assert(itr != mi2iMap.end() && "Instruction not found in maps.");
return itr->second;
}
@@ -632,11 +617,12 @@ namespace llvm {
}
/// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in
- /// maps used by register allocator.
- void replaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
+ /// maps used by register allocator. \returns the index where the new
+ /// instruction was inserted.
+ SlotIndex replaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(&MI);
if (mi2iItr == mi2iMap.end())
- return;
+ return SlotIndex();
SlotIndex replaceBaseIndex = mi2iItr->second;
IndexListEntry *miEntry(replaceBaseIndex.listEntry());
assert(miEntry->getInstr() == &MI &&
@@ -644,6 +630,7 @@ namespace llvm {
miEntry->setInstr(&NewMI);
mi2iMap.erase(mi2iItr);
mi2iMap.insert(std::make_pair(&NewMI, replaceBaseIndex));
+ return replaceBaseIndex;
}
/// Add the given MachineBasicBlock into the maps.
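
A quick sketch of how a caller picks up the new return value of replaceMachineInstrInMaps (illustrative only; SIS, OldMI and NewMI are assumed to come from the surrounding pass):

  #include "llvm/CodeGen/SlotIndexes.h"
  #include <cassert>

  // Before this change the caller had to do a separate getInstructionIndex()
  // lookup on NewMI; now the replacement index comes back directly.
  static llvm::SlotIndex rewriteInstr(llvm::SlotIndexes &SIS,
                                      llvm::MachineInstr &OldMI,
                                      llvm::MachineInstr &NewMI) {
    llvm::SlotIndex Idx = SIS.replaceMachineInstrInMaps(OldMI, NewMI);
    assert(Idx.isValid() && "OldMI was not present in the index maps");
    return Idx;
  }
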
diff --git a/include/llvm/CodeGen/StackMaps.h b/include/llvm/CodeGen/StackMaps.h
index 918848f6b2a1..7b55b7968635 100644
--- a/include/llvm/CodeGen/StackMaps.h
+++ b/include/llvm/CodeGen/StackMaps.h
@@ -22,6 +22,37 @@ class AsmPrinter;
class MCExpr;
class MCStreamer;
+/// \brief MI-level stackmap operands.
+///
+/// MI stackmap operations take the form:
+/// <id>, <numBytes>, live args...
+class StackMapOpers {
+public:
+ /// Enumerate the meta operands.
+ enum { IDPos, NBytesPos };
+
+private:
+ const MachineInstr* MI;
+
+public:
+ explicit StackMapOpers(const MachineInstr *MI);
+
+ /// Return the ID for the given stackmap
+ uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }
+
+ /// Return the number of patchable bytes the given stackmap should emit.
+ uint32_t getNumPatchBytes() const {
+ return MI->getOperand(NBytesPos).getImm();
+ }
+
+ /// Get the operand index of the variable list of non-argument operands.
+ /// These hold the "live state".
+ unsigned getVarIdx() const {
+ // Skip ID, nShadowBytes.
+ return 2;
+ }
+};
+
/// \brief MI-level patchpoint operands.
///
/// MI patchpoint operations take the form:
@@ -44,36 +75,57 @@ public:
private:
const MachineInstr *MI;
bool HasDef;
- bool IsAnyReg;
+
+ unsigned getMetaIdx(unsigned Pos = 0) const {
+ assert(Pos < MetaEnd && "Meta operand index out of range.");
+ return (HasDef ? 1 : 0) + Pos;
+ }
+
+ const MachineOperand &getMetaOper(unsigned Pos) const {
+ return MI->getOperand(getMetaIdx(Pos));
+ }
public:
explicit PatchPointOpers(const MachineInstr *MI);
- bool isAnyReg() const { return IsAnyReg; }
+ bool isAnyReg() const { return (getCallingConv() == CallingConv::AnyReg); }
bool hasDef() const { return HasDef; }
- unsigned getMetaIdx(unsigned Pos = 0) const {
- assert(Pos < MetaEnd && "Meta operand index out of range.");
- return (HasDef ? 1 : 0) + Pos;
+ /// Return the ID for the given patchpoint.
+ uint64_t getID() const { return getMetaOper(IDPos).getImm(); }
+
+ /// Return the number of patchable bytes the given patchpoint should emit.
+ uint32_t getNumPatchBytes() const {
+ return getMetaOper(NBytesPos).getImm();
}
- const MachineOperand &getMetaOper(unsigned Pos) {
- return MI->getOperand(getMetaIdx(Pos));
+ /// Returns the target of the underlying call.
+ const MachineOperand &getCallTarget() const {
+ return getMetaOper(TargetPos);
+ }
+
+ /// Returns the calling convention
+ CallingConv::ID getCallingConv() const {
+ return getMetaOper(CCPos).getImm();
}
unsigned getArgIdx() const { return getMetaIdx() + MetaEnd; }
+ /// Return the number of call arguments
+ uint32_t getNumCallArgs() const {
+ return MI->getOperand(getMetaIdx(NArgPos)).getImm();
+ }
+
/// Get the operand index of the variable list of non-argument operands.
/// These hold the "live state".
unsigned getVarIdx() const {
- return getMetaIdx() + MetaEnd +
- MI->getOperand(getMetaIdx(NArgPos)).getImm();
+ return getMetaIdx() + MetaEnd + getNumCallArgs();
}
/// Get the index at which stack map locations will be recorded.
/// Arguments are not recorded unless the anyregcc convention is used.
unsigned getStackMapStartIdx() const {
- if (IsAnyReg)
+ if (isAnyReg())
return getArgIdx();
return getVarIdx();
}
@@ -167,7 +219,7 @@ public:
void reset() {
CSInfos.clear();
ConstPool.clear();
- FnStackSize.clear();
+ FnInfos.clear();
}
/// \brief Generate a stackmap record for a stackmap instruction.
@@ -191,7 +243,13 @@ private:
typedef SmallVector<Location, 8> LocationVec;
typedef SmallVector<LiveOutReg, 8> LiveOutVec;
typedef MapVector<uint64_t, uint64_t> ConstantPool;
- typedef MapVector<const MCSymbol *, uint64_t> FnStackSizeMap;
+
+ struct FunctionInfo {
+ uint64_t StackSize;
+ uint64_t RecordCount;
+ FunctionInfo() : StackSize(0), RecordCount(1) {}
+ explicit FunctionInfo(uint64_t StackSize) : StackSize(StackSize), RecordCount(1) {}
+ };
struct CallsiteInfo {
const MCExpr *CSOffsetExpr;
@@ -205,12 +263,13 @@ private:
LiveOuts(std::move(LiveOuts)) {}
};
+ typedef MapVector<const MCSymbol *, FunctionInfo> FnInfoMap;
typedef std::vector<CallsiteInfo> CallsiteInfoList;
AsmPrinter &AP;
CallsiteInfoList CSInfos;
ConstantPool ConstPool;
- FnStackSizeMap FnStackSize;
+ FnInfoMap FnInfos;
MachineInstr::const_mop_iterator
parseOperand(MachineInstr::const_mop_iterator MOI,
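
A hedged usage sketch of the new StackMapOpers class alongside the reworked PatchPointOpers (the TargetOpcode include path is assumed for this release; the locals are only there to show which accessors exist):

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/StackMaps.h"
  #include "llvm/Target/TargetOpcodes.h"
  #include <cstdint>

  // Reads the meta operands of a STACKMAP or PATCHPOINT instruction through
  // the accessor wrappers instead of raw operand indices.
  static void summarizeStackMapLikeMI(const llvm::MachineInstr &MI) {
    if (MI.getOpcode() == llvm::TargetOpcode::STACKMAP) {
      llvm::StackMapOpers Opers(&MI);
      uint64_t ID = Opers.getID();
      uint32_t PatchBytes = Opers.getNumPatchBytes();
      unsigned LiveStart = Opers.getVarIdx(); // live state begins here
      (void)ID; (void)PatchBytes; (void)LiveStart;
    } else if (MI.getOpcode() == llvm::TargetOpcode::PATCHPOINT) {
      llvm::PatchPointOpers Opers(&MI);
      // The anyreg query is now derived from the CC meta operand rather than
      // cached in a member flag.
      bool AnyReg = Opers.isAnyReg();
      unsigned RecordStart = Opers.getStackMapStartIdx();
      (void)AnyReg; (void)RecordStart;
    }
  }
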
diff --git a/include/llvm/CodeGen/TailDuplicator.h b/include/llvm/CodeGen/TailDuplicator.h
index 8e65199418a6..b667245fd3c0 100644
--- a/include/llvm/CodeGen/TailDuplicator.h
+++ b/include/llvm/CodeGen/TailDuplicator.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CODEGEN_TAILDUPLICATOR_H
#define LLVM_CODEGEN_TAILDUPLICATOR_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -26,6 +27,8 @@
namespace llvm {
+extern cl::opt<unsigned> TailDupIndirectBranchSize;
+
/// Utility class to perform tail duplication.
class TailDuplicator {
const TargetInstrInfo *TII;
@@ -33,7 +36,10 @@ class TailDuplicator {
const MachineBranchProbabilityInfo *MBPI;
const MachineModuleInfo *MMI;
MachineRegisterInfo *MRI;
+ MachineFunction *MF;
bool PreRegAlloc;
+ bool LayoutMode;
+ unsigned TailDupSize;
// A list of virtual registers for which to update SSA form.
SmallVector<unsigned, 16> SSAUpdateVRs;
@@ -45,14 +51,33 @@ class TailDuplicator {
DenseMap<unsigned, AvailableValsTy> SSAUpdateVals;
public:
- void initMF(MachineFunction &MF, const MachineModuleInfo *MMI,
- const MachineBranchProbabilityInfo *MBPI);
- bool tailDuplicateBlocks(MachineFunction &MF);
+ /// Prepare to run on a specific machine function.
+ /// @param MF - Function that will be processed
+ /// @param MBPI - Branch Probability Info. Used to propagate correct
+ /// probabilities when modifying the CFG.
+ /// @param LayoutMode - When true, don't use the existing layout to make
+ /// decisions.
+ /// @param TailDupSize - Maximum size of blocks to tail-duplicate. A value
+ /// of zero implies using the command line value TailDupSize.
+ /// @param TailDupSize - Maximum size of blocks to tail-duplicate. A value
+ void initMF(MachineFunction &MF,
+ const MachineBranchProbabilityInfo *MBPI,
+ bool LayoutMode, unsigned TailDupSize = 0);
+ bool tailDuplicateBlocks();
static bool isSimpleBB(MachineBasicBlock *TailBB);
- bool shouldTailDuplicate(const MachineFunction &MF, bool IsSimple,
- MachineBasicBlock &TailBB);
- bool tailDuplicateAndUpdate(MachineFunction &MF, bool IsSimple,
- MachineBasicBlock *MBB);
+ bool shouldTailDuplicate(bool IsSimple, MachineBasicBlock &TailBB);
+ /// Returns true if TailBB can successfully be duplicated into PredBB
+ bool canTailDuplicate(MachineBasicBlock *TailBB, MachineBasicBlock *PredBB);
+ /// Tail duplicate a single basic block into its predecessors, and then clean
+ /// up.
+ /// If \p DuplicatedPreds is not null, it will be updated to contain the
+ /// list of predecessors that received a copy of \p MBB.
+ /// If \p RemovalCallback is non-null, it will be called before MBB is
+ /// deleted.
+ bool tailDuplicateAndUpdate(
+ bool IsSimple, MachineBasicBlock *MBB,
+ MachineBasicBlock *ForcedLayoutPred,
+ SmallVectorImpl<MachineBasicBlock*> *DuplicatedPreds = nullptr,
+ llvm::function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr);
private:
typedef TargetInstrInfo::RegSubRegPair RegSubRegPair;
@@ -65,7 +90,7 @@ private:
SmallVectorImpl<std::pair<unsigned, RegSubRegPair>> &Copies,
const DenseSet<unsigned> &UsedByPhi, bool Remove);
void duplicateInstruction(MachineInstr *MI, MachineBasicBlock *TailBB,
- MachineBasicBlock *PredBB, MachineFunction &MF,
+ MachineBasicBlock *PredBB,
DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
const DenseSet<unsigned> &UsedByPhi);
void updateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
@@ -76,15 +101,18 @@ private:
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
const DenseSet<unsigned> &RegsUsedByPhi,
SmallVectorImpl<MachineInstr *> &Copies);
- bool tailDuplicate(MachineFunction &MF, bool IsSimple,
+ bool tailDuplicate(bool IsSimple,
MachineBasicBlock *TailBB,
+ MachineBasicBlock *ForcedLayoutPred,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallVectorImpl<MachineInstr *> &Copies);
void appendCopies(MachineBasicBlock *MBB,
SmallVectorImpl<std::pair<unsigned,RegSubRegPair>> &CopyInfos,
SmallVectorImpl<MachineInstr *> &Copies);
- void removeDeadBlock(MachineBasicBlock *MBB);
+ void removeDeadBlock(
+ MachineBasicBlock *MBB,
+ llvm::function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr);
};
} // End llvm namespace
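
A minimal sketch of driving the updated interface (MF and MBPI are assumed to come from the calling pass's analyses):

  #include "llvm/CodeGen/TailDuplicator.h"

  // initMF now caches the MachineFunction and takes the new LayoutMode flag,
  // so the per-call MachineFunction parameters disappear from the other
  // entry points such as tailDuplicateBlocks().
  static bool runTailDuplication(llvm::MachineFunction &MF,
                                 const llvm::MachineBranchProbabilityInfo *MBPI) {
    llvm::TailDuplicator Duplicator;
    Duplicator.initMF(MF, MBPI, /*LayoutMode=*/false);
    return Duplicator.tailDuplicateBlocks();
  }
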
diff --git a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index c856435f5ddc..cc71fa3918a1 100644
--- a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -33,7 +33,7 @@ namespace llvm {
class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
bool UseInitArray;
- mutable unsigned NextUniqueID = 0;
+ mutable unsigned NextUniqueID = 1; // ID 0 is reserved for execute-only sections
protected:
MCSymbolRefExpr::VariantKind PLTRelativeVariantKind =
@@ -53,15 +53,13 @@ public:
const Constant *C,
unsigned &Align) const override;
- MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
+ MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
- MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
+ MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
- MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
+ MCSection *getSectionForJumpTable(const Function &F,
const TargetMachine &TM) const override;
bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
@@ -69,14 +67,14 @@ public:
/// Return an MCExpr to use for a reference to the specified type info global
/// variable from exception handling information.
- const MCExpr *
- getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
- Mangler &Mang, const TargetMachine &TM,
- MachineModuleInfo *MMI,
- MCStreamer &Streamer) const override;
+ const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+ unsigned Encoding,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
// The symbol that gets passed to .cfi_personality.
- MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
+ MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
const TargetMachine &TM,
MachineModuleInfo *MMI) const override;
@@ -87,7 +85,7 @@ public:
const MCSymbol *KeySym) const override;
const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
- const GlobalValue *RHS, Mangler &Mang,
+ const GlobalValue *RHS,
const TargetMachine &TM) const override;
};
@@ -98,17 +96,17 @@ public:
~TargetLoweringObjectFileMachO() override {}
TargetLoweringObjectFileMachO();
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
/// Emit the module flags that specify the garbage collection information.
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- Mangler &Mang, const TargetMachine &TM) const override;
+ const TargetMachine &TM) const override;
- MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
+ MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
- MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
+ MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
@@ -116,14 +114,14 @@ public:
unsigned &Align) const override;
/// The mach-o version of this method defaults to returning a stub reference.
- const MCExpr *
- getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
- Mangler &Mang, const TargetMachine &TM,
- MachineModuleInfo *MMI,
- MCStreamer &Streamer) const override;
+ const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+ unsigned Encoding,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
// The symbol that gets passed to .cfi_personality.
- MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
+ MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
const TargetMachine &TM,
MachineModuleInfo *MMI) const override;
@@ -134,7 +132,7 @@ public:
MCStreamer &Streamer) const override;
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
- Mangler &Mang, const TargetMachine &TM) const override;
+ const TargetMachine &TM) const override;
};
@@ -145,33 +143,32 @@ class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
public:
~TargetLoweringObjectFileCOFF() override {}
- MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+ MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
- MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
+ MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
- Mangler &Mang, const TargetMachine &TM) const override;
+ const TargetMachine &TM) const override;
- MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
+ MCSection *getSectionForJumpTable(const Function &F,
const TargetMachine &TM) const override;
/// Emit Obj-C garbage collection and linker options. Only linker option
/// emission is implemented for COFF.
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- Mangler &Mang, const TargetMachine &TM) const override;
+ const TargetMachine &TM) const override;
MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
MCSection *getStaticDtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
- void emitLinkerFlagsForGlobal(raw_ostream &OS, const GlobalValue *GV,
- const Mangler &Mang) const override;
+ void emitLinkerFlagsForGlobal(raw_ostream &OS,
+ const GlobalValue *GV) const override;
};
} // end namespace llvm
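
A brief sketch of calling one of the simplified signatures above now that Mangler is no longer threaded through (TLOF, GV and TM are assumed to come from the caller's AsmPrinter context, and the base-class declaration is assumed to match the overrides shown here):

  #include "llvm/ADT/SmallString.h"
  #include "llvm/Target/TargetLoweringObjectFile.h"

  static void printSymbolName(const llvm::TargetLoweringObjectFile &TLOF,
                              const llvm::GlobalValue *GV,
                              const llvm::TargetMachine &TM) {
    llvm::SmallString<64> Name;
    // Previously this call also required a Mangler argument.
    TLOF.getNameWithPrefix(Name, GV, TM);
  }
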
diff --git a/include/llvm/CodeGen/TargetPassConfig.h b/include/llvm/CodeGen/TargetPassConfig.h
index 9309655a972e..2287f9aca4bf 100644
--- a/include/llvm/CodeGen/TargetPassConfig.h
+++ b/include/llvm/CodeGen/TargetPassConfig.h
@@ -94,8 +94,10 @@ public:
private:
PassManagerBase *PM;
- AnalysisID StartBefore, StartAfter;
- AnalysisID StopAfter;
+ AnalysisID StartBefore = nullptr;
+ AnalysisID StartAfter = nullptr;
+ AnalysisID StopBefore = nullptr;
+ AnalysisID StopAfter = nullptr;
bool Started;
bool Stopped;
bool AddingMachinePasses;
@@ -143,11 +145,14 @@ public:
/// This function expects that at least one of the StartAfter or the
/// StartBefore pass IDs is null.
void setStartStopPasses(AnalysisID StartBefore, AnalysisID StartAfter,
- AnalysisID StopAfter) {
- if (StartAfter)
- assert(!StartBefore && "Start after and start before passes are given");
+ AnalysisID StopBefore, AnalysisID StopAfter) {
+ assert(!(StartBefore && StartAfter) &&
+ "Start after and start before passes are given");
+ assert(!(StopBefore && StopAfter) &&
+ "Stop after and stop before passed are given");
this->StartBefore = StartBefore;
this->StartAfter = StartAfter;
+ this->StopBefore = StopBefore;
this->StopAfter = StopAfter;
Started = (StartAfter == nullptr) && (StartBefore == nullptr);
}
@@ -218,6 +223,14 @@ public:
virtual bool addIRTranslator() { return true; }
/// This method may be implemented by targets that want to run passes
+ /// immediately before legalization.
+ virtual void addPreLegalizeMachineIR() {}
+
+ /// This method should install a legalize pass, which converts the instruction
+ /// sequence into one that can be selected by the target.
+ virtual bool addLegalizeMachineIR() { return true; }
+
+ /// This method may be implemented by targets that want to run passes
/// immediately before the register bank selection.
virtual void addPreRegBankSelect() {}
@@ -226,6 +239,16 @@ public:
/// class or register banks.
virtual bool addRegBankSelect() { return true; }
+ /// This method may be implemented by targets that want to run passes
+ /// immediately before the (global) instruction selection.
+ virtual void addPreGlobalInstructionSelect() {}
+
+ /// This method should install a (global) instruction selector pass, which
+ /// converts possibly generic instructions to fully target-specific
+ /// instructions, thereby constraining all generic virtual registers to
+ /// register classes.
+ virtual bool addGlobalInstructionSelect() { return true; }
+
/// Add the complete, standard set of LLVM CodeGen passes.
/// Fully developed targets will not generally override this.
virtual void addMachinePasses();
@@ -263,6 +286,16 @@ public:
/// verification is enabled.
void addVerifyPass(const std::string &Banner);
+ /// Check whether or not GlobalISel should abort on error.
+ /// When this is disabled, GlobalISel will fall back on SDISel instead of
+ /// erroring out.
+ virtual bool isGlobalISelAbortEnabled() const;
+
+ /// Check whether or not a diagnostic should be emitted when GlobalISel
+ /// uses the fallback path. In other words, it will emit a diagnostic
+ /// when GlobalISel failed and isGlobalISelAbortEnabled is false.
+ virtual bool reportDiagnosticWhenGlobalISelFallback() const;
+
protected:
// Helper to verify the analysis is really immutable.
void setOpt(bool &Opt, bool Val);
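
To illustrate the new GlobalISel hooks, here is a hypothetical target pass-config sketch; constructor boilerplate is omitted, and the return-false-on-success convention mentioned in the comment is inferred from how the surrounding hooks are used rather than stated in this hunk.

  #include "llvm/CodeGen/TargetPassConfig.h"

  // Hypothetical target configuration exercising the new hooks. A real target
  // would also override addLegalizeMachineIR()/addGlobalInstructionSelect()
  // to addPass() its legalizer and selector and then return false.
  class MyTargetPassConfig : public llvm::TargetPassConfig {
  public:
    // Use the fallback path: do not abort on GlobalISel failure, but report it.
    bool isGlobalISelAbortEnabled() const override { return false; }
    bool reportDiagnosticWhenGlobalISelFallback() const override { return true; }

    // New pre-pass hooks added in this revision; left empty in this sketch.
    void addPreLegalizeMachineIR() override {}
    void addPreGlobalInstructionSelect() override {}
  };
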
diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h
index 524a90803df8..2699fa28f0f1 100644
--- a/include/llvm/CodeGen/ValueTypes.h
+++ b/include/llvm/CodeGen/ValueTypes.h
@@ -34,9 +34,9 @@ namespace llvm {
Type *LLVMTy;
public:
- LLVM_CONSTEXPR EVT() : V(MVT::INVALID_SIMPLE_VALUE_TYPE), LLVMTy(nullptr) {}
- LLVM_CONSTEXPR EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(nullptr) {}
- LLVM_CONSTEXPR EVT(MVT S) : V(S), LLVMTy(nullptr) {}
+ constexpr EVT() : V(MVT::INVALID_SIMPLE_VALUE_TYPE), LLVMTy(nullptr) {}
+ constexpr EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(nullptr) {}
+ constexpr EVT(MVT S) : V(S), LLVMTy(nullptr) {}
bool operator==(EVT VT) const {
return !(*this != VT);
diff --git a/include/llvm/Config/abi-breaking.h.cmake b/include/llvm/Config/abi-breaking.h.cmake
new file mode 100644
index 000000000000..e5697f79e93d
--- /dev/null
+++ b/include/llvm/Config/abi-breaking.h.cmake
@@ -0,0 +1,48 @@
+/*===------- llvm/Config/abi-breaking.h - llvm configuration -------*- C -*-===*/
+/* */
+/* The LLVM Compiler Infrastructure */
+/* */
+/* This file is distributed under the University of Illinois Open Source */
+/* License. See LICENSE.TXT for details. */
+/* */
+/*===----------------------------------------------------------------------===*/
+
+/* This file controls the C++ ABI break introduced in LLVM public headers. */
+
+#ifndef LLVM_ABI_BREAKING_CHECKS_H
+#define LLVM_ABI_BREAKING_CHECKS_H
+
+/* Define to enable checks that alter the LLVM C++ ABI */
+#cmakedefine01 LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+/* Define to disable the link-time checking of mismatch for
+ LLVM_ENABLE_ABI_BREAKING_CHECKS */
+#cmakedefine01 LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING
+#if !LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING
+
+// ABI_BREAKING_CHECKS protection: provides a link-time failure when clients
+// are built with a configuration that does not match the LLVM they link against
+#if defined(_MSC_VER)
+// Use pragma with MSVC
+#define LLVM_XSTR(s) LLVM_STR(s)
+#define LLVM_STR(s) #s
+#pragma detect_mismatch("LLVM_ENABLE_ABI_BREAKING_CHECKS", LLVM_XSTR(LLVM_ENABLE_ABI_BREAKING_CHECKS))
+#undef LLVM_XSTR
+#undef LLVM_STR
+#elif defined(_WIN32) || defined(__CYGWIN__) // Win32 w/o #pragma detect_mismatch
+// FIXME: Implement checks without weak.
+#elif defined(__cplusplus)
+namespace llvm {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+extern int EnableABIBreakingChecks;
+__attribute__((weak, visibility ("hidden"))) int *VerifyEnableABIBreakingChecks = &EnableABIBreakingChecks;
+#else
+extern int DisableABIBreakingChecks;
+__attribute__((weak, visibility ("hidden"))) int *VerifyDisableABIBreakingChecks = &DisableABIBreakingChecks;
+#endif
+}
+#endif // _MSC_VER
+
+#endif // LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING
+
+#endif
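
For readers unfamiliar with the trick used above, here is a stripped-down sketch of the weak-symbol mismatch check using hypothetical names (EnableFeatureXChecks and friends are placeholders, not LLVM symbols): every client that includes the header emits a weak, hidden pointer to a symbol that only the matching library build defines, so a configuration mismatch surfaces as an undefined reference at link time instead of a silent ABI break.

  // In the library, compiled with the feature enabled:
  namespace mylib { int EnableFeatureXChecks; }   // strong definition

  // In the public header, as seen by clients built with the same setting:
  namespace mylib {
  extern int EnableFeatureXChecks;
  __attribute__((weak, visibility("hidden")))
  int *VerifyEnableFeatureXChecks = &EnableFeatureXChecks;
  }
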
diff --git a/include/llvm/Config/config.h.cmake b/include/llvm/Config/config.h.cmake
index 45b30d9171d9..ff41d75373c8 100644
--- a/include/llvm/Config/config.h.cmake
+++ b/include/llvm/Config/config.h.cmake
@@ -7,62 +7,50 @@
/* Bug report URL. */
#define BUG_REPORT_URL "${BUG_REPORT_URL}"
-/* Define if you want backtraces on crash */
-#cmakedefine ENABLE_BACKTRACES
+/* Define to 1 to enable backtraces, and to 0 otherwise. */
+#cmakedefine01 ENABLE_BACKTRACES
-/* Define to enable crash overrides */
-#cmakedefine ENABLE_CRASH_OVERRIDES
-
-/* Define if position independent code is enabled */
-#cmakedefine ENABLE_PIC
-
-/* Define to 1 if you have the `arc4random' function. */
-#cmakedefine HAVE_DECL_ARC4RANDOM ${HAVE_DECL_ARC4RANDOM}
+/* Define to 1 to enable crash overrides, and to 0 otherwise. */
+#cmakedefine01 ENABLE_CRASH_OVERRIDES
/* Define to 1 if you have the `backtrace' function. */
#cmakedefine HAVE_BACKTRACE ${HAVE_BACKTRACE}
-/* Define to 1 if you have the `bcopy' function. */
-#undef HAVE_BCOPY
+/* Define to 1 if you have the <CrashReporterClient.h> header file. */
+#cmakedefine HAVE_CRASHREPORTERCLIENT_H
-/* Define to 1 if you have the `closedir' function. */
-#cmakedefine HAVE_CLOSEDIR ${HAVE_CLOSEDIR}
+/* can use __crashreporter_info__ */
+#cmakedefine01 HAVE_CRASHREPORTER_INFO
-/* Define to 1 if you have the <cxxabi.h> header file. */
-#cmakedefine HAVE_CXXABI_H ${HAVE_CXXABI_H}
+/* Define to 1 if you have the declaration of `arc4random', and to 0 if you
+ don't. */
+#cmakedefine01 HAVE_DECL_ARC4RANDOM
-/* Define to 1 if you have the <CrashReporterClient.h> header file. */
-#undef HAVE_CRASHREPORTERCLIENT_H
+/* Define to 1 if you have the declaration of `FE_ALL_EXCEPT', and to 0 if you
+ don't. */
+#cmakedefine01 HAVE_DECL_FE_ALL_EXCEPT
-/* can use __crashreporter_info__ */
-#undef HAVE_CRASHREPORTER_INFO
+/* Define to 1 if you have the declaration of `FE_INEXACT', and to 0 if you
+ don't. */
+#cmakedefine01 HAVE_DECL_FE_INEXACT
/* Define to 1 if you have the declaration of `strerror_s', and to 0 if you
don't. */
#cmakedefine01 HAVE_DECL_STRERROR_S
/* Define to 1 if you have the DIA SDK installed, and to 0 if you don't. */
-#cmakedefine HAVE_DIA_SDK ${HAVE_DIA_SDK}
+#cmakedefine01 LLVM_ENABLE_DIA_SDK
/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
*/
#cmakedefine HAVE_DIRENT_H ${HAVE_DIRENT_H}
-/* Define if you have the GNU dld library. */
-#undef HAVE_DLD
-
-/* Define to 1 if you have the `dlerror' function. */
-#cmakedefine HAVE_DLERROR ${HAVE_DLERROR}
-
/* Define to 1 if you have the <dlfcn.h> header file. */
#cmakedefine HAVE_DLFCN_H ${HAVE_DLFCN_H}
/* Define if dlopen() is available on this platform. */
#cmakedefine HAVE_DLOPEN ${HAVE_DLOPEN}
-/* Define if you have the _dyld_func_lookup function. */
-#undef HAVE_DYLD
-
/* Define to 1 if you have the <errno.h> header file. */
#cmakedefine HAVE_ERRNO_H ${HAVE_ERRNO_H}
@@ -84,12 +72,12 @@
/* Define to 1 if you have the <ffi.h> header file. */
#cmakedefine HAVE_FFI_H ${HAVE_FFI_H}
+/* Define to 1 if you have the `futimens' function. */
+#cmakedefine HAVE_FUTIMENS ${HAVE_FUTIMENS}
+
/* Define to 1 if you have the `futimes' function. */
#cmakedefine HAVE_FUTIMES ${HAVE_FUTIMES}
-/* Define to 1 if you have the `futimens' function */
-#cmakedefine HAVE_FUTIMENS ${HAVE_FUTIMENS}
-
/* Define to 1 if you have the `getcwd' function. */
#cmakedefine HAVE_GETCWD ${HAVE_GETCWD}
@@ -114,14 +102,8 @@
/* Define to 1 if you have the `isatty' function. */
#cmakedefine HAVE_ISATTY 1
-/* Define if you have the libdl library or equivalent. */
-#cmakedefine HAVE_LIBDL ${HAVE_LIBDL}
-
-/* Define to 1 if you have the `m' library (-lm). */
-#undef HAVE_LIBM
-
-/* Define to 1 if you have the `ole32' library (-lole32). */
-#undef HAVE_LIBOLE32
+/* Define to 1 if you have the `edit' library (-ledit). */
+#cmakedefine HAVE_LIBEDIT ${HAVE_LIBEDIT}
/* Define to 1 if you have the `psapi' library (-lpsapi). */
#cmakedefine HAVE_LIBPSAPI ${HAVE_LIBPSAPI}
@@ -132,35 +114,22 @@
/* Define to 1 if you have the `shell32' library (-lshell32). */
#cmakedefine HAVE_LIBSHELL32 ${HAVE_LIBSHELL32}
-/* Define to 1 if you have the 'z' library (-lz). */
+/* Define to 1 if you have the `z' library (-lz). */
#cmakedefine HAVE_LIBZ ${HAVE_LIBZ}
-/* Define to 1 if you have the 'edit' library (-ledit). */
-#cmakedefine HAVE_LIBEDIT ${HAVE_LIBEDIT}
-
-/* Define to 1 if you have the <limits.h> header file. */
-#cmakedefine HAVE_LIMITS_H ${HAVE_LIMITS_H}
-
/* Define to 1 if you have the <link.h> header file. */
#cmakedefine HAVE_LINK_H ${HAVE_LINK_H}
-/* Define if you can use -rdynamic. */
-#define HAVE_LINK_EXPORT_DYNAMIC 1
-
-/* Define if you can use -Wl,-R. to pass -R. to the linker, in order to add
- the current directory to the dynamic linker search path. */
-#undef HAVE_LINK_R
-
-/* Define to 1 if you have the `longjmp' function. */
-#cmakedefine HAVE_LONGJMP ${HAVE_LONGJMP}
+/* Define to 1 if you have the `lseek64' function. */
+#cmakedefine HAVE_LSEEK64 ${HAVE_LSEEK64}
/* Define to 1 if you have the <mach/mach.h> header file. */
#cmakedefine HAVE_MACH_MACH_H ${HAVE_MACH_MACH_H}
-/* Define to 1 if you have the <mach-o/dyld.h> header file. */
-#cmakedefine HAVE_MACH_O_DYLD_H ${HAVE_MACH_O_DYLD_H}
+/* Define to 1 if you have the `mallctl' function. */
+#cmakedefine HAVE_MALLCTL ${HAVE_MALLCTL}
-/* Define if mallinfo() is available on this platform. */
+/* Define to 1 if you have the `mallinfo' function. */
#cmakedefine HAVE_MALLINFO ${HAVE_MALLINFO}
/* Define to 1 if you have the <malloc.h> header file. */
@@ -172,9 +141,6 @@
/* Define to 1 if you have the `malloc_zone_statistics' function. */
#cmakedefine HAVE_MALLOC_ZONE_STATISTICS ${HAVE_MALLOC_ZONE_STATISTICS}
-/* Define to 1 if you have the `mallctl` function. */
-#cmakedefine HAVE_MALLCTL ${HAVE_MALLCTL}
-
/* Define to 1 if you have the `mkdtemp' function. */
#cmakedefine HAVE_MKDTEMP ${HAVE_MKDTEMP}
@@ -184,21 +150,11 @@
/* Define to 1 if you have the `mktemp' function. */
#cmakedefine HAVE_MKTEMP ${HAVE_MKTEMP}
-/* Define to 1 if you have a working `mmap' system call. */
-#undef HAVE_MMAP
-
-/* Define if mmap() uses MAP_ANONYMOUS to map anonymous pages, or undefine if
- it uses MAP_ANON */
-#undef HAVE_MMAP_ANONYMOUS
-
-/* Define if mmap() can map files into memory */
-#undef HAVE_MMAP_FILE
-
/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
#cmakedefine HAVE_NDIR_H ${HAVE_NDIR_H}
-/* Define to 1 if you have the `opendir' function. */
-#cmakedefine HAVE_OPENDIR ${HAVE_OPENDIR}
+/* Define to 1 if you have the `posix_fallocate' function. */
+#cmakedefine HAVE_POSIX_FALLOCATE ${HAVE_POSIX_FALLOCATE}
/* Define to 1 if you have the `posix_spawn' function. */
#cmakedefine HAVE_POSIX_SPAWN ${HAVE_POSIX_SPAWN}
@@ -206,12 +162,6 @@
/* Define to 1 if you have the `pread' function. */
#cmakedefine HAVE_PREAD ${HAVE_PREAD}
-/* Define if libtool can extract symbol lists from object files. */
-#undef HAVE_PRELOADED_SYMBOLS
-
-/* Define to have the %a format string */
-#undef HAVE_PRINTF_A
-
/* Have pthread_getspecific */
#cmakedefine HAVE_PTHREAD_GETSPECIFIC ${HAVE_PTHREAD_GETSPECIFIC}
@@ -224,12 +174,6 @@
/* Have pthread_rwlock_init */
#cmakedefine HAVE_PTHREAD_RWLOCK_INIT ${HAVE_PTHREAD_RWLOCK_INIT}
-/* Define to 1 if srand48/lrand48/drand48 exist in <stdlib.h> */
-#cmakedefine HAVE_RAND48 ${HAVE_RAND48}
-
-/* Define to 1 if you have the `readdir' function. */
-#cmakedefine HAVE_READDIR ${HAVE_READDIR}
-
/* Define to 1 if you have the `realpath' function. */
#cmakedefine HAVE_REALPATH ${HAVE_REALPATH}
@@ -239,39 +183,18 @@
/* Define to 1 if you have the `setenv' function. */
#cmakedefine HAVE_SETENV ${HAVE_SETENV}
-/* Define to 1 if you have the `setjmp' function. */
-#cmakedefine HAVE_SETJMP ${HAVE_SETJMP}
-
/* Define to 1 if you have the `setrlimit' function. */
#cmakedefine HAVE_SETRLIMIT ${HAVE_SETRLIMIT}
-/* Define if you have the shl_load function. */
-#undef HAVE_SHL_LOAD
-
/* Define to 1 if you have the `sigaltstack' function. */
#cmakedefine HAVE_SIGALTSTACK ${HAVE_SIGALTSTACK}
-/* Define to 1 if you have the `siglongjmp' function. */
-#cmakedefine HAVE_SIGLONGJMP ${HAVE_SIGLONGJMP}
-
/* Define to 1 if you have the <signal.h> header file. */
#cmakedefine HAVE_SIGNAL_H ${HAVE_SIGNAL_H}
-/* Define to 1 if you have the `sigsetjmp' function. */
-#cmakedefine HAVE_SIGSETJMP ${HAVE_SIGSETJMP}
-
/* Define to 1 if you have the <stdint.h> header file. */
#cmakedefine HAVE_STDINT_H ${HAVE_STDINT_H}
-/* Set to 1 if the std::isinf function is found in <cmath> */
-#undef HAVE_STD_ISINF_IN_CMATH
-
-/* Set to 1 if the std::isnan function is found in <cmath> */
-#undef HAVE_STD_ISNAN_IN_CMATH
-
-/* Define to 1 if you have the `strdup' function. */
-#cmakedefine HAVE_STRDUP ${HAVE_STRDUP}
-
/* Define to 1 if you have the `strerror' function. */
#cmakedefine HAVE_STRERROR ${HAVE_STRERROR}
@@ -281,11 +204,8 @@
/* Define to 1 if you have the `strtoll' function. */
#cmakedefine HAVE_STRTOLL ${HAVE_STRTOLL}
-/* Define to 1 if you have the `strtoq' function. */
-#cmakedefine HAVE_STRTOQ ${HAVE_STRTOQ}
-
/* Define to 1 if you have the `sysconf' function. */
-#undef HAVE_SYSCONF
+#cmakedefine HAVE_SYSCONF ${HAVE_SYSCONF}
/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
*/
@@ -295,7 +215,7 @@
#cmakedefine HAVE_SYS_IOCTL_H ${HAVE_SYS_IOCTL_H}
/* Define to 1 if you have the <sys/mman.h> header file. */
-#cmakedefine HAVE_SYS_MMAN_H ${}
+#cmakedefine HAVE_SYS_MMAN_H ${HAVE_SYS_MMAN_H}
/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
*/
@@ -334,12 +254,6 @@
/* Define to 1 if you have the <unistd.h> header file. */
#cmakedefine HAVE_UNISTD_H ${HAVE_UNISTD_H}
-/* Define to 1 if you have the `_Unwind_Backtrace' function. */
-#cmakedefine HAVE_UNWIND_BACKTRACE ${HAVE_UNWIND_BACKTRACE}
-
-/* Define to 1 if you have the <utime.h> header file. */
-#cmakedefine HAVE_UTIME_H ${HAVE_UTIME_H}
-
/* Define to 1 if the system has the type `u_int64_t'. */
#cmakedefine HAVE_U_INT64_T ${HAVE_U_INT64_T}
@@ -355,6 +269,12 @@
/* Have host's _alloca */
#cmakedefine HAVE__ALLOCA ${HAVE__ALLOCA}
+/* Define to 1 if you have the `_chsize_s' function. */
+#cmakedefine HAVE__CHSIZE_S ${HAVE__CHSIZE_S}
+
+/* Define to 1 if you have the `_Unwind_Backtrace' function. */
+#cmakedefine HAVE__UNWIND_BACKTRACE ${HAVE__UNWIND_BACKTRACE}
+
/* Have host's __alloca */
#cmakedefine HAVE___ALLOCA ${HAVE___ALLOCA}
@@ -376,9 +296,6 @@
/* Have host's __divdi3 */
#cmakedefine HAVE___DIVDI3 ${HAVE___DIVDI3}
-/* Define to 1 if you have the `__dso_handle' function. */
-#undef HAVE___DSO_HANDLE
-
/* Have host's __fixdfdi */
#cmakedefine HAVE___FIXDFDI ${HAVE___FIXDFDI}
@@ -410,53 +327,27 @@
#cmakedefine HAVE____CHKSTK_MS ${HAVE____CHKSTK_MS}
/* Linker version detected at compile time. */
-#undef HOST_LINK_VERSION
-
-/* Installation directory for binary executables */
-#cmakedefine LLVM_BINDIR "${LLVM_BINDIR}"
+#cmakedefine HOST_LINK_VERSION "${HOST_LINK_VERSION}"
-/* Time at which LLVM was configured */
-#cmakedefine LLVM_CONFIGTIME "${LLVM_CONFIGTIME}"
-
-/* Installation directory for data files */
-#cmakedefine LLVM_DATADIR "${LLVM_DATADIR}"
+/* Define if we link Polly to the tools */
+#cmakedefine LINK_POLLY_INTO_TOOLS
-/* Target triple LLVM will generate code for by default
- * Doesn't use `cmakedefine` because it is allowed to be empty.
- */
+/* Target triple LLVM will generate code for by default */
+/* Doesn't use `cmakedefine` because it is allowed to be empty. */
#define LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}"
-/* Installation directory for documentation */
-#cmakedefine LLVM_DOCSDIR "${LLVM_DOCSDIR}"
-
-/* Define if LLVM is built with asserts and checks that change the layout of
- client-visible data structures. */
-#cmakedefine LLVM_ENABLE_ABI_BREAKING_CHECKS
-
/* Define if threads enabled */
#cmakedefine01 LLVM_ENABLE_THREADS
/* Define if zlib compression is available */
#cmakedefine01 LLVM_ENABLE_ZLIB
-/* Installation directory for config files */
-#cmakedefine LLVM_ETCDIR "${LLVM_ETCDIR}"
-
/* Has gcc/MSVC atomic intrinsics */
#cmakedefine01 LLVM_HAS_ATOMICS
/* Host triple LLVM will be executed on */
#cmakedefine LLVM_HOST_TRIPLE "${LLVM_HOST_TRIPLE}"
-/* Installation directory for include files */
-#cmakedefine LLVM_INCLUDEDIR "${LLVM_INCLUDEDIR}"
-
-/* Installation directory for .info files */
-#cmakedefine LLVM_INFODIR "${LLVM_INFODIR}"
-
-/* Installation directory for man pages */
-#cmakedefine LLVM_MANDIR "${LLVM_MANDIR}"
-
/* LLVM architecture name for the native architecture, if available */
#cmakedefine LLVM_NATIVE_ARCH ${LLVM_NATIVE_ARCH}
@@ -488,10 +379,13 @@
#cmakedefine LLVM_PREFIX "${LLVM_PREFIX}"
/* Define if we have the Intel JIT API runtime support library */
-#cmakedefine LLVM_USE_INTEL_JITEVENTS 1
+#cmakedefine01 LLVM_USE_INTEL_JITEVENTS
/* Define if we have the oprofile JIT-support library */
-#cmakedefine LLVM_USE_OPROFILE 1
+#cmakedefine01 LLVM_USE_OPROFILE
+
+/* LLVM version information */
+#cmakedefine LLVM_VERSION_INFO "${LLVM_VERSION_INFO}"
/* Major version of the LLVM API */
#define LLVM_VERSION_MAJOR ${LLVM_VERSION_MAJOR}
@@ -505,32 +399,9 @@
/* LLVM version string */
#define LLVM_VERSION_STRING "${PACKAGE_VERSION}"
-/* LLVM version information */
-#cmakedefine LLVM_VERSION_INFO "${LLVM_VERSION_INFO}"
-
-/* Define if we link Polly to the tools */
-#cmakedefine LINK_POLLY_INTO_TOOLS
-
-/* Define if the OS needs help to load dependent libraries for dlopen(). */
-#cmakedefine LTDL_DLOPEN_DEPLIBS ${LTDL_DLOPEN_DEPLIBS}
-
-/* Define to the sub-directory in which libtool stores uninstalled libraries.
- */
-#undef LTDL_OBJDIR
-
/* Define to the extension used for shared libraries, say, ".so". */
#cmakedefine LTDL_SHLIB_EXT "${LTDL_SHLIB_EXT}"
-/* Define to the system default library search path. */
-#cmakedefine LTDL_SYSSEARCHPATH "${LTDL_SYSSEARCHPATH}"
-
-/* Define if /dev/zero should be used when mapping RWX memory, or undefine if
- its not necessary */
-#undef NEED_DEV_ZERO_FOR_MMAP
-
-/* Define if dlsym() requires a leading underscore in symbol names. */
-#undef NEED_USCORE
-
/* Define to the address where bug reports for this package should be sent. */
#cmakedefine PACKAGE_BUGREPORT "${PACKAGE_BUGREPORT}"
@@ -552,24 +423,6 @@
/* Define as the return type of signal handlers (`int' or `void'). */
#cmakedefine RETSIGTYPE ${RETSIGTYPE}
-/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
-#undef STAT_MACROS_BROKEN
-
-/* Define to 1 if you have the ANSI C header files. */
-#undef STDC_HEADERS
-
-/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
-#undef TIME_WITH_SYS_TIME
-
-/* Define to 1 if your <sys/time.h> declares `struct tm'. */
-#undef TM_IN_SYS_TIME
-
-/* Define to `int' if <sys/types.h> does not define. */
-#undef pid_t
-
-/* Define to `unsigned int' if <sys/types.h> does not define. */
-#undef size_t
-
/* Define to a function replacing strtoll */
#cmakedefine strtoll ${strtoll}
@@ -582,7 +435,4 @@
/* Define to a function implementing strdup */
#cmakedefine strdup ${strdup}
-/* Define to 1 if you have the `_chsize_s' function. */
-#cmakedefine HAVE__CHSIZE_S ${HAVE__CHSIZE_S}
-
#endif
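
The switch from #cmakedefine to #cmakedefine01 above changes how code that includes the generated header must test these macros; a short sketch of both flavours (the guarded bodies are placeholders):

  #include "llvm/Config/config.h"

  #if ENABLE_BACKTRACES        // #cmakedefine01: always defined, to 0 or 1
  // ... register the backtrace-on-crash handlers ...
  #endif

  #ifdef HAVE_BACKTRACE        // plain #cmakedefine: defined only when true
  // ... call ::backtrace() ...
  #endif
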
diff --git a/include/llvm/Config/llvm-config.h.cmake b/include/llvm/Config/llvm-config.h.cmake
index e0f30678c0ab..359997057618 100644
--- a/include/llvm/Config/llvm-config.h.cmake
+++ b/include/llvm/Config/llvm-config.h.cmake
@@ -14,46 +14,21 @@
#ifndef LLVM_CONFIG_H
#define LLVM_CONFIG_H
-/* Installation directory for binary executables */
-#cmakedefine LLVM_BINDIR "${LLVM_BINDIR}"
-
-/* Time at which LLVM was configured */
-#cmakedefine LLVM_CONFIGTIME "${LLVM_CONFIGTIME}"
-
-/* Installation directory for data files */
-#cmakedefine LLVM_DATADIR "${LLVM_DATADIR}"
+/* Define if we link Polly to the tools */
+#cmakedefine LINK_POLLY_INTO_TOOLS
/* Target triple LLVM will generate code for by default */
#cmakedefine LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}"
-/* Installation directory for documentation */
-#cmakedefine LLVM_DOCSDIR "${LLVM_DOCSDIR}"
-
-/* Define if LLVM is built with asserts and checks that change the layout of
- client-visible data structures. */
-#cmakedefine LLVM_ENABLE_ABI_BREAKING_CHECKS
-
/* Define if threads enabled */
#cmakedefine01 LLVM_ENABLE_THREADS
-/* Installation directory for config files */
-#cmakedefine LLVM_ETCDIR "${LLVM_ETCDIR}"
-
/* Has gcc/MSVC atomic intrinsics */
#cmakedefine01 LLVM_HAS_ATOMICS
/* Host triple LLVM will be executed on */
#cmakedefine LLVM_HOST_TRIPLE "${LLVM_HOST_TRIPLE}"
-/* Installation directory for include files */
-#cmakedefine LLVM_INCLUDEDIR "${LLVM_INCLUDEDIR}"
-
-/* Installation directory for .info files */
-#cmakedefine LLVM_INFODIR "${LLVM_INFODIR}"
-
-/* Installation directory for man pages */
-#cmakedefine LLVM_MANDIR "${LLVM_MANDIR}"
-
/* LLVM architecture name for the native architecture, if available */
#cmakedefine LLVM_NATIVE_ARCH ${LLVM_NATIVE_ARCH}
@@ -85,10 +60,10 @@
#cmakedefine LLVM_PREFIX "${LLVM_PREFIX}"
/* Define if we have the Intel JIT API runtime support library */
-#cmakedefine LLVM_USE_INTEL_JITEVENTS 1
+#cmakedefine01 LLVM_USE_INTEL_JITEVENTS
/* Define if we have the oprofile JIT-support library */
-#cmakedefine LLVM_USE_OPROFILE 1
+#cmakedefine01 LLVM_USE_OPROFILE
/* Major version of the LLVM API */
#define LLVM_VERSION_MAJOR ${LLVM_VERSION_MAJOR}
@@ -102,7 +77,4 @@
/* LLVM version string */
#define LLVM_VERSION_STRING "${PACKAGE_VERSION}"
-/* Define if we link Polly to the tools */
-#cmakedefine LINK_POLLY_INTO_TOOLS
-
#endif
diff --git a/include/llvm/DebugInfo/CodeView/ByteStream.h b/include/llvm/DebugInfo/CodeView/ByteStream.h
deleted file mode 100644
index f398c93723e7..000000000000
--- a/include/llvm/DebugInfo/CodeView/ByteStream.h
+++ /dev/null
@@ -1,58 +0,0 @@
-//===- ByteStream.h - Reads stream data from a byte sequence ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_BYTESTREAM_H
-#define LLVM_DEBUGINFO_CODEVIEW_BYTESTREAM_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
-#include "llvm/Support/Error.h"
-#include <cstdint>
-#include <memory>
-#include <type_traits>
-
-namespace llvm {
-namespace codeview {
-class StreamReader;
-
-template <bool Writable = false> class ByteStream : public StreamInterface {
- typedef typename std::conditional<Writable, MutableArrayRef<uint8_t>,
- ArrayRef<uint8_t>>::type ArrayType;
-
-public:
- ByteStream() {}
- explicit ByteStream(ArrayType Data) : Data(Data) {}
- ~ByteStream() override {}
-
- Error readBytes(uint32_t Offset, uint32_t Size,
- ArrayRef<uint8_t> &Buffer) const override;
- Error readLongestContiguousChunk(uint32_t Offset,
- ArrayRef<uint8_t> &Buffer) const override;
-
- Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) const override;
-
- uint32_t getLength() const override;
-
- Error commit() const override;
-
- ArrayRef<uint8_t> data() const { return Data; }
- StringRef str() const;
-
-private:
- ArrayType Data;
-};
-
-extern template class ByteStream<true>;
-extern template class ByteStream<false>;
-
-} // end namespace pdb
-} // end namespace llvm
-
-#endif // LLVM_DEBUGINFO_CODEVIEW_BYTESTREAM_H
diff --git a/include/llvm/DebugInfo/CodeView/CVDebugRecord.h b/include/llvm/DebugInfo/CodeView/CVDebugRecord.h
new file mode 100644
index 000000000000..5a0bb4266ba2
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/CVDebugRecord.h
@@ -0,0 +1,55 @@
+//===- CVDebugRecord.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CVDEBUGRECORD_H
+#define LLVM_DEBUGINFO_CODEVIEW_CVDEBUGRECORD_H
+
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace OMF {
+struct Signature {
+ enum ID : uint32_t {
+ PDB70 = 0x53445352, // RSDS
+ PDB20 = 0x3031424e, // NB10
+ CV50 = 0x3131424e, // NB11
+ CV41 = 0x3930424e, // NB09
+ };
+
+ support::ulittle32_t CVSignature;
+ support::ulittle32_t Offset;
+};
+}
+
+namespace codeview {
+struct PDB70DebugInfo {
+ support::ulittle32_t CVSignature;
+ uint8_t Signature[16];
+ support::ulittle32_t Age;
+ // char PDBFileName[];
+};
+
+struct PDB20DebugInfo {
+ support::ulittle32_t CVSignature;
+ support::ulittle32_t Offset;
+ support::ulittle32_t Signature;
+ support::ulittle32_t Age;
+ // char PDBFileName[];
+};
+
+union DebugInfo {
+ struct OMF::Signature Signature;
+ struct PDB20DebugInfo PDB20;
+ struct PDB70DebugInfo PDB70;
+};
+}
+}
+
+#endif
+
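
A hedged sketch of consuming the new structures (Buf is assumed to point at a CodeView debug-directory payload of sufficient size):

  #include "llvm/DebugInfo/CodeView/CVDebugRecord.h"
  #include <cstdint>

  // Distinguishes the PDB 7.0 and PDB 2.0 record layouts by their leading
  // signature and returns the age field, or 0 for anything else.
  static uint32_t pdbAge(const uint8_t *Buf) {
    const auto *DI = reinterpret_cast<const llvm::codeview::DebugInfo *>(Buf);
    if (DI->Signature.CVSignature == llvm::OMF::Signature::PDB70)
      return DI->PDB70.Age;
    if (DI->Signature.CVSignature == llvm::OMF::Signature::PDB20)
      return DI->PDB20.Age;
    return 0;
  }
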
diff --git a/include/llvm/DebugInfo/CodeView/CVRecord.h b/include/llvm/DebugInfo/CodeView/CVRecord.h
index dba359fcbe82..a327d450db55 100644
--- a/include/llvm/DebugInfo/CodeView/CVRecord.h
+++ b/include/llvm/DebugInfo/CodeView/CVRecord.h
@@ -11,46 +11,73 @@
#define LLVM_DEBUGINFO_CODEVIEW_RECORDITERATOR_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
-#include "llvm/DebugInfo/CodeView/StreamReader.h"
+#include "llvm/DebugInfo/MSF/StreamReader.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
namespace llvm {
+
namespace codeview {
-template <typename Kind> struct CVRecord {
- uint32_t Length;
+template <typename Kind> class CVRecord {
+public:
+ CVRecord() = default;
+ CVRecord(Kind K, ArrayRef<uint8_t> Data) : Type(K), RecordData(Data) {}
+
+ uint32_t length() const { return RecordData.size(); }
+ Kind kind() const { return Type; }
+ ArrayRef<uint8_t> data() const { return RecordData; }
+
+ ArrayRef<uint8_t> content() const {
+ return RecordData.drop_front(sizeof(RecordPrefix));
+ }
+
+ Optional<uint32_t> hash() const { return Hash; }
+
+ void setHash(uint32_t Value) { Hash = Value; }
+
Kind Type;
- ArrayRef<uint8_t> Data;
- ArrayRef<uint8_t> RawData;
+ ArrayRef<uint8_t> RecordData;
+ Optional<uint32_t> Hash;
};
-template <typename Kind> struct VarStreamArrayExtractor<CVRecord<Kind>> {
- Error operator()(StreamRef Stream, uint32_t &Len,
- CVRecord<Kind> &Item) const {
+} // end namespace codeview
+
+namespace msf {
+
+template <typename Kind>
+struct VarStreamArrayExtractor<codeview::CVRecord<Kind>> {
+ Error operator()(ReadableStreamRef Stream, uint32_t &Len,
+ codeview::CVRecord<Kind> &Item) const {
+ using namespace codeview;
const RecordPrefix *Prefix = nullptr;
StreamReader Reader(Stream);
uint32_t Offset = Reader.getOffset();
if (auto EC = Reader.readObject(Prefix))
return EC;
- Item.Length = Prefix->RecordLen;
- if (Item.Length < 2)
+ if (Prefix->RecordLen < 2)
return make_error<CodeViewError>(cv_error_code::corrupt_record);
- Item.Type = static_cast<Kind>(uint16_t(Prefix->RecordKind));
+ Kind K = static_cast<Kind>(uint16_t(Prefix->RecordKind));
Reader.setOffset(Offset);
+ ArrayRef<uint8_t> RawData;
if (auto EC =
- Reader.readBytes(Item.RawData, Item.Length + sizeof(uint16_t)))
+ Reader.readBytes(RawData, Prefix->RecordLen + sizeof(uint16_t)))
return EC;
- Item.Data = Item.RawData.slice(sizeof(RecordPrefix));
- Len = Prefix->RecordLen + 2;
+ Item = codeview::CVRecord<Kind>(K, RawData);
+ Len = Item.length();
return Error::success();
}
};
-}
-}
-#endif
+} // end namespace msf
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_RECORDITERATOR_H
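
A small sketch of reading a record through the new accessor surface (the record itself is assumed to come from an existing, well-formed type stream):

  #include "llvm/DebugInfo/CodeView/CVRecord.h"
  #include "llvm/DebugInfo/CodeView/CodeView.h"

  // The public Length/Data/RawData fields are replaced by length()/data()/
  // content(); content() skips the fixed RecordPrefix.
  static size_t payloadSize(
      const llvm::codeview::CVRecord<llvm::codeview::TypeLeafKind> &Rec) {
    llvm::codeview::TypeLeafKind K = Rec.kind();
    (void)K;
    return Rec.content().size();
  }
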
diff --git a/include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h b/include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h
index 7c88956c984e..b2d3f5ea34a8 100644
--- a/include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h
+++ b/include/llvm/DebugInfo/CodeView/CVSymbolVisitor.h
@@ -18,83 +18,17 @@
namespace llvm {
namespace codeview {
+class SymbolVisitorCallbacks;
-template <typename Derived> class CVSymbolVisitor {
+class CVSymbolVisitor {
public:
- CVSymbolVisitor(SymbolVisitorDelegate *Delegate) : Delegate(Delegate) {}
+ CVSymbolVisitor(SymbolVisitorCallbacks &Callbacks);
- bool hadError() const { return HadError; }
-
- template <typename T>
- bool consumeObject(ArrayRef<uint8_t> &Data, const T *&Res) {
- if (Data.size() < sizeof(*Res)) {
- HadError = true;
- return false;
- }
- Res = reinterpret_cast<const T *>(Data.data());
- Data = Data.drop_front(sizeof(*Res));
- return true;
- }
-
-/// Actions to take on known symbols. By default, they do nothing. Visit methods
-/// for member records take the FieldData by non-const reference and are
-/// expected to consume the trailing bytes used by the field.
-/// FIXME: Make the visitor interpret the trailing bytes so that clients don't
-/// need to.
-#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
- void visit##Name(SymbolRecordKind Kind, Name &Record) {}
-#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
-#include "CVSymbolTypes.def"
-
- void visitSymbolRecord(const CVRecord<SymbolKind> &Record) {
- ArrayRef<uint8_t> Data = Record.Data;
- auto *DerivedThis = static_cast<Derived *>(this);
- DerivedThis->visitSymbolBegin(Record.Type, Data);
- uint32_t RecordOffset = Delegate ? Delegate->getRecordOffset(Data) : 0;
- switch (Record.Type) {
- default:
- DerivedThis->visitUnknownSymbol(Record.Type, Data);
- break;
-#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
- case EnumName: { \
- SymbolRecordKind RK = static_cast<SymbolRecordKind>(EnumName); \
- auto Result = Name::deserialize(RK, RecordOffset, Data); \
- if (Result.getError()) \
- return parseError(); \
- DerivedThis->visit##Name(Record.Type, *Result); \
- break; \
- }
-#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName) \
- SYMBOL_RECORD(EnumVal, EnumVal, AliasName)
-#include "CVSymbolTypes.def"
- }
- DerivedThis->visitSymbolEnd(Record.Type, Record.Data);
- }
-
- /// Visits the symbol records in Data. Sets the error flag on parse failures.
- void visitSymbolStream(const CVSymbolArray &Symbols) {
- for (const auto &I : Symbols) {
- visitSymbolRecord(I);
- if (hadError())
- break;
- }
- }
-
- /// Action to take on unknown symbols. By default, they are ignored.
- void visitUnknownSymbol(SymbolKind Kind, ArrayRef<uint8_t> Data) {}
-
- /// Paired begin/end actions for all symbols. Receives all record data,
- /// including the fixed-length record prefix.
- void visitSymbolBegin(SymbolKind Leaf, ArrayRef<uint8_t> RecordData) {}
- void visitSymbolEnd(SymbolKind Leaf, ArrayRef<uint8_t> OriginalSymData) {}
-
- /// Helper for returning from a void function when the stream is corrupted.
- void parseError() { HadError = true; }
+ Error visitSymbolRecord(CVSymbol &Record);
+ Error visitSymbolStream(const CVSymbolArray &Symbols);
private:
- SymbolVisitorDelegate *Delegate;
- /// Whether a symbol stream parsing error was encountered.
- bool HadError = false;
+ SymbolVisitorCallbacks &Callbacks;
};
} // end namespace codeview
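
A sketch of the new, non-CRTP entry point (CB is any SymbolVisitorCallbacks implementation supplied by the caller; the SymbolVisitorCallbacks include path is assumed for this release):

  #include "llvm/DebugInfo/CodeView/CVSymbolVisitor.h"
  #include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"

  // Errors now propagate as llvm::Error instead of an internal HadError flag.
  static llvm::Error visitAllSymbols(const llvm::codeview::CVSymbolArray &Symbols,
                                     llvm::codeview::SymbolVisitorCallbacks &CB) {
    llvm::codeview::CVSymbolVisitor Visitor(CB);
    return Visitor.visitSymbolStream(Symbols);
  }
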
diff --git a/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h b/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
index 930ac6930c24..d1b0363a4133 100644
--- a/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
+++ b/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
@@ -10,6 +10,7 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
+#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
@@ -22,16 +23,14 @@ class CVTypeVisitor {
public:
explicit CVTypeVisitor(TypeVisitorCallbacks &Callbacks);
- Error visitTypeRecord(const CVRecord<TypeLeafKind> &Record);
+ Error visitTypeRecord(CVType &Record);
+ Error visitMemberRecord(CVMemberRecord &Record);
/// Visits the type records in Data. Sets the error flag on parse failures.
Error visitTypeStream(const CVTypeArray &Types);
- Error skipPadding(ArrayRef<uint8_t> &Data);
-
- /// Visits individual member records of a field list record. Member records do
- /// not describe their own length, and need special handling.
- Error visitFieldList(const CVRecord<TypeLeafKind> &Record);
+ Error visitFieldListMemberStream(ArrayRef<uint8_t> FieldList);
+ Error visitFieldListMemberStream(msf::StreamReader Reader);
private:
/// The interface to the class that gets notified of each visitation.
diff --git a/include/llvm/DebugInfo/CodeView/CodeView.h b/include/llvm/DebugInfo/CodeView/CodeView.h
index 1ee203b4f8fa..e21cfa3d030a 100644
--- a/include/llvm/DebugInfo/CodeView/CodeView.h
+++ b/include/llvm/DebugInfo/CodeView/CodeView.h
@@ -21,8 +21,6 @@ namespace codeview {
enum class TypeRecordKind : uint16_t {
#define TYPE_RECORD(lf_ename, value, name) name = value,
#include "TypeRecords.def"
- // FIXME: Add serialization support
- FieldList = 0x1203,
};
/// Duplicate copy of the above enum, but using the official CV names. Useful
@@ -278,6 +276,7 @@ enum class MethodOptions : uint16_t {
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(MethodOptions)
/// Equivalent to CV_modifier_t.
+/// TODO: Add flag for _Atomic modifier
enum class ModifierOptions : uint16_t {
None = 0x0000,
Const = 0x0001,
@@ -526,7 +525,7 @@ enum class RegisterId : uint16_t {
};
/// These values correspond to the THUNK_ORDINAL enumeration.
-enum class ThunkOrdinal {
+enum class ThunkOrdinal : uint8_t {
Standard,
ThisAdjustor,
Vcall,
@@ -536,7 +535,7 @@ enum class ThunkOrdinal {
BranchIsland
};
-enum class TrampolineType { TrampIncremental, BranchIsland };
+enum class TrampolineType : uint16_t { TrampIncremental, BranchIsland };
// These values correspond to the CV_SourceChksum_t enumeration.
enum class FileChecksumKind : uint8_t { None, MD5, SHA1, SHA256 };
diff --git a/include/llvm/DebugInfo/CodeView/CodeViewError.h b/include/llvm/DebugInfo/CodeView/CodeViewError.h
index 69ff29aab6f1..0556fd0e19f2 100644
--- a/include/llvm/DebugInfo/CodeView/CodeViewError.h
+++ b/include/llvm/DebugInfo/CodeView/CodeViewError.h
@@ -21,6 +21,7 @@ enum class cv_error_code {
insufficient_buffer,
operation_unsupported,
corrupt_record,
+ unknown_member_record,
};
/// Base class for errors originating when parsing raw PDB files
diff --git a/include/llvm/DebugInfo/CodeView/CodeViewOStream.h b/include/llvm/DebugInfo/CodeView/CodeViewOStream.h
deleted file mode 100644
index 14d057a249a5..000000000000
--- a/include/llvm/DebugInfo/CodeView/CodeViewOStream.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===- CodeViewOStream.h ----------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEWOSTREAM_H
-#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEWOSTREAM_H
-
-#include "llvm/DebugInfo/CodeView/CodeView.h"
-#include "llvm/DebugInfo/CodeView/TypeIndex.h"
-
-namespace llvm {
-namespace codeview {
-
-template <typename Writer> class CodeViewOStream {
-private:
- CodeViewOStream(const CodeViewOStream &) = delete;
- CodeViewOStream &operator=(const CodeViewOStream &) = delete;
-
-public:
- typedef typename Writer::LabelType LabelType;
-
-public:
- explicit CodeViewOStream(Writer &W);
-
-private:
- uint64_t size() const { return W.tell(); }
-
-private:
- Writer &W;
-};
-}
-}
-
-#endif
diff --git a/include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h b/include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h
new file mode 100644
index 000000000000..5a036b9d5b6c
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/CodeViewRecordIO.h
@@ -0,0 +1,170 @@
+//===- CodeViewRecordIO.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H
+#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/MSF/StreamReader.h"
+#include "llvm/DebugInfo/MSF/StreamWriter.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+namespace codeview {
+
+class CodeViewRecordIO {
+ uint32_t getCurrentOffset() const {
+ return (isWriting()) ? Writer->getOffset() : Reader->getOffset();
+ }
+
+public:
+ explicit CodeViewRecordIO(msf::StreamReader &Reader) : Reader(&Reader) {}
+ explicit CodeViewRecordIO(msf::StreamWriter &Writer) : Writer(&Writer) {}
+
+ Error beginRecord(Optional<uint32_t> MaxLength);
+ Error endRecord();
+
+ Error mapInteger(TypeIndex &TypeInd);
+
+ bool isReading() const { return Reader != nullptr; }
+ bool isWriting() const { return !isReading(); }
+
+ uint32_t maxFieldLength() const;
+
+ template <typename T> Error mapObject(T &Value) {
+ if (isWriting())
+ return Writer->writeObject(Value);
+
+ const T *ValuePtr;
+ if (auto EC = Reader->readObject(ValuePtr))
+ return EC;
+ Value = *ValuePtr;
+ return Error::success();
+ }
+
+ template <typename T> Error mapInteger(T &Value) {
+ if (isWriting())
+ return Writer->writeInteger(Value);
+
+ return Reader->readInteger(Value);
+ }
+
+ template <typename T> Error mapEnum(T &Value) {
+ if (sizeof(Value) > maxFieldLength())
+ return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
+
+ using U = typename std::underlying_type<T>::type;
+ U X;
+ if (isWriting())
+ X = static_cast<U>(Value);
+
+ if (auto EC = mapInteger(X))
+ return EC;
+ if (isReading())
+ Value = static_cast<T>(X);
+ return Error::success();
+ }
+
+ Error mapEncodedInteger(int64_t &Value);
+ Error mapEncodedInteger(uint64_t &Value);
+ Error mapEncodedInteger(APSInt &Value);
+ Error mapStringZ(StringRef &Value);
+ Error mapGuid(StringRef &Guid);
+
+ Error mapStringZVectorZ(std::vector<StringRef> &Value);
+
+ template <typename SizeType, typename T, typename ElementMapper>
+ Error mapVectorN(T &Items, const ElementMapper &Mapper) {
+ SizeType Size;
+ if (isWriting()) {
+ Size = static_cast<SizeType>(Items.size());
+ if (auto EC = Writer->writeInteger(Size))
+ return EC;
+
+ for (auto &X : Items) {
+ if (auto EC = Mapper(*this, X))
+ return EC;
+ }
+ } else {
+ if (auto EC = Reader->readInteger(Size))
+ return EC;
+ for (SizeType I = 0; I < Size; ++I) {
+ typename T::value_type Item;
+ if (auto EC = Mapper(*this, Item))
+ return EC;
+ Items.push_back(Item);
+ }
+ }
+
+ return Error::success();
+ }
+
+ template <typename T, typename ElementMapper>
+ Error mapVectorTail(T &Items, const ElementMapper &Mapper) {
+ if (isWriting()) {
+ for (auto &Item : Items) {
+ if (auto EC = Mapper(*this, Item))
+ return EC;
+ }
+ } else {
+ typename T::value_type Field;
+ // Stop when we run out of bytes or we hit record padding bytes.
+ while (!Reader->empty() && Reader->peek() < 0xf0 /* LF_PAD0 */) {
+ if (auto EC = Mapper(*this, Field))
+ return EC;
+ Items.push_back(Field);
+ }
+ }
+ return Error::success();
+ }
+
+ Error mapByteVectorTail(ArrayRef<uint8_t> &Bytes);
+ Error mapByteVectorTail(std::vector<uint8_t> &Bytes);
+
+ Error skipPadding();
+
+private:
+ Error writeEncodedSignedInteger(const int64_t &Value);
+ Error writeEncodedUnsignedInteger(const uint64_t &Value);
+
+ struct RecordLimit {
+ uint32_t BeginOffset;
+ Optional<uint32_t> MaxLength;
+
+ Optional<uint32_t> bytesRemaining(uint32_t CurrentOffset) const {
+ if (!MaxLength.hasValue())
+ return None;
+ assert(CurrentOffset >= BeginOffset);
+
+ uint32_t BytesUsed = CurrentOffset - BeginOffset;
+ if (BytesUsed >= *MaxLength)
+ return 0;
+ return *MaxLength - BytesUsed;
+ }
+ };
+
+ SmallVector<RecordLimit, 2> Limits;
+
+ msf::StreamReader *Reader = nullptr;
+ msf::StreamWriter *Writer = nullptr;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H
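
Note on the new header above: the idea behind CodeViewRecordIO is that a single mapping function describes a record's wire layout once and is used in both directions. Construct the object over a msf::StreamReader to deserialize or over a msf::StreamWriter to serialize, and the same sequence of map* calls does the right thing (isReading()/isWriting() branch internally). A minimal sketch under that assumption; the record type and mapping function below are hypothetical and not part of the patch:

    // Hypothetical record and mapping function illustrating the symmetric
    // read/write pattern; only CodeViewRecordIO itself comes from this change.
    #include "llvm/ADT/StringRef.h"
    #include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
    #include "llvm/Support/Error.h"
    #include <cstdint>

    namespace {
    struct ExampleSym {
      uint32_t CodeOffset = 0;
      uint16_t Segment = 0;
      llvm::StringRef Name;
    };

    llvm::Error mapExampleSym(llvm::codeview::CodeViewRecordIO &IO,
                              ExampleSym &Sym) {
      // Each call either consumes bytes from the Reader or appends bytes to
      // the Writer, depending on which constructor built IO.
      if (auto EC = IO.mapInteger(Sym.CodeOffset))
        return EC;
      if (auto EC = IO.mapInteger(Sym.Segment))
        return EC;
      if (auto EC = IO.mapStringZ(Sym.Name))
        return EC;
      return llvm::Error::success();
    }
    } // end anonymous namespace

The SymbolRecordMapping class pulled in by SymbolDeserializer further down presumably follows this shape, though its definition is outside this diff.
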
diff --git a/include/llvm/DebugInfo/CodeView/EnumTables.h b/include/llvm/DebugInfo/CodeView/EnumTables.h
index 021288e57618..10d1c581a196 100644
--- a/include/llvm/DebugInfo/CodeView/EnumTables.h
+++ b/include/llvm/DebugInfo/CodeView/EnumTables.h
@@ -20,6 +20,7 @@
namespace llvm {
namespace codeview {
ArrayRef<EnumEntry<SymbolKind>> getSymbolTypeNames();
+ArrayRef<EnumEntry<TypeLeafKind>> getTypeLeafNames();
ArrayRef<EnumEntry<uint16_t>> getRegisterNames();
ArrayRef<EnumEntry<uint8_t>> getProcSymFlagNames();
ArrayRef<EnumEntry<uint16_t>> getLocalFlagNames();
diff --git a/include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h b/include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h
deleted file mode 100644
index 75a075157d22..000000000000
--- a/include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//===- FieldListRecordBuilder.h ---------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_FIELDLISTRECORDBUILDER_H
-#define LLVM_DEBUGINFO_CODEVIEW_FIELDLISTRECORDBUILDER_H
-
-#include "llvm/DebugInfo/CodeView/ListRecordBuilder.h"
-#include "llvm/DebugInfo/CodeView/TypeRecord.h"
-
-namespace llvm {
-namespace codeview {
-
-class MethodInfo {
-public:
- MethodInfo() : Access(), Kind(), Options(), Type(), VTableSlotOffset(-1) {}
-
- MethodInfo(MemberAccess Access, MethodKind Kind, MethodOptions Options,
- TypeIndex Type, int32_t VTableSlotOffset)
- : Access(Access), Kind(Kind), Options(Options), Type(Type),
- VTableSlotOffset(VTableSlotOffset) {}
-
- MemberAccess getAccess() const { return Access; }
- MethodKind getKind() const { return Kind; }
- MethodOptions getOptions() const { return Options; }
- TypeIndex getType() const { return Type; }
- int32_t getVTableSlotOffset() const { return VTableSlotOffset; }
-
-private:
- MemberAccess Access;
- MethodKind Kind;
- MethodOptions Options;
- TypeIndex Type;
- int32_t VTableSlotOffset;
-};
-
-class FieldListRecordBuilder : public ListRecordBuilder {
-private:
- FieldListRecordBuilder(const FieldListRecordBuilder &) = delete;
- void operator=(const FieldListRecordBuilder &) = delete;
-
-public:
- FieldListRecordBuilder();
-
- void reset() { ListRecordBuilder::reset(); }
-
- void writeBaseClass(const BaseClassRecord &Record);
- void writeEnumerator(const EnumeratorRecord &Record);
- void writeDataMember(const DataMemberRecord &Record);
- void writeOneMethod(const OneMethodRecord &Record);
- void writeOverloadedMethod(const OverloadedMethodRecord &Record);
- void writeNestedType(const NestedTypeRecord &Record);
- void writeStaticDataMember(const StaticDataMemberRecord &Record);
- void writeVirtualBaseClass(const VirtualBaseClassRecord &Record);
- void writeVFPtr(const VFPtrRecord &Type);
-};
-}
-}
-
-#endif
diff --git a/include/llvm/DebugInfo/CodeView/ListRecordBuilder.h b/include/llvm/DebugInfo/CodeView/ListRecordBuilder.h
deleted file mode 100644
index 00bf03d417a2..000000000000
--- a/include/llvm/DebugInfo/CodeView/ListRecordBuilder.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//===- ListRecordBuilder.h --------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_LISTRECORDBUILDER_H
-#define LLVM_DEBUGINFO_CODEVIEW_LISTRECORDBUILDER_H
-
-#include "llvm/DebugInfo/CodeView/TypeRecordBuilder.h"
-
-namespace llvm {
-namespace codeview {
-class TypeTableBuilder;
-
-class ListRecordBuilder {
-private:
- ListRecordBuilder(const ListRecordBuilder &) = delete;
- ListRecordBuilder &operator=(const ListRecordBuilder &) = delete;
-
-protected:
- const int MethodKindShift = 2;
-
- explicit ListRecordBuilder(TypeRecordKind Kind);
-
-public:
- llvm::StringRef str() { return Builder.str(); }
-
- void reset() {
- Builder.reset(Kind);
- ContinuationOffsets.clear();
- SubrecordStart = 0;
- }
-
- void writeListContinuation(const ListContinuationRecord &R);
-
- /// Writes this list record as a possible sequence of records.
- TypeIndex writeListRecord(TypeTableBuilder &Table);
-
-protected:
- void finishSubRecord();
-
- TypeRecordBuilder &getBuilder() { return Builder; }
-
-private:
- size_t getLastContinuationStart() const {
- return ContinuationOffsets.empty() ? 0 : ContinuationOffsets.back();
- }
- size_t getLastContinuationEnd() const { return Builder.size(); }
- size_t getLastContinuationSize() const {
- return getLastContinuationEnd() - getLastContinuationStart();
- }
-
- TypeRecordKind Kind;
- TypeRecordBuilder Builder;
- SmallVector<size_t, 4> ContinuationOffsets;
- size_t SubrecordStart = 0;
-};
-}
-}
-
-#endif
diff --git a/include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h b/include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h
deleted file mode 100644
index 002f885c7c5a..000000000000
--- a/include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- MemoryTypeTableBuilder.h ---------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_MEMORYTYPETABLEBUILDER_H
-#define LLVM_DEBUGINFO_CODEVIEW_MEMORYTYPETABLEBUILDER_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/DebugInfo/CodeView/TypeTableBuilder.h"
-#include <vector>
-
-namespace llvm {
-namespace codeview {
-
-class MemoryTypeTableBuilder : public TypeTableBuilder {
-public:
- MemoryTypeTableBuilder() {}
-
- bool empty() const { return Records.empty(); }
-
- template <typename TFunc> void ForEachRecord(TFunc Func) {
- uint32_t Index = TypeIndex::FirstNonSimpleIndex;
-
- for (StringRef R : Records) {
- Func(TypeIndex(Index), R);
- ++Index;
- }
- }
-
-protected:
- TypeIndex writeRecord(llvm::StringRef Data) override;
-
-private:
- std::vector<StringRef> Records;
- BumpPtrAllocator RecordStorage;
- DenseMap<StringRef, TypeIndex> HashedRecords;
-};
-
-} // end namespace codeview
-} // end namespace llvm
-
-#endif // LLVM_DEBUGINFO_CODEVIEW_MEMORYTYPETABLEBUILDER_H
diff --git a/include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h b/include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h
deleted file mode 100644
index faa404d41b1f..000000000000
--- a/include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//===- MethodListRecordBuilder.h --------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_METHODLISTRECORDBUILDER_H
-#define LLVM_DEBUGINFO_CODEVIEW_METHODLISTRECORDBUILDER_H
-
-#include "llvm/DebugInfo/CodeView/ListRecordBuilder.h"
-
-namespace llvm {
-namespace codeview {
-
-class MethodInfo;
-
-class MethodListRecordBuilder : public ListRecordBuilder {
-private:
- MethodListRecordBuilder(const MethodListRecordBuilder &) = delete;
- MethodListRecordBuilder &operator=(const MethodListRecordBuilder &) = delete;
-
-public:
- MethodListRecordBuilder();
-
- void writeMethod(MemberAccess Access, MethodKind Kind, MethodOptions Options,
- TypeIndex Type, int32_t VTableSlotOffset);
- void writeMethod(const MethodInfo &Method);
-};
-}
-}
-
-#endif
diff --git a/include/llvm/DebugInfo/CodeView/ModuleSubstream.h b/include/llvm/DebugInfo/CodeView/ModuleSubstream.h
index 6affac801d4d..8860ae42fc09 100644
--- a/include/llvm/DebugInfo/CodeView/ModuleSubstream.h
+++ b/include/llvm/DebugInfo/CodeView/ModuleSubstream.h
@@ -11,8 +11,8 @@
#define LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAM_H
#include "llvm/DebugInfo/CodeView/CodeView.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
@@ -59,29 +59,31 @@ struct ColumnNumberEntry {
class ModuleSubstream {
public:
ModuleSubstream();
- ModuleSubstream(ModuleSubstreamKind Kind, StreamRef Data);
- static Error initialize(StreamRef Stream, ModuleSubstream &Info);
+ ModuleSubstream(ModuleSubstreamKind Kind, msf::ReadableStreamRef Data);
+ static Error initialize(msf::ReadableStreamRef Stream, ModuleSubstream &Info);
uint32_t getRecordLength() const;
ModuleSubstreamKind getSubstreamKind() const;
- StreamRef getRecordData() const;
+ msf::ReadableStreamRef getRecordData() const;
private:
ModuleSubstreamKind Kind;
- StreamRef Data;
+ msf::ReadableStreamRef Data;
};
-template <> struct VarStreamArrayExtractor<ModuleSubstream> {
- Error operator()(StreamRef Stream, uint32_t &Length,
- ModuleSubstream &Info) const {
- if (auto EC = ModuleSubstream::initialize(Stream, Info))
+typedef msf::VarStreamArray<ModuleSubstream> ModuleSubstreamArray;
+} // namespace codeview
+
+namespace msf {
+template <> struct VarStreamArrayExtractor<codeview::ModuleSubstream> {
+ Error operator()(ReadableStreamRef Stream, uint32_t &Length,
+ codeview::ModuleSubstream &Info) const {
+ if (auto EC = codeview::ModuleSubstream::initialize(Stream, Info))
return EC;
Length = Info.getRecordLength();
return Error::success();
}
};
-
-typedef VarStreamArray<ModuleSubstream> ModuleSubstreamArray;
-}
-}
+} // namespace msf
+} // namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAM_H
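
Note on the hunk above: moving the VarStreamArrayExtractor specialization from namespace codeview into namespace msf is forced rather than cosmetic. Before C++17 an explicit specialization must be declared in the namespace of the primary template, and the primary template now lives in llvm::msf. A self-contained sketch of that rule with invented names, independent of the LLVM headers:

    // Generic illustration: the primary template and its explicit
    // specializations share a namespace, even when the specialized type is
    // declared elsewhere.
    namespace msf_like {
    template <typename T> struct Extractor; // primary template, declaration only
    } // namespace msf_like

    namespace codeview_like {
    struct Substream {};
    } // namespace codeview_like

    namespace msf_like {
    template <> struct Extractor<codeview_like::Substream> {
      int operator()(const codeview_like::Substream &) const { return 0; }
    };
    } // namespace msf_like

    int main() {
      msf_like::Extractor<codeview_like::Substream> E;
      return E(codeview_like::Substream{});
    }
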
diff --git a/include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h b/include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h
index 6df230903712..f9927d660933 100644
--- a/include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h
+++ b/include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h
@@ -10,28 +10,75 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAMVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAMVISITOR_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/DebugInfo/CodeView/Line.h"
#include "llvm/DebugInfo/CodeView/ModuleSubstream.h"
-#include "llvm/DebugInfo/CodeView/StreamReader.h"
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamReader.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
namespace llvm {
+
namespace codeview {
struct LineColumnEntry {
support::ulittle32_t NameIndex;
- FixedStreamArray<LineNumberEntry> LineNumbers;
- FixedStreamArray<ColumnNumberEntry> Columns;
+ msf::FixedStreamArray<LineNumberEntry> LineNumbers;
+ msf::FixedStreamArray<ColumnNumberEntry> Columns;
};
-template <> class VarStreamArrayExtractor<LineColumnEntry> {
+struct FileChecksumEntry {
+ uint32_t FileNameOffset; // Byte offset of filename in global stringtable.
+ FileChecksumKind Kind; // The type of checksum.
+ ArrayRef<uint8_t> Checksum; // The bytes of the checksum.
+};
+
+typedef msf::VarStreamArray<LineColumnEntry> LineInfoArray;
+typedef msf::VarStreamArray<FileChecksumEntry> FileChecksumArray;
+
+class IModuleSubstreamVisitor {
public:
- VarStreamArrayExtractor(const LineSubstreamHeader *Header) : Header(Header) {}
+ virtual ~IModuleSubstreamVisitor() = default;
- Error operator()(StreamRef Stream, uint32_t &Len,
- LineColumnEntry &Item) const {
+ virtual Error visitUnknown(ModuleSubstreamKind Kind,
+ msf::ReadableStreamRef Data) = 0;
+ virtual Error visitSymbols(msf::ReadableStreamRef Data);
+ virtual Error visitLines(msf::ReadableStreamRef Data,
+ const LineSubstreamHeader *Header,
+ const LineInfoArray &Lines);
+ virtual Error visitStringTable(msf::ReadableStreamRef Data);
+ virtual Error visitFileChecksums(msf::ReadableStreamRef Data,
+ const FileChecksumArray &Checksums);
+ virtual Error visitFrameData(msf::ReadableStreamRef Data);
+ virtual Error visitInlineeLines(msf::ReadableStreamRef Data);
+ virtual Error visitCrossScopeImports(msf::ReadableStreamRef Data);
+ virtual Error visitCrossScopeExports(msf::ReadableStreamRef Data);
+ virtual Error visitILLines(msf::ReadableStreamRef Data);
+ virtual Error visitFuncMDTokenMap(msf::ReadableStreamRef Data);
+ virtual Error visitTypeMDTokenMap(msf::ReadableStreamRef Data);
+ virtual Error visitMergedAssemblyInput(msf::ReadableStreamRef Data);
+ virtual Error visitCoffSymbolRVA(msf::ReadableStreamRef Data);
+};
+
+Error visitModuleSubstream(const ModuleSubstream &R,
+ IModuleSubstreamVisitor &V);
+} // end namespace codeview
+
+namespace msf {
+
+template <> class VarStreamArrayExtractor<codeview::LineColumnEntry> {
+public:
+ VarStreamArrayExtractor(const codeview::LineSubstreamHeader *Header)
+ : Header(Header) {}
+
+ Error operator()(ReadableStreamRef Stream, uint32_t &Len,
+ codeview::LineColumnEntry &Item) const {
+ using namespace codeview;
const LineFileBlockHeader *BlockHeader;
StreamReader Reader(Stream);
if (auto EC = Reader.readObject(BlockHeader))
@@ -61,19 +108,14 @@ public:
}
private:
- const LineSubstreamHeader *Header;
+ const codeview::LineSubstreamHeader *Header;
};
-struct FileChecksumEntry {
- uint32_t FileNameOffset; // Byte offset of filename in global stringtable.
- FileChecksumKind Kind; // The type of checksum.
- ArrayRef<uint8_t> Checksum; // The bytes of the checksum.
-};
-
-template <> class VarStreamArrayExtractor<FileChecksumEntry> {
+template <> class VarStreamArrayExtractor<codeview::FileChecksumEntry> {
public:
- Error operator()(StreamRef Stream, uint32_t &Len,
- FileChecksumEntry &Item) const {
+ Error operator()(ReadableStreamRef Stream, uint32_t &Len,
+ codeview::FileChecksumEntry &Item) const {
+ using namespace codeview;
const FileChecksum *Header;
StreamReader Reader(Stream);
if (auto EC = Reader.readObject(Header))
@@ -87,35 +129,8 @@ public:
}
};
-typedef VarStreamArray<LineColumnEntry> LineInfoArray;
-typedef VarStreamArray<FileChecksumEntry> FileChecksumArray;
-
-class IModuleSubstreamVisitor {
-public:
- virtual ~IModuleSubstreamVisitor() {}
-
- virtual Error visitUnknown(ModuleSubstreamKind Kind, StreamRef Data) = 0;
- virtual Error visitSymbols(StreamRef Data);
- virtual Error visitLines(StreamRef Data, const LineSubstreamHeader *Header,
- const LineInfoArray &Lines);
- virtual Error visitStringTable(StreamRef Data);
- virtual Error visitFileChecksums(StreamRef Data,
- const FileChecksumArray &Checksums);
- virtual Error visitFrameData(StreamRef Data);
- virtual Error visitInlineeLines(StreamRef Data);
- virtual Error visitCrossScopeImports(StreamRef Data);
- virtual Error visitCrossScopeExports(StreamRef Data);
- virtual Error visitILLines(StreamRef Data);
- virtual Error visitFuncMDTokenMap(StreamRef Data);
- virtual Error visitTypeMDTokenMap(StreamRef Data);
- virtual Error visitMergedAssemblyInput(StreamRef Data);
- virtual Error visitCoffSymbolRVA(StreamRef Data);
-};
-
-Error visitModuleSubstream(const ModuleSubstream &R,
- IModuleSubstreamVisitor &V);
+} // end namespace msf
-} // namespace codeview
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAMVISITOR_H
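
Note on the reshuffled header above: the visitor interface pairs with visitModuleSubstream — walk a ModuleSubstreamArray and hand each entry to an IModuleSubstreamVisitor, which dispatches by substream kind. A minimal sketch, assuming the headers and VarStreamArray iteration from this revision; the visitor class and the counting are illustrative only:

    // Hypothetical visitor that overrides only the one pure-virtual hook; the
    // typed hooks (visitLines, visitFileChecksums, ...) keep the defaults the
    // library provides.
    #include "llvm/DebugInfo/CodeView/ModuleSubstream.h"
    #include "llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h"
    #include "llvm/Support/Error.h"

    namespace {
    class CountingVisitor : public llvm::codeview::IModuleSubstreamVisitor {
    public:
      llvm::Error visitUnknown(llvm::codeview::ModuleSubstreamKind Kind,
                               llvm::msf::ReadableStreamRef Data) override {
        ++Unknown; // substreams no typed hook claimed
        return llvm::Error::success();
      }
      unsigned Unknown = 0;
    };

    llvm::Error
    visitAll(const llvm::codeview::ModuleSubstreamArray &Substreams) {
      CountingVisitor V;
      for (const auto &S : Substreams)
        if (auto EC = llvm::codeview::visitModuleSubstream(S, V))
          return EC;
      return llvm::Error::success();
    }
    } // end anonymous namespace
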
diff --git a/include/llvm/DebugInfo/CodeView/RecordSerialization.h b/include/llvm/DebugInfo/CodeView/RecordSerialization.h
index 84179f5f81f7..97b6f561bb97 100644
--- a/include/llvm/DebugInfo/CodeView/RecordSerialization.h
+++ b/include/llvm/DebugInfo/CodeView/RecordSerialization.h
@@ -13,8 +13,11 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/Endian.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/DebugInfo/MSF/StreamReader.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
#include <cinttypes>
#include <tuple>
@@ -24,8 +27,12 @@ using llvm::support::little32_t;
using llvm::support::ulittle16_t;
using llvm::support::ulittle32_t;
+/// Limit on the size of all codeview symbol and type records, including the
+/// RecordPrefix. MSVC does not emit any records larger than this.
+enum : unsigned { MaxRecordLength = 0xFF00 };
+
struct RecordPrefix {
- ulittle16_t RecordLen; // Record length, starting from &Leaf.
+ ulittle16_t RecordLen; // Record length, starting from &RecordKind.
ulittle16_t RecordKind; // Record kind enum (SymRecordKind or TypeRecordKind)
};
@@ -34,54 +41,40 @@ struct RecordPrefix {
StringRef getBytesAsCharacters(ArrayRef<uint8_t> LeafData);
StringRef getBytesAsCString(ArrayRef<uint8_t> LeafData);
-/// Consumes sizeof(T) bytes from the given byte sequence. Returns an error if
-/// there are not enough bytes remaining. Reinterprets the consumed bytes as a
-/// T object and points 'Res' at them.
-template <typename T, typename U>
-inline std::error_code consumeObject(U &Data, const T *&Res) {
- if (Data.size() < sizeof(*Res))
- return std::make_error_code(std::errc::illegal_byte_sequence);
- Res = reinterpret_cast<const T *>(Data.data());
- Data = Data.drop_front(sizeof(*Res));
- return std::error_code();
-}
-
-inline std::error_code consume(ArrayRef<uint8_t> &Data) {
- return std::error_code();
-}
+inline Error consume(msf::StreamReader &Reader) { return Error::success(); }
/// Decodes a numeric "leaf" value. These are integer literals encountered in
/// the type stream. If the value is positive and less than LF_NUMERIC (1 <<
/// 15), it is emitted directly in Data. Otherwise, it has a tag like LF_CHAR
/// that indicates the bitwidth and sign of the numeric data.
-std::error_code consume(ArrayRef<uint8_t> &Data, APSInt &Num);
-std::error_code consume(StringRef &Data, APSInt &Num);
+Error consume(msf::StreamReader &Reader, APSInt &Num);
/// Decodes a numeric leaf value that is known to be a particular type.
-std::error_code consume_numeric(ArrayRef<uint8_t> &Data, uint64_t &Value);
+Error consume_numeric(msf::StreamReader &Reader, uint64_t &Value);
/// Decodes signed and unsigned fixed-length integers.
-std::error_code consume(ArrayRef<uint8_t> &Data, uint32_t &Item);
-std::error_code consume(StringRef &Data, uint32_t &Item);
-std::error_code consume(ArrayRef<uint8_t> &Data, int32_t &Item);
+Error consume(msf::StreamReader &Reader, uint32_t &Item);
+Error consume(msf::StreamReader &Reader, int32_t &Item);
/// Decodes a null terminated string.
-std::error_code consume(ArrayRef<uint8_t> &Data, StringRef &Item);
+Error consume(msf::StreamReader &Reader, StringRef &Item);
+
+Error consume(StringRef &Data, APSInt &Num);
+Error consume(StringRef &Data, uint32_t &Item);
/// Decodes an arbitrary object whose layout matches that of the underlying
/// byte sequence, and returns a pointer to the object.
-template <typename T>
-std::error_code consume(ArrayRef<uint8_t> &Data, T *&Item) {
- return consumeObject(Data, Item);
+template <typename T> Error consume(msf::StreamReader &Reader, T *&Item) {
+ return Reader.readObject(Item);
}
template <typename T, typename U> struct serialize_conditional_impl {
serialize_conditional_impl(T &Item, U Func) : Item(Item), Func(Func) {}
- std::error_code deserialize(ArrayRef<uint8_t> &Data) const {
+ Error deserialize(msf::StreamReader &Reader) const {
if (!Func())
- return std::error_code();
- return consume(Data, Item);
+ return Error::success();
+ return consume(Reader, Item);
}
T &Item;
@@ -96,22 +89,8 @@ serialize_conditional_impl<T, U> serialize_conditional(T &Item, U Func) {
template <typename T, typename U> struct serialize_array_impl {
serialize_array_impl(ArrayRef<T> &Item, U Func) : Item(Item), Func(Func) {}
- std::error_code deserialize(ArrayRef<uint8_t> &Data) const {
- uint32_t N = Func();
- if (N == 0)
- return std::error_code();
-
- uint32_t Size = sizeof(T) * N;
-
- if (Size / sizeof(T) != N)
- return std::make_error_code(std::errc::illegal_byte_sequence);
-
- if (Data.size() < Size)
- return std::make_error_code(std::errc::illegal_byte_sequence);
-
- Item = ArrayRef<T>(reinterpret_cast<const T *>(Data.data()), N);
- Data = Data.drop_front(Size);
- return std::error_code();
+ Error deserialize(msf::StreamReader &Reader) const {
+ return Reader.readArray(Item, Func());
}
ArrayRef<T> &Item;
@@ -121,15 +100,15 @@ template <typename T, typename U> struct serialize_array_impl {
template <typename T> struct serialize_vector_tail_impl {
serialize_vector_tail_impl(std::vector<T> &Item) : Item(Item) {}
- std::error_code deserialize(ArrayRef<uint8_t> &Data) const {
+ Error deserialize(msf::StreamReader &Reader) const {
T Field;
// Stop when we run out of bytes or we hit record padding bytes.
- while (!Data.empty() && Data.front() < LF_PAD0) {
- if (auto EC = consume(Data, Field))
+ while (!Reader.empty() && Reader.peek() < LF_PAD0) {
+ if (auto EC = consume(Reader, Field))
return EC;
Item.push_back(Field);
}
- return std::error_code();
+ return Error::success();
}
std::vector<T> &Item;
@@ -139,21 +118,18 @@ struct serialize_null_term_string_array_impl {
serialize_null_term_string_array_impl(std::vector<StringRef> &Item)
: Item(Item) {}
- std::error_code deserialize(ArrayRef<uint8_t> &Data) const {
- if (Data.empty())
- return std::make_error_code(std::errc::illegal_byte_sequence);
+ Error deserialize(msf::StreamReader &Reader) const {
+ if (Reader.empty())
+ return make_error<CodeViewError>(cv_error_code::insufficient_buffer,
+ "Null terminated string is empty!");
- StringRef Field;
- // Stop when we run out of bytes or we hit record padding bytes.
- while (Data.front() != 0) {
- if (auto EC = consume(Data, Field))
+ while (Reader.peek() != 0) {
+ StringRef Field;
+ if (auto EC = Reader.readZeroString(Field))
return EC;
Item.push_back(Field);
- if (Data.empty())
- return std::make_error_code(std::errc::illegal_byte_sequence);
}
- Data = Data.drop_front(1);
- return std::error_code();
+ return Reader.skip(1);
}
std::vector<StringRef> &Item;
@@ -162,10 +138,9 @@ struct serialize_null_term_string_array_impl {
template <typename T> struct serialize_arrayref_tail_impl {
serialize_arrayref_tail_impl(ArrayRef<T> &Item) : Item(Item) {}
- std::error_code deserialize(ArrayRef<uint8_t> &Data) const {
- uint32_t Count = Data.size() / sizeof(T);
- Item = ArrayRef<T>(reinterpret_cast<const T *>(Data.begin()), Count);
- return std::error_code();
+ Error deserialize(msf::StreamReader &Reader) const {
+ uint32_t Count = Reader.bytesRemaining() / sizeof(T);
+ return Reader.readArray(Item, Count);
}
ArrayRef<T> &Item;
@@ -174,8 +149,8 @@ template <typename T> struct serialize_arrayref_tail_impl {
template <typename T> struct serialize_numeric_impl {
serialize_numeric_impl(T &Item) : Item(Item) {}
- std::error_code deserialize(ArrayRef<uint8_t> &Data) const {
- return consume_numeric(Data, Item);
+ Error deserialize(msf::StreamReader &Reader) const {
+ return consume_numeric(Reader, Item);
}
T &Item;
@@ -226,52 +201,50 @@ template <typename T> serialize_numeric_impl<T> serialize_numeric(T &Item) {
#define CV_NUMERIC_FIELD(I) serialize_numeric(I)
template <typename T, typename U>
-std::error_code consume(ArrayRef<uint8_t> &Data,
- const serialize_conditional_impl<T, U> &Item) {
- return Item.deserialize(Data);
+Error consume(msf::StreamReader &Reader,
+ const serialize_conditional_impl<T, U> &Item) {
+ return Item.deserialize(Reader);
}
template <typename T, typename U>
-std::error_code consume(ArrayRef<uint8_t> &Data,
- const serialize_array_impl<T, U> &Item) {
- return Item.deserialize(Data);
+Error consume(msf::StreamReader &Reader,
+ const serialize_array_impl<T, U> &Item) {
+ return Item.deserialize(Reader);
}
-inline std::error_code
-consume(ArrayRef<uint8_t> &Data,
- const serialize_null_term_string_array_impl &Item) {
- return Item.deserialize(Data);
+inline Error consume(msf::StreamReader &Reader,
+ const serialize_null_term_string_array_impl &Item) {
+ return Item.deserialize(Reader);
}
template <typename T>
-std::error_code consume(ArrayRef<uint8_t> &Data,
- const serialize_vector_tail_impl<T> &Item) {
- return Item.deserialize(Data);
+Error consume(msf::StreamReader &Reader,
+ const serialize_vector_tail_impl<T> &Item) {
+ return Item.deserialize(Reader);
}
template <typename T>
-std::error_code consume(ArrayRef<uint8_t> &Data,
- const serialize_arrayref_tail_impl<T> &Item) {
- return Item.deserialize(Data);
+Error consume(msf::StreamReader &Reader,
+ const serialize_arrayref_tail_impl<T> &Item) {
+ return Item.deserialize(Reader);
}
template <typename T>
-std::error_code consume(ArrayRef<uint8_t> &Data,
- const serialize_numeric_impl<T> &Item) {
- return Item.deserialize(Data);
+Error consume(msf::StreamReader &Reader,
+ const serialize_numeric_impl<T> &Item) {
+ return Item.deserialize(Reader);
}
template <typename T, typename U, typename... Args>
-std::error_code consume(ArrayRef<uint8_t> &Data, T &&X, U &&Y,
- Args &&... Rest) {
- if (auto EC = consume(Data, X))
+Error consume(msf::StreamReader &Reader, T &&X, U &&Y, Args &&... Rest) {
+ if (auto EC = consume(Reader, X))
return EC;
- return consume(Data, Y, std::forward<Args>(Rest)...);
+ return consume(Reader, Y, std::forward<Args>(Rest)...);
}
#define CV_DESERIALIZE(...) \
if (auto EC = consume(__VA_ARGS__)) \
- return EC;
+ return std::move(EC);
}
}
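
Note on the RecordSerialization.h rework above: the consume() overload set is variadic at the tail — consume(Reader, X, Y, Rest...) deserializes the fields left to right over a msf::StreamReader and stops at the first error, which is what CV_DESERIALIZE wraps (now returning std::move(EC) because llvm::Error is move-only). A hedged sketch; the field layout is made up for illustration:

    // Hypothetical use of the variadic consume() chain; roughly what
    // CV_DESERIALIZE(Reader, Offset, Name) expands to.
    #include "llvm/ADT/StringRef.h"
    #include "llvm/DebugInfo/CodeView/RecordSerialization.h"
    #include "llvm/DebugInfo/MSF/StreamReader.h"
    #include "llvm/Support/Error.h"
    #include <cstdint>

    namespace {
    llvm::Error readExample(llvm::msf::StreamReader &Reader) {
      uint32_t Offset;
      llvm::StringRef Name;
      if (auto EC = llvm::codeview::consume(Reader, Offset, Name))
        return EC;
      // ... use Offset and Name ...
      return llvm::Error::success();
    }
    } // end anonymous namespace
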
diff --git a/include/llvm/DebugInfo/CodeView/StreamRef.h b/include/llvm/DebugInfo/CodeView/StreamRef.h
deleted file mode 100644
index a4f244a32289..000000000000
--- a/include/llvm/DebugInfo/CodeView/StreamRef.h
+++ /dev/null
@@ -1,104 +0,0 @@
-//===- StreamRef.h - A copyable reference to a stream -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_STREAMREF_H
-#define LLVM_DEBUGINFO_CODEVIEW_STREAMREF_H
-
-#include "llvm/DebugInfo/CodeView/CodeViewError.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
-
-namespace llvm {
-namespace codeview {
-
-class StreamRef {
-public:
- StreamRef() : Stream(nullptr), ViewOffset(0), Length(0) {}
- StreamRef(const StreamInterface &Stream)
- : Stream(&Stream), ViewOffset(0), Length(Stream.getLength()) {}
- StreamRef(const StreamInterface &Stream, uint32_t Offset, uint32_t Length)
- : Stream(&Stream), ViewOffset(Offset), Length(Length) {}
-
- // Use StreamRef.slice() instead.
- StreamRef(const StreamRef &S, uint32_t Offset, uint32_t Length) = delete;
-
- Error readBytes(uint32_t Offset, uint32_t Size,
- ArrayRef<uint8_t> &Buffer) const {
- if (ViewOffset + Offset < Offset)
- return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
- if (Size + Offset > Length)
- return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
- return Stream->readBytes(ViewOffset + Offset, Size, Buffer);
- }
-
- // Given an offset into the stream, read as much as possible without copying
- // any data.
- Error readLongestContiguousChunk(uint32_t Offset,
- ArrayRef<uint8_t> &Buffer) const {
- if (Offset >= Length)
- return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
-
- if (auto EC = Stream->readLongestContiguousChunk(Offset, Buffer))
- return EC;
- // This StreamRef might refer to a smaller window over a larger stream. In
- // that case we will have read out more bytes than we should return, because
- // we should not read past the end of the current view.
- uint32_t MaxLength = Length - Offset;
- if (Buffer.size() > MaxLength)
- Buffer = Buffer.slice(0, MaxLength);
- return Error::success();
- }
-
- Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const {
- if (Data.size() + Offset > Length)
- return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
- return Stream->writeBytes(ViewOffset + Offset, Data);
- }
-
- uint32_t getLength() const { return Length; }
-
- Error commit() const { return Stream->commit(); }
-
- StreamRef drop_front(uint32_t N) const {
- if (!Stream)
- return StreamRef();
-
- N = std::min(N, Length);
- return StreamRef(*Stream, ViewOffset + N, Length - N);
- }
-
- StreamRef keep_front(uint32_t N) const {
- if (!Stream)
- return StreamRef();
- N = std::min(N, Length);
- return StreamRef(*Stream, ViewOffset, N);
- }
-
- StreamRef slice(uint32_t Offset, uint32_t Len) const {
- return drop_front(Offset).keep_front(Len);
- }
-
- bool operator==(const StreamRef &Other) const {
- if (Stream != Other.Stream)
- return false;
- if (ViewOffset != Other.ViewOffset)
- return false;
- if (Length != Other.Length)
- return false;
- return true;
- }
-
-private:
- const StreamInterface *Stream;
- uint32_t ViewOffset;
- uint32_t Length;
-};
-}
-}
-
-#endif // LLVM_DEBUGINFO_CODEVIEW_STREAMREF_H
diff --git a/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h b/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h
new file mode 100644
index 000000000000..13c2bb14ecf5
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/SymbolDeserializer.h
@@ -0,0 +1,74 @@
+//===- SymbolDeserializer.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDESERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDESERIALIZER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h"
+#include "llvm/DebugInfo/MSF/ByteStream.h"
+#include "llvm/DebugInfo/MSF/StreamReader.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+class SymbolVisitorDelegate;
+class SymbolDeserializer : public SymbolVisitorCallbacks {
+ struct MappingInfo {
+ explicit MappingInfo(ArrayRef<uint8_t> RecordData)
+ : Stream(RecordData), Reader(Stream), Mapping(Reader) {}
+
+ msf::ByteStream Stream;
+ msf::StreamReader Reader;
+ SymbolRecordMapping Mapping;
+ };
+
+public:
+ explicit SymbolDeserializer(SymbolVisitorDelegate *Delegate)
+ : Delegate(Delegate) {}
+
+ Error visitSymbolBegin(CVSymbol &Record) override {
+ assert(!Mapping && "Already in a symbol mapping!");
+ Mapping = llvm::make_unique<MappingInfo>(Record.content());
+ return Mapping->Mapping.visitSymbolBegin(Record);
+ }
+ Error visitSymbolEnd(CVSymbol &Record) override {
+ assert(Mapping && "Not in a symbol mapping!");
+ auto EC = Mapping->Mapping.visitSymbolEnd(Record);
+ Mapping.reset();
+ return EC;
+ }
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownRecord(CVSymbol &CVR, Name &Record) override { \
+ return visitKnownRecordImpl(CVR, Record); \
+ }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "CVSymbolTypes.def"
+
+private:
+ template <typename T> Error visitKnownRecordImpl(CVSymbol &CVR, T &Record) {
+
+ Record.RecordOffset =
+ Delegate ? Delegate->getRecordOffset(Mapping->Reader) : 0;
+ if (auto EC = Mapping->Mapping.visitKnownRecord(CVR, Record))
+ return EC;
+ return Error::success();
+ }
+
+ SymbolVisitorDelegate *Delegate;
+ std::unique_ptr<MappingInfo> Mapping;
+};
+}
+}
+
+#endif
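
Note on the new header above: SymbolDeserializer's typed visitKnownRecord overloads are stamped out by including CVSymbolTypes.def under a SYMBOL_RECORD macro; each overload wraps the record bytes in a ByteStream/StreamReader and hands the record to SymbolRecordMapping, which is expected to populate the plain-data members the SymbolRecord subclasses now carry (see the SymbolRecord.h hunks below). The X-macro expansion is easy to miss, so here is a detached sketch of the same pattern with invented names — the real list lives in CVSymbolTypes.def:

    // Generic sketch of the X-macro trick: a list of (enum, value, class)
    // triples, expanded under a macro definition, stamps out one strongly
    // typed overload per record class.
    #include <iostream>

    struct ProcLike { int CodeOffset = 0; };
    struct LabelLike { int Offset = 0; };

    // Stand-in for '#include "CVSymbolTypes.def"'.
    #define EXAMPLE_SYMBOL_RECORDS(X) \
      X(S_PROC_LIKE, 0x1101, ProcLike) \
      X(S_LABEL_LIKE, 0x1102, LabelLike)

    class ExampleCallbacks {
    public:
    #define SYMBOL_RECORD(EnumName, EnumVal, Name) \
      void visitKnownRecord(Name &Record) { \
        (void)Record; \
        std::cout << #EnumName << " (" << EnumVal << ")\n"; \
      }
      EXAMPLE_SYMBOL_RECORDS(SYMBOL_RECORD)
    #undef SYMBOL_RECORD
    };

    int main() {
      ExampleCallbacks CB;
      ProcLike P;
      LabelLike L;
      CB.visitKnownRecord(P); // resolves to the ProcLike overload
      CB.visitKnownRecord(L); // resolves to the LabelLike overload
      return 0;
    }
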
diff --git a/include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h b/include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h
index 30b0a40451cb..823636c398de 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolDumpDelegate.h
@@ -10,20 +10,17 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H
-#include "SymbolVisitorDelegate.h"
-
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
-
-#include <stdint.h>
+#include "llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h"
+#include <cstdint>
namespace llvm {
-
namespace codeview {
class SymbolDumpDelegate : public SymbolVisitorDelegate {
public:
- virtual ~SymbolDumpDelegate() {}
+ ~SymbolDumpDelegate() override = default;
virtual void printRelocatedField(StringRef Label, uint32_t RelocOffset,
uint32_t Offset,
@@ -31,6 +28,7 @@ public:
virtual void printBinaryBlockWithRelocs(StringRef Label,
ArrayRef<uint8_t> Block) = 0;
};
+
} // end namespace codeview
} // end namespace llvm
diff --git a/include/llvm/DebugInfo/CodeView/SymbolDumper.h b/include/llvm/DebugInfo/CodeView/SymbolDumper.h
index 648e40f55810..eb63f7895a1e 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolDumper.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolDumper.h
@@ -35,11 +35,11 @@ public:
/// and true otherwise. This should be called in order, since the dumper
/// maintains state about previous records which are necessary for cross
/// type references.
- bool dump(const CVRecord<SymbolKind> &Record);
+ Error dump(CVRecord<SymbolKind> &Record);
/// Dumps the type records in Data. Returns false if there was a type stream
/// parse error, and true otherwise.
- bool dump(const CVSymbolArray &Symbols);
+ Error dump(const CVSymbolArray &Symbols);
private:
ScopedPrinter &W;
diff --git a/include/llvm/DebugInfo/CodeView/SymbolRecord.h b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
index 77e894fba4a9..57772d39e972 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolRecord.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
@@ -11,23 +11,24 @@
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H
#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
+#include <cstddef>
+#include <cstdint>
+#include <vector>
namespace llvm {
namespace codeview {
-using llvm::support::ulittle16_t;
-using llvm::support::ulittle32_t;
-using llvm::support::little32_t;
-
class SymbolRecord {
protected:
explicit SymbolRecord(SymbolRecordKind Kind) : Kind(Kind) {}
@@ -42,216 +43,119 @@ private:
// S_GPROC32, S_LPROC32, S_GPROC32_ID, S_LPROC32_ID, S_LPROC32_DPC or
// S_LPROC32_DPC_ID
class ProcSym : public SymbolRecord {
-public:
- struct Hdr {
- ulittle32_t PtrParent;
- ulittle32_t PtrEnd;
- ulittle32_t PtrNext;
- ulittle32_t CodeSize;
- ulittle32_t DbgStart;
- ulittle32_t DbgEnd;
- TypeIndex FunctionType;
- ulittle32_t CodeOffset;
- ulittle16_t Segment;
- uint8_t Flags; // ProcSymFlags enum
- // Name: The null-terminated name follows.
- };
-
- ProcSym(SymbolRecordKind Kind, uint32_t RecordOffset, const Hdr *H,
- StringRef Name)
- : SymbolRecord(Kind), RecordOffset(RecordOffset), Header(*H), Name(Name) {
- }
+ static constexpr uint32_t RelocationOffset = 32;
- static ErrorOr<ProcSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return ProcSym(Kind, RecordOffset, H, Name);
- }
+public:
+ explicit ProcSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ ProcSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+ : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, CodeOffset);
- }
-
- uint32_t RecordOffset;
- Hdr Header;
+ return RecordOffset + RelocationOffset;
+ }
+
+ uint32_t Parent = 0;
+ uint32_t End = 0;
+ uint32_t Next = 0;
+ uint32_t CodeSize = 0;
+ uint32_t DbgStart = 0;
+ uint32_t DbgEnd = 0;
+ TypeIndex FunctionType;
+ uint32_t CodeOffset = 0;
+ uint16_t Segment = 0;
+ ProcSymFlags Flags = ProcSymFlags::None;
StringRef Name;
+
+ uint32_t RecordOffset = 0;
};
// S_THUNK32
class Thunk32Sym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Parent;
- ulittle32_t End;
- ulittle32_t Next;
- ulittle32_t Off;
- ulittle16_t Seg;
- ulittle16_t Len;
- uint8_t Ord; // ThunkOrdinal enumeration
- // Name: The null-terminated name follows.
- // Variant portion of thunk
- };
-
- Thunk32Sym(SymbolRecordKind Kind, uint32_t RecordOffset, const Hdr *H,
- StringRef Name, ArrayRef<uint8_t> VariantData)
- : SymbolRecord(Kind), RecordOffset(RecordOffset), Header(*H), Name(Name),
- VariantData(VariantData) {}
-
- static ErrorOr<Thunk32Sym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- ArrayRef<uint8_t> VariantData;
-
- CV_DESERIALIZE(Data, H, Name, CV_ARRAY_FIELD_TAIL(VariantData));
-
- return Thunk32Sym(Kind, RecordOffset, H, Name, VariantData);
- }
+ explicit Thunk32Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ Thunk32Sym(SymbolRecordKind Kind, uint32_t RecordOffset)
+ : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
- uint32_t RecordOffset;
- Hdr Header;
+ uint32_t Parent;
+ uint32_t End;
+ uint32_t Next;
+ uint32_t Offset;
+ uint16_t Segment;
+ uint16_t Length;
+ ThunkOrdinal Thunk;
StringRef Name;
ArrayRef<uint8_t> VariantData;
+
+ uint32_t RecordOffset;
};
// S_TRAMPOLINE
class TrampolineSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle16_t Type; // TrampolineType enum
- ulittle16_t Size;
- ulittle32_t ThunkOff;
- ulittle32_t TargetOff;
- ulittle16_t ThunkSection;
- ulittle16_t TargetSection;
- };
-
- TrampolineSym(SymbolRecordKind Kind, uint32_t RecordOffset, const Hdr *H)
- : SymbolRecord(Kind), RecordOffset(RecordOffset), Header(*H) {}
-
- static ErrorOr<TrampolineSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
+ explicit TrampolineSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ TrampolineSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+ : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
- CV_DESERIALIZE(Data, H);
-
- return TrampolineSym(Kind, RecordOffset, H);
- }
+ TrampolineType Type;
+ uint16_t Size;
+ uint32_t ThunkOffset;
+ uint32_t TargetOffset;
+ uint16_t ThunkSection;
+ uint16_t TargetSection;
uint32_t RecordOffset;
- Hdr Header;
};
// S_SECTION
class SectionSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle16_t SectionNumber;
- uint8_t Alignment;
- uint8_t Reserved; // Must be 0
- ulittle32_t Rva;
- ulittle32_t Length;
- ulittle32_t Characteristics;
- // Name: The null-terminated name follows.
- };
-
- SectionSym(SymbolRecordKind Kind, uint32_t RecordOffset, const Hdr *H,
- StringRef Name)
- : SymbolRecord(Kind), RecordOffset(RecordOffset), Header(*H), Name(Name) {
- }
-
- static ErrorOr<SectionSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
-
- CV_DESERIALIZE(Data, H, Name);
+ explicit SectionSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ SectionSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+ : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
- return SectionSym(Kind, RecordOffset, H, Name);
- }
+ uint16_t SectionNumber;
+ uint8_t Alignment;
+ uint32_t Rva;
+ uint32_t Length;
+ uint32_t Characteristics;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_COFFGROUP
class CoffGroupSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Size;
- ulittle32_t Characteristics;
- ulittle32_t Offset;
- ulittle16_t Segment;
- // Name: The null-terminated name follows.
- };
-
- CoffGroupSym(SymbolRecordKind Kind, uint32_t RecordOffset, const Hdr *H,
- StringRef Name)
- : SymbolRecord(Kind), RecordOffset(RecordOffset), Header(*H), Name(Name) {
- }
-
- static ErrorOr<CoffGroupSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
-
- CV_DESERIALIZE(Data, H, Name);
+ explicit CoffGroupSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ CoffGroupSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+ : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
- return CoffGroupSym(Kind, RecordOffset, H, Name);
- }
+ uint32_t Size;
+ uint32_t Characteristics;
+ uint32_t Offset;
+ uint16_t Segment;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
class ScopeEndSym : public SymbolRecord {
public:
+ explicit ScopeEndSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
ScopeEndSym(SymbolRecordKind Kind, uint32_t RecordOffset)
: SymbolRecord(Kind), RecordOffset(RecordOffset) {}
- static ErrorOr<ScopeEndSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- return ScopeEndSym(Kind, RecordOffset);
- }
uint32_t RecordOffset;
};
class CallerSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Count;
- };
-
- CallerSym(SymbolRecordKind Kind, uint32_t RecordOffset, const Hdr *Header,
- ArrayRef<TypeIndex> Indices)
- : SymbolRecord(Kind), RecordOffset(RecordOffset), Header(*Header),
- Indices(Indices) {}
-
- static ErrorOr<CallerSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *Header;
- ArrayRef<TypeIndex> Indices;
-
- CV_DESERIALIZE(Data, Header, CV_ARRAY_FIELD_N(Indices, Header->Count));
-
- return CallerSym(Kind, RecordOffset, Header, Indices);
- }
+ explicit CallerSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ CallerSym(SymbolRecordKind Kind, uint32_t RecordOffset)
+ : SymbolRecord(Kind), RecordOffset(RecordOffset) {}
+ std::vector<TypeIndex> Indices;
uint32_t RecordOffset;
- Hdr Header;
- ArrayRef<TypeIndex> Indices;
};
struct BinaryAnnotationIterator {
@@ -264,7 +168,7 @@ struct BinaryAnnotationIterator {
};
BinaryAnnotationIterator(ArrayRef<uint8_t> Annotations) : Data(Annotations) {}
- BinaryAnnotationIterator() {}
+ BinaryAnnotationIterator() = default;
BinaryAnnotationIterator(const BinaryAnnotationIterator &Other)
: Data(Other.Data) {}
@@ -435,1018 +339,608 @@ private:
// S_INLINESITE
class InlineSiteSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t PtrParent;
- ulittle32_t PtrEnd;
- TypeIndex Inlinee;
- // BinaryAnnotations
- };
-
- InlineSiteSym(uint32_t RecordOffset, const Hdr *H,
- ArrayRef<uint8_t> Annotations)
+ explicit InlineSiteSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ InlineSiteSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::InlineSiteSym),
- RecordOffset(RecordOffset), Header(*H), Annotations(Annotations) {}
-
- static ErrorOr<InlineSiteSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- ArrayRef<uint8_t> Annotations;
- CV_DESERIALIZE(Data, H, CV_ARRAY_FIELD_TAIL(Annotations));
-
- return InlineSiteSym(RecordOffset, H, Annotations);
- }
+ RecordOffset(RecordOffset) {}
llvm::iterator_range<BinaryAnnotationIterator> annotations() const {
- return llvm::make_range(BinaryAnnotationIterator(Annotations),
+ return llvm::make_range(BinaryAnnotationIterator(AnnotationData),
BinaryAnnotationIterator());
}
- uint32_t RecordOffset;
- Hdr Header;
+ uint32_t Parent;
+ uint32_t End;
+ TypeIndex Inlinee;
+ std::vector<uint8_t> AnnotationData;
-private:
- ArrayRef<uint8_t> Annotations;
+ uint32_t RecordOffset;
};
// S_PUB32
class PublicSym32 : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Index; // Type index, or Metadata token if a managed symbol
- ulittle32_t Off;
- ulittle16_t Seg;
- // Name: The null-terminated name follows.
- };
-
- PublicSym32(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::PublicSym32), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
-
- static ErrorOr<PublicSym32> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
+ explicit PublicSym32(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit PublicSym32(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::PublicSym32),
+ RecordOffset(RecordOffset) {}
- return PublicSym32(RecordOffset, H, Name);
- }
+ uint32_t Index;
+ uint32_t Offset;
+ uint16_t Segment;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_REGISTER
class RegisterSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Index; // Type index or Metadata token
- ulittle16_t Register; // RegisterId enumeration
- // Name: The null-terminated name follows.
- };
-
- RegisterSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::RegisterSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
+ explicit RegisterSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ RegisterSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::RegisterSym),
+ RecordOffset(RecordOffset) {}
- static ErrorOr<RegisterSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return RegisterSym(RecordOffset, H, Name);
- }
+ uint32_t Index;
+ RegisterId Register;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_PROCREF, S_LPROCREF
class ProcRefSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t SumName; // SUC of the name (?)
- ulittle32_t SymOffset; // Offset of actual symbol in $$Symbols
- ulittle16_t Mod; // Module containing the actual symbol
- // Name: The null-terminated name follows.
- };
-
- ProcRefSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::ProcRefSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
-
- static ErrorOr<ProcRefSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return ProcRefSym(RecordOffset, H, Name);
+ explicit ProcRefSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit ProcRefSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::ProcRefSym), RecordOffset(RecordOffset) {
}
- uint32_t RecordOffset;
- Hdr Header;
+ uint32_t SumName;
+ uint32_t SymOffset;
+ uint16_t Module;
StringRef Name;
+
+ uint32_t RecordOffset;
};
// S_LOCAL
class LocalSym : public SymbolRecord {
public:
- struct Hdr {
- TypeIndex Type;
- ulittle16_t Flags; // LocalSymFlags enum
- // Name: The null-terminated name follows.
- };
-
- LocalSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::LocalSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
+ explicit LocalSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit LocalSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::LocalSym), RecordOffset(RecordOffset) {}
- static ErrorOr<LocalSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return LocalSym(RecordOffset, H, Name);
- }
+ TypeIndex Type;
+ LocalSymFlags Flags;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
struct LocalVariableAddrRange {
- ulittle32_t OffsetStart;
- ulittle16_t ISectStart;
- ulittle16_t Range;
+ uint32_t OffsetStart;
+ uint16_t ISectStart;
+ uint16_t Range;
};
struct LocalVariableAddrGap {
- ulittle16_t GapStartOffset;
- ulittle16_t Range;
+ uint16_t GapStartOffset;
+ uint16_t Range;
};
enum : uint16_t { MaxDefRange = 0xf000 };
// S_DEFRANGE
class DefRangeSym : public SymbolRecord {
-public:
- struct Hdr {
- ulittle32_t Program;
- LocalVariableAddrRange Range;
- // LocalVariableAddrGap Gaps[];
- };
+ static constexpr uint32_t RelocationOffset = 8;
- DefRangeSym(uint32_t RecordOffset, const Hdr *H,
- ArrayRef<LocalVariableAddrGap> Gaps)
- : SymbolRecord(SymbolRecordKind::DefRangeSym), RecordOffset(RecordOffset),
- Header(*H), Gaps(Gaps) {}
-
- static ErrorOr<DefRangeSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- ArrayRef<LocalVariableAddrGap> Gaps;
- CV_DESERIALIZE(Data, H, CV_ARRAY_FIELD_TAIL(Gaps));
-
- return DefRangeSym(RecordOffset, H, Gaps);
- }
+public:
+ explicit DefRangeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit DefRangeSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::DefRangeSym),
+ RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, Range);
+ return RecordOffset + RelocationOffset;
}
+ uint32_t Program;
+ LocalVariableAddrRange Range;
+ std::vector<LocalVariableAddrGap> Gaps;
+
uint32_t RecordOffset;
- Hdr Header;
- ArrayRef<LocalVariableAddrGap> Gaps;
};
// S_DEFRANGE_SUBFIELD
class DefRangeSubfieldSym : public SymbolRecord {
+ static constexpr uint32_t RelocationOffset = 12;
+
public:
- struct Hdr {
- ulittle32_t Program;
- ulittle16_t OffsetInParent;
- LocalVariableAddrRange Range;
- // LocalVariableAddrGap Gaps[];
- };
- DefRangeSubfieldSym(uint32_t RecordOffset, const Hdr *H,
- ArrayRef<LocalVariableAddrGap> Gaps)
+ explicit DefRangeSubfieldSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ DefRangeSubfieldSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::DefRangeSubfieldSym),
- RecordOffset(RecordOffset), Header(*H), Gaps(Gaps) {}
-
- static ErrorOr<DefRangeSubfieldSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- ArrayRef<LocalVariableAddrGap> Gaps;
- CV_DESERIALIZE(Data, H, CV_ARRAY_FIELD_TAIL(Gaps));
-
- return DefRangeSubfieldSym(RecordOffset, H, Gaps);
- }
+ RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, Range);
+ return RecordOffset + RelocationOffset;
}
+ uint32_t Program;
+ uint16_t OffsetInParent;
+ LocalVariableAddrRange Range;
+ std::vector<LocalVariableAddrGap> Gaps;
+
uint32_t RecordOffset;
- Hdr Header;
- ArrayRef<LocalVariableAddrGap> Gaps;
};
// S_DEFRANGE_REGISTER
class DefRangeRegisterSym : public SymbolRecord {
public:
- struct Hdr {
+ struct Header {
ulittle16_t Register;
ulittle16_t MayHaveNoName;
- LocalVariableAddrRange Range;
- // LocalVariableAddrGap Gaps[];
};
-
- DefRangeRegisterSym(uint32_t RecordOffset, const Hdr *H,
- ArrayRef<LocalVariableAddrGap> Gaps)
+ explicit DefRangeRegisterSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ DefRangeRegisterSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::DefRangeRegisterSym),
- RecordOffset(RecordOffset), Header(*H), Gaps(Gaps) {}
-
- DefRangeRegisterSym(uint16_t Register, uint16_t MayHaveNoName,
- uint32_t OffsetStart, uint16_t ISectStart, uint16_t Range,
- ArrayRef<LocalVariableAddrGap> Gaps)
- : SymbolRecord(SymbolRecordKind::DefRangeRegisterSym), RecordOffset(0),
- Gaps(Gaps) {
- Header.Register = Register;
- Header.MayHaveNoName = MayHaveNoName;
- Header.Range.OffsetStart = OffsetStart;
- Header.Range.ISectStart = ISectStart;
- Header.Range.Range = Range;
- }
+ RecordOffset(RecordOffset) {}
- static ErrorOr<DefRangeRegisterSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- ArrayRef<LocalVariableAddrGap> Gaps;
- CV_DESERIALIZE(Data, H, CV_ARRAY_FIELD_TAIL(Gaps));
+ uint32_t getRelocationOffset() const { return RecordOffset + sizeof(Header); }
- return DefRangeRegisterSym(RecordOffset, H, Gaps);
- }
-
- uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, Range);
- }
+ Header Hdr;
+ LocalVariableAddrRange Range;
+ std::vector<LocalVariableAddrGap> Gaps;
uint32_t RecordOffset;
- Hdr Header;
- ArrayRef<LocalVariableAddrGap> Gaps;
};
// S_DEFRANGE_SUBFIELD_REGISTER
class DefRangeSubfieldRegisterSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle16_t Register; // Register to which the variable is relative
+ struct Header {
+ ulittle16_t Register;
ulittle16_t MayHaveNoName;
ulittle32_t OffsetInParent;
- LocalVariableAddrRange Range;
- // LocalVariableAddrGap Gaps[];
};
-
- DefRangeSubfieldRegisterSym(uint32_t RecordOffset, const Hdr *H,
- ArrayRef<LocalVariableAddrGap> Gaps)
+ explicit DefRangeSubfieldRegisterSym(SymbolRecordKind Kind)
+ : SymbolRecord(Kind) {}
+ DefRangeSubfieldRegisterSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::DefRangeSubfieldRegisterSym),
- RecordOffset(RecordOffset), Header(*H), Gaps(Gaps) {}
-
- DefRangeSubfieldRegisterSym(uint16_t Register, uint16_t MayHaveNoName,
- uint32_t OffsetInParent,
- ArrayRef<LocalVariableAddrGap> Gaps)
- : SymbolRecord(SymbolRecordKind::DefRangeSubfieldRegisterSym),
- RecordOffset(0), Gaps(Gaps) {
- Header.Register = Register;
- Header.MayHaveNoName = MayHaveNoName;
- Header.OffsetInParent = OffsetInParent;
- }
+ RecordOffset(RecordOffset) {}
- static ErrorOr<DefRangeSubfieldRegisterSym>
- deserialize(SymbolRecordKind Kind, uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- ArrayRef<LocalVariableAddrGap> Gaps;
- CV_DESERIALIZE(Data, H, CV_ARRAY_FIELD_TAIL(Gaps));
+ uint32_t getRelocationOffset() const { return RecordOffset + sizeof(Header); }
- return DefRangeSubfieldRegisterSym(RecordOffset, H, Gaps);
- }
-
- uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, Range);
- }
+ Header Hdr;
+ LocalVariableAddrRange Range;
+ std::vector<LocalVariableAddrGap> Gaps;
uint32_t RecordOffset;
- Hdr Header;
- ArrayRef<LocalVariableAddrGap> Gaps;
};
// S_DEFRANGE_FRAMEPOINTER_REL
class DefRangeFramePointerRelSym : public SymbolRecord {
-public:
- struct Hdr {
- little32_t Offset; // Offset from the frame pointer register
- LocalVariableAddrRange Range;
- // LocalVariableAddrGap Gaps[];
- };
+ static constexpr uint32_t RelocationOffset = 8;
- DefRangeFramePointerRelSym(uint32_t RecordOffset, const Hdr *H,
- ArrayRef<LocalVariableAddrGap> Gaps)
+public:
+ explicit DefRangeFramePointerRelSym(SymbolRecordKind Kind)
+ : SymbolRecord(Kind) {}
+ DefRangeFramePointerRelSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::DefRangeFramePointerRelSym),
- RecordOffset(RecordOffset), Header(*H), Gaps(Gaps) {}
-
- static ErrorOr<DefRangeFramePointerRelSym>
- deserialize(SymbolRecordKind Kind, uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- ArrayRef<LocalVariableAddrGap> Gaps;
- CV_DESERIALIZE(Data, H, CV_ARRAY_FIELD_TAIL(Gaps));
-
- return DefRangeFramePointerRelSym(RecordOffset, H, Gaps);
- }
+ RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, Range);
+ return RecordOffset + RelocationOffset;
}
+ int32_t Offset;
+ LocalVariableAddrRange Range;
+ std::vector<LocalVariableAddrGap> Gaps;
+
uint32_t RecordOffset;
- Hdr Header;
- ArrayRef<LocalVariableAddrGap> Gaps;
};
// S_DEFRANGE_REGISTER_REL
class DefRangeRegisterRelSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle16_t BaseRegister;
+ struct Header {
+ ulittle16_t Register;
ulittle16_t Flags;
little32_t BasePointerOffset;
- LocalVariableAddrRange Range;
- // LocalVariableAddrGap Gaps[];
};
-
- DefRangeRegisterRelSym(uint32_t RecordOffset, const Hdr *H,
- ArrayRef<LocalVariableAddrGap> Gaps)
+ explicit DefRangeRegisterRelSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit DefRangeRegisterRelSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::DefRangeRegisterRelSym),
- RecordOffset(RecordOffset), Header(*H), Gaps(Gaps) {}
-
- DefRangeRegisterRelSym(uint16_t BaseRegister, uint16_t Flags,
- int32_t BasePointerOffset, uint32_t OffsetStart,
- uint16_t ISectStart, uint16_t Range,
- ArrayRef<LocalVariableAddrGap> Gaps)
- : SymbolRecord(SymbolRecordKind::DefRangeRegisterRelSym), RecordOffset(0),
- Gaps(Gaps) {
- Header.BaseRegister = BaseRegister;
- Header.Flags = Flags;
- Header.BasePointerOffset = BasePointerOffset;
- Header.Range.OffsetStart = OffsetStart;
- Header.Range.ISectStart = ISectStart;
- Header.Range.Range = Range;
- }
+ RecordOffset(RecordOffset) {}
- static ErrorOr<DefRangeRegisterRelSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- ArrayRef<LocalVariableAddrGap> Gaps;
- CV_DESERIALIZE(Data, H, CV_ARRAY_FIELD_TAIL(Gaps));
+ // The flags implement this notional bitfield:
+ // uint16_t IsSubfield : 1;
+ // uint16_t Padding : 3;
+ // uint16_t OffsetInParent : 12;
+ enum : uint16_t {
+ IsSubfieldFlag = 1,
+ OffsetInParentShift = 4,
+ };
- return DefRangeRegisterRelSym(RecordOffset, H, Gaps);
- }
+ bool hasSpilledUDTMember() const { return Hdr.Flags & IsSubfieldFlag; }
+ uint16_t offsetInParent() const { return Hdr.Flags >> OffsetInParentShift; }
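+  // Illustrative example of the encoding above (not from the original
+  // header): a subfield spilled at offset 12 in its parent would round-trip
+  // as
+  //   Sym.Hdr.Flags = (12 << OffsetInParentShift) | IsSubfieldFlag;
+  //   Sym.hasSpilledUDTMember(); // true: low bit set
+  //   Sym.offsetInParent();      // 12: upper twelve bits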
- bool hasSpilledUDTMember() const { return Header.Flags & 1; }
- uint16_t offsetInParent() const { return Header.Flags >> 4; }
+ uint32_t getRelocationOffset() const { return RecordOffset + sizeof(Header); }
- uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, Range);
- }
+ Header Hdr;
+ LocalVariableAddrRange Range;
+ std::vector<LocalVariableAddrGap> Gaps;
uint32_t RecordOffset;
- Hdr Header;
- ArrayRef<LocalVariableAddrGap> Gaps;
};
// S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE
class DefRangeFramePointerRelFullScopeSym : public SymbolRecord {
public:
- struct Hdr {
- little32_t Offset; // Offset from the frame pointer register
- };
-
- DefRangeFramePointerRelFullScopeSym(uint32_t RecordOffset, const Hdr *H)
+ explicit DefRangeFramePointerRelFullScopeSym(SymbolRecordKind Kind)
+ : SymbolRecord(Kind) {}
+ explicit DefRangeFramePointerRelFullScopeSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::DefRangeFramePointerRelFullScopeSym),
- RecordOffset(RecordOffset), Header(*H) {}
-
- static ErrorOr<DefRangeFramePointerRelFullScopeSym>
- deserialize(SymbolRecordKind Kind, uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- CV_DESERIALIZE(Data, H);
+ RecordOffset(RecordOffset) {}
- return DefRangeFramePointerRelFullScopeSym(RecordOffset, H);
- }
+ int32_t Offset;
uint32_t RecordOffset;
- Hdr Header;
};
// S_BLOCK32
class BlockSym : public SymbolRecord {
-public:
- struct Hdr {
- ulittle32_t PtrParent;
- ulittle32_t PtrEnd;
- ulittle32_t CodeSize;
- ulittle32_t CodeOffset;
- ulittle16_t Segment;
- // Name: The null-terminated name follows.
- };
+ static constexpr uint32_t RelocationOffset = 16;
- BlockSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::BlockSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
-
- static ErrorOr<BlockSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return BlockSym(RecordOffset, H, Name);
- }
+public:
+ explicit BlockSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit BlockSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::BlockSym), RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, CodeOffset);
+ return RecordOffset + RelocationOffset;
}
- uint32_t RecordOffset;
- Hdr Header;
+ uint32_t Parent;
+ uint32_t End;
+ uint32_t CodeSize;
+ uint32_t CodeOffset;
+ uint16_t Segment;
StringRef Name;
+
+ uint32_t RecordOffset;
};
// S_LABEL32
class LabelSym : public SymbolRecord {
-public:
- struct Hdr {
- ulittle32_t CodeOffset;
- ulittle16_t Segment;
- uint8_t Flags; // CV_PROCFLAGS
- // Name: The null-terminated name follows.
- };
-
- LabelSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::LabelSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
+ static constexpr uint32_t RelocationOffset = 4;
- static ErrorOr<LabelSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return LabelSym(RecordOffset, H, Name);
- }
+public:
+ explicit LabelSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit LabelSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::LabelSym), RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, CodeOffset);
+ return RecordOffset + RelocationOffset;
}
- uint32_t RecordOffset;
- Hdr Header;
+ uint32_t CodeOffset;
+ uint16_t Segment;
+ ProcSymFlags Flags;
StringRef Name;
+
+ uint32_t RecordOffset;
};
// S_OBJNAME
class ObjNameSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Signature;
- // Name: The null-terminated name follows.
- };
-
- ObjNameSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::ObjNameSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
-
- static ErrorOr<ObjNameSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return ObjNameSym(RecordOffset, H, Name);
+ explicit ObjNameSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ ObjNameSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::ObjNameSym), RecordOffset(RecordOffset) {
}
- uint32_t RecordOffset;
- Hdr Header;
+ uint32_t Signature;
StringRef Name;
+
+ uint32_t RecordOffset;
};
// S_ENVBLOCK
class EnvBlockSym : public SymbolRecord {
public:
- struct Hdr {
- uint8_t Reserved;
- // Sequence of zero terminated strings.
- };
-
- EnvBlockSym(uint32_t RecordOffset, const Hdr *H,
- const std::vector<StringRef> &Fields)
- : SymbolRecord(SymbolRecordKind::EnvBlockSym), RecordOffset(RecordOffset),
- Header(*H), Fields(Fields) {}
-
- static ErrorOr<EnvBlockSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- std::vector<StringRef> Fields;
- CV_DESERIALIZE(Data, H, CV_STRING_ARRAY_NULL_TERM(Fields));
+ explicit EnvBlockSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ EnvBlockSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::EnvBlockSym),
+ RecordOffset(RecordOffset) {}
- return EnvBlockSym(RecordOffset, H, Fields);
- }
+ std::vector<StringRef> Fields;
uint32_t RecordOffset;
- Hdr Header;
- std::vector<StringRef> Fields;
};
// S_EXPORT
class ExportSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle16_t Ordinal;
- ulittle16_t Flags; // ExportFlags
- // Name: The null-terminated name follows.
- };
+ explicit ExportSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ ExportSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::ExportSym), RecordOffset(RecordOffset) {}
- ExportSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::ExportSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
-
- static ErrorOr<ExportSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return ExportSym(RecordOffset, H, Name);
- }
+ uint16_t Ordinal;
+ ExportFlags Flags;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_FILESTATIC
class FileStaticSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Index; // Type Index
- ulittle32_t ModFilenameOffset; // Index of mod filename in string table
- ulittle16_t Flags; // LocalSymFlags enum
- // Name: The null-terminated name follows.
- };
-
- FileStaticSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
+ explicit FileStaticSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ FileStaticSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::FileStaticSym),
- RecordOffset(RecordOffset), Header(*H), Name(Name) {}
-
- static ErrorOr<FileStaticSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
+ RecordOffset(RecordOffset) {}
- return FileStaticSym(RecordOffset, H, Name);
- }
+ uint32_t Index;
+ uint32_t ModFilenameOffset;
+ LocalSymFlags Flags;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_COMPILE2
class Compile2Sym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t flags; // CompileSym2Flags enum
- uint8_t getLanguage() const { return flags & 0xFF; }
- unsigned short Machine; // CPUType enum
- unsigned short VersionFrontendMajor;
- unsigned short VersionFrontendMinor;
- unsigned short VersionFrontendBuild;
- unsigned short VersionBackendMajor;
- unsigned short VersionBackendMinor;
- unsigned short VersionBackendBuild;
- // Version: The null-terminated version string follows.
- // Optional block of zero terminated strings terminated with a double zero.
- };
-
- Compile2Sym(uint32_t RecordOffset, const Hdr *H, StringRef Version)
- : SymbolRecord(SymbolRecordKind::Compile2Sym), RecordOffset(RecordOffset),
- Header(*H), Version(Version) {}
-
- static ErrorOr<Compile2Sym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Version;
- CV_DESERIALIZE(Data, H, Version);
+ explicit Compile2Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ Compile2Sym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::Compile2Sym),
+ RecordOffset(RecordOffset) {}
+
+ CompileSym2Flags Flags;
+ CPUType Machine;
+ uint16_t VersionFrontendMajor;
+ uint16_t VersionFrontendMinor;
+ uint16_t VersionFrontendBuild;
+ uint16_t VersionBackendMajor;
+ uint16_t VersionBackendMinor;
+ uint16_t VersionBackendBuild;
+ StringRef Version;
+ std::vector<StringRef> ExtraStrings;
- return Compile2Sym(RecordOffset, H, Version);
- }
+ uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
+ uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
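+  // For example (illustrative values): with Flags == 0x00001234,
+  // getLanguage() returns 0x34 (the source-language code in the low byte) and
+  // getFlags() returns 0x00001200. Compile3Sym splits its Flags the same way.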
uint32_t RecordOffset;
- Hdr Header;
- StringRef Version;
};
// S_COMPILE3
class Compile3Sym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t flags; // CompileSym3Flags enum
- uint8_t getLanguage() const { return flags & 0xff; }
- ulittle16_t Machine; // CPUType enum
- ulittle16_t VersionFrontendMajor;
- ulittle16_t VersionFrontendMinor;
- ulittle16_t VersionFrontendBuild;
- ulittle16_t VersionFrontendQFE;
- ulittle16_t VersionBackendMajor;
- ulittle16_t VersionBackendMinor;
- ulittle16_t VersionBackendBuild;
- ulittle16_t VersionBackendQFE;
- // VersionString: The null-terminated version string follows.
- };
-
- Compile3Sym(uint32_t RecordOffset, const Hdr *H, StringRef Version)
- : SymbolRecord(SymbolRecordKind::Compile3Sym), RecordOffset(RecordOffset),
- Header(*H), Version(Version) {}
+ explicit Compile3Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ Compile3Sym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::Compile3Sym),
+ RecordOffset(RecordOffset) {}
+
+ CompileSym3Flags Flags;
+ CPUType Machine;
+ uint16_t VersionFrontendMajor;
+ uint16_t VersionFrontendMinor;
+ uint16_t VersionFrontendBuild;
+ uint16_t VersionFrontendQFE;
+ uint16_t VersionBackendMajor;
+ uint16_t VersionBackendMinor;
+ uint16_t VersionBackendBuild;
+ uint16_t VersionBackendQFE;
+ StringRef Version;
- static ErrorOr<Compile3Sym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Version;
- CV_DESERIALIZE(Data, H, Version);
-
- return Compile3Sym(RecordOffset, H, Version);
- }
+ uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
+ uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
uint32_t RecordOffset;
- Hdr Header;
- StringRef Version;
};
// S_FRAMEPROC
class FrameProcSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t TotalFrameBytes;
- ulittle32_t PaddingFrameBytes;
- ulittle32_t OffsetToPadding;
- ulittle32_t BytesOfCalleeSavedRegisters;
- ulittle32_t OffsetOfExceptionHandler;
- ulittle16_t SectionIdOfExceptionHandler;
- ulittle32_t Flags;
- };
-
- FrameProcSym(uint32_t RecordOffset, const Hdr *H)
+ explicit FrameProcSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit FrameProcSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::FrameProcSym),
- RecordOffset(RecordOffset), Header(*H) {}
+ RecordOffset(RecordOffset) {}
- static ErrorOr<FrameProcSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- CV_DESERIALIZE(Data, H);
-
- return FrameProcSym(RecordOffset, H);
- }
+ uint32_t TotalFrameBytes;
+ uint32_t PaddingFrameBytes;
+ uint32_t OffsetToPadding;
+ uint32_t BytesOfCalleeSavedRegisters;
+ uint32_t OffsetOfExceptionHandler;
+ uint16_t SectionIdOfExceptionHandler;
+ FrameProcedureOptions Flags;
uint32_t RecordOffset;
- Hdr Header;
};
// S_CALLSITEINFO
class CallSiteInfoSym : public SymbolRecord {
-public:
- struct Hdr {
- ulittle32_t CodeOffset;
- ulittle16_t Segment;
- ulittle16_t Reserved;
- TypeIndex Type;
- };
+ static constexpr uint32_t RelocationOffset = 4;
- CallSiteInfoSym(uint32_t RecordOffset, const Hdr *H)
- : SymbolRecord(SymbolRecordKind::CallSiteInfoSym),
- RecordOffset(RecordOffset), Header(*H) {}
-
- static ErrorOr<CallSiteInfoSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- CV_DESERIALIZE(Data, H);
-
- return CallSiteInfoSym(RecordOffset, H);
- }
+public:
+ explicit CallSiteInfoSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit CallSiteInfoSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::CallSiteInfoSym),
+        RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, CodeOffset);
+ return RecordOffset + RelocationOffset;
}
+ uint32_t CodeOffset;
+ uint16_t Segment;
+ TypeIndex Type;
+
uint32_t RecordOffset;
- Hdr Header;
};
// S_HEAPALLOCSITE
class HeapAllocationSiteSym : public SymbolRecord {
-public:
- struct Hdr {
- ulittle32_t CodeOffset;
- ulittle16_t Segment;
- ulittle16_t CallInstructionSize;
- TypeIndex Type;
- };
+ static constexpr uint32_t RelocationOffset = 4;
- HeapAllocationSiteSym(uint32_t RecordOffset, const Hdr *H)
+public:
+ explicit HeapAllocationSiteSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit HeapAllocationSiteSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::HeapAllocationSiteSym),
- RecordOffset(RecordOffset), Header(*H) {}
-
- static ErrorOr<HeapAllocationSiteSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- CV_DESERIALIZE(Data, H);
-
- return HeapAllocationSiteSym(RecordOffset, H);
- }
+ RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, CodeOffset);
+ return RecordOffset + RelocationOffset;
}
+ uint32_t CodeOffset;
+ uint16_t Segment;
+ uint16_t CallInstructionSize;
+ TypeIndex Type;
+
uint32_t RecordOffset;
- Hdr Header;
};
// S_FRAMECOOKIE
class FrameCookieSym : public SymbolRecord {
-public:
- struct Hdr {
- ulittle32_t CodeOffset;
- ulittle16_t Register;
- uint8_t CookieKind;
- uint8_t Flags;
- };
-
- FrameCookieSym(uint32_t RecordOffset, const Hdr *H)
- : SymbolRecord(SymbolRecordKind::FrameCookieSym),
- RecordOffset(RecordOffset), Header(*H) {}
+ static constexpr uint32_t RelocationOffset = 4;
- static ErrorOr<FrameCookieSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- CV_DESERIALIZE(Data, H);
-
- return FrameCookieSym(RecordOffset, H);
- }
+public:
+ explicit FrameCookieSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit FrameCookieSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::FrameCookieSym),
+        RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, CodeOffset);
+ return RecordOffset + RelocationOffset;
}
+ uint32_t CodeOffset;
+ uint16_t Register;
+ uint8_t CookieKind;
+ uint8_t Flags;
+
uint32_t RecordOffset;
- Hdr Header;
};
// S_UDT, S_COBOLUDT
class UDTSym : public SymbolRecord {
public:
- struct Hdr {
- TypeIndex Type; // Type of the UDT
- // Name: The null-terminated name follows.
- };
-
- UDTSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::UDTSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
+ explicit UDTSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+  explicit UDTSym(uint32_t RecordOffset)
+      : SymbolRecord(SymbolRecordKind::UDTSym), RecordOffset(RecordOffset) {}
- static ErrorOr<UDTSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return UDTSym(RecordOffset, H, Name);
- }
+ TypeIndex Type;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_BUILDINFO
class BuildInfoSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t BuildId;
- };
-
- BuildInfoSym(uint32_t RecordOffset, const Hdr *H)
+ explicit BuildInfoSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ BuildInfoSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::BuildInfoSym),
- RecordOffset(RecordOffset), Header(*H) {}
+ RecordOffset(RecordOffset) {}
- static ErrorOr<BuildInfoSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- CV_DESERIALIZE(Data, H);
-
- return BuildInfoSym(RecordOffset, H);
- }
+ uint32_t BuildId;
uint32_t RecordOffset;
- Hdr Header;
};
// S_BPREL32
class BPRelativeSym : public SymbolRecord {
public:
- struct Hdr {
- little32_t Offset; // Offset from the base pointer register
- TypeIndex Type; // Type of the variable
- // Name: The null-terminated name follows.
- };
-
- BPRelativeSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
+ explicit BPRelativeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit BPRelativeSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::BPRelativeSym),
- RecordOffset(RecordOffset), Header(*H), Name(Name) {}
-
- static ErrorOr<BPRelativeSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
+ RecordOffset(RecordOffset) {}
- return BPRelativeSym(RecordOffset, H, Name);
- }
+ int32_t Offset;
+ TypeIndex Type;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_REGREL32
class RegRelativeSym : public SymbolRecord {
public:
- struct Hdr {
- ulittle32_t Offset; // Offset from the register
- TypeIndex Type; // Type of the variable
- ulittle16_t Register; // Register to which the variable is relative
- // Name: The null-terminated name follows.
- };
-
- RegRelativeSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
+ explicit RegRelativeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit RegRelativeSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::RegRelativeSym),
- RecordOffset(RecordOffset), Header(*H), Name(Name) {}
-
- static ErrorOr<RegRelativeSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
+ RecordOffset(RecordOffset) {}
- return RegRelativeSym(RecordOffset, H, Name);
- }
+ uint32_t Offset;
+ TypeIndex Type;
+ uint16_t Register;
+ StringRef Name;
uint32_t RecordOffset;
- Hdr Header;
- StringRef Name;
};
// S_CONSTANT, S_MANCONSTANT
class ConstantSym : public SymbolRecord {
public:
- struct Hdr {
- TypeIndex Type;
- // Value: The value of the constant.
- // Name: The null-terminated name follows.
- };
-
- ConstantSym(uint32_t RecordOffset, const Hdr *H, const APSInt &Value,
- StringRef Name)
- : SymbolRecord(SymbolRecordKind::ConstantSym), RecordOffset(RecordOffset),
- Header(*H), Value(Value), Name(Name) {}
-
- static ErrorOr<ConstantSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- APSInt Value;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Value, Name);
-
- return ConstantSym(RecordOffset, H, Value, Name);
- }
+ explicit ConstantSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ ConstantSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::ConstantSym),
+ RecordOffset(RecordOffset) {}
- uint32_t RecordOffset;
- Hdr Header;
+ TypeIndex Type;
APSInt Value;
StringRef Name;
+
+ uint32_t RecordOffset;
};
// S_LDATA32, S_GDATA32, S_LMANDATA, S_GMANDATA
class DataSym : public SymbolRecord {
-public:
- struct Hdr {
- TypeIndex Type;
- ulittle32_t DataOffset;
- ulittle16_t Segment;
- // Name: The null-terminated name follows.
- };
-
- DataSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
- : SymbolRecord(SymbolRecordKind::DataSym), RecordOffset(RecordOffset),
- Header(*H), Name(Name) {}
+ static constexpr uint32_t RelocationOffset = 8;
- static ErrorOr<DataSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return DataSym(RecordOffset, H, Name);
- }
+public:
+ explicit DataSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ DataSym(uint32_t RecordOffset)
+ : SymbolRecord(SymbolRecordKind::DataSym), RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, DataOffset);
+ return RecordOffset + RelocationOffset;
}
- uint32_t RecordOffset;
- Hdr Header;
+ TypeIndex Type;
+ uint32_t DataOffset;
+ uint16_t Segment;
StringRef Name;
+
+ uint32_t RecordOffset;
};
// S_LTHREAD32, S_GTHREAD32
class ThreadLocalDataSym : public SymbolRecord {
-public:
- struct Hdr {
- TypeIndex Type;
- ulittle32_t DataOffset;
- ulittle16_t Segment;
- // Name: The null-terminated name follows.
- };
+ static constexpr uint32_t RelocationOffset = 8;
- ThreadLocalDataSym(uint32_t RecordOffset, const Hdr *H, StringRef Name)
+public:
+ explicit ThreadLocalDataSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
+ explicit ThreadLocalDataSym(uint32_t RecordOffset)
: SymbolRecord(SymbolRecordKind::ThreadLocalDataSym),
- RecordOffset(RecordOffset), Header(*H), Name(Name) {}
-
- static ErrorOr<ThreadLocalDataSym> deserialize(SymbolRecordKind Kind,
- uint32_t RecordOffset,
- ArrayRef<uint8_t> &Data) {
- const Hdr *H = nullptr;
- StringRef Name;
- CV_DESERIALIZE(Data, H, Name);
-
- return ThreadLocalDataSym(RecordOffset, H, Name);
- }
+ RecordOffset(RecordOffset) {}
uint32_t getRelocationOffset() const {
- return RecordOffset + offsetof(Hdr, DataOffset);
+ return RecordOffset + RelocationOffset;
}
- uint32_t RecordOffset;
- Hdr Header;
+ TypeIndex Type;
+ uint32_t DataOffset;
+ uint16_t Segment;
StringRef Name;
+
+ uint32_t RecordOffset;
};
typedef CVRecord<SymbolKind> CVSymbol;
-typedef VarStreamArray<CVSymbol> CVSymbolArray;
+typedef msf::VarStreamArray<CVSymbol> CVSymbolArray;
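+// Illustrative sketch (not part of this header): a symbol stream can be
+// walked by wrapping its bytes in the MSF stream classes used elsewhere in
+// this patch and iterating the resulting CVSymbolArray. readArray() and
+// visitSymbol() are assumed helpers here, not APIs defined by this header.
+//   msf::ByteStream Stream(Bytes);
+//   msf::StreamReader Reader(Stream);
+//   CVSymbolArray Symbols;
+//   if (auto EC = Reader.readArray(Symbols, Reader.getLength()))
+//     return EC;
+//   for (const CVSymbol &Sym : Symbols)
+//     visitSymbol(Sym); // dispatch on Sym.kind()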
-} // namespace codeview
-} // namespace llvm
+} // end namespace codeview
+} // end namespace llvm
-#endif
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H
diff --git a/include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h b/include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h
new file mode 100644
index 000000000000..1bd14ed1347a
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/SymbolRecordMapping.h
@@ -0,0 +1,44 @@
+//===- SymbolRecordMapping.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H
+
+#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+
+namespace llvm {
+namespace msf {
+class StreamReader;
+class StreamWriter;
+} // end namespace msf
+
+namespace codeview {
+class SymbolRecordMapping : public SymbolVisitorCallbacks {
+public:
+ explicit SymbolRecordMapping(msf::StreamReader &Reader) : IO(Reader) {}
+ explicit SymbolRecordMapping(msf::StreamWriter &Writer) : IO(Writer) {}
+
+ Error visitSymbolBegin(CVSymbol &Record) override;
+ Error visitSymbolEnd(CVSymbol &Record) override;
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownRecord(CVSymbol &CVR, Name &Record) override;
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "CVSymbolTypes.def"
+
+private:
+ Optional<SymbolKind> Kind;
+
+ CodeViewRecordIO IO;
+};
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H
diff --git a/include/llvm/DebugInfo/CodeView/SymbolSerializer.h b/include/llvm/DebugInfo/CodeView/SymbolSerializer.h
new file mode 100644
index 000000000000..4eb914e7ae6b
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/SymbolSerializer.h
@@ -0,0 +1,96 @@
+//===- SymbolSerializer.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+#include "llvm/DebugInfo/MSF/ByteStream.h"
+#include "llvm/DebugInfo/MSF/StreamWriter.h"
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
+class SymbolSerializer : public SymbolVisitorCallbacks {
+ uint32_t RecordStart = 0;
+ msf::StreamWriter &Writer;
+ SymbolRecordMapping Mapping;
+ Optional<SymbolKind> CurrentSymbol;
+
+ Error writeRecordPrefix(SymbolKind Kind) {
+ RecordPrefix Prefix;
+ Prefix.RecordKind = Kind;
+ Prefix.RecordLen = 0;
+ if (auto EC = Writer.writeObject(Prefix))
+ return EC;
+ return Error::success();
+ }
+
+public:
+ explicit SymbolSerializer(msf::StreamWriter &Writer)
+ : Writer(Writer), Mapping(Writer) {}
+
+ virtual Error visitSymbolBegin(CVSymbol &Record) override {
+ assert(!CurrentSymbol.hasValue() && "Already in a symbol mapping!");
+
+ RecordStart = Writer.getOffset();
+ if (auto EC = writeRecordPrefix(Record.kind()))
+ return EC;
+
+ CurrentSymbol = Record.kind();
+ if (auto EC = Mapping.visitSymbolBegin(Record))
+ return EC;
+
+ return Error::success();
+ }
+
+ virtual Error visitSymbolEnd(CVSymbol &Record) override {
+ assert(CurrentSymbol.hasValue() && "Not in a symbol mapping!");
+
+ if (auto EC = Mapping.visitSymbolEnd(Record))
+ return EC;
+
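+    // Backpatch the record length: rewind to the prefix written in
+    // visitSymbolBegin() and overwrite RecordLen with the final size, which
+    // by CodeView convention excludes the 2-byte length field itself.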
+ uint32_t RecordEnd = Writer.getOffset();
+ Writer.setOffset(RecordStart);
+ uint16_t Length = RecordEnd - Writer.getOffset() - 2;
+ if (auto EC = Writer.writeInteger(Length))
+ return EC;
+
+ Writer.setOffset(RecordEnd);
+ CurrentSymbol.reset();
+
+ return Error::success();
+ }
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
+ virtual Error visitKnownRecord(CVSymbol &CVR, Name &Record) override { \
+ return visitKnownRecordImpl(CVR, Record); \
+ }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "CVSymbolTypes.def"
+
+private:
+ template <typename RecordKind>
+ Error visitKnownRecordImpl(CVSymbol &CVR, RecordKind &Record) {
+ return Mapping.visitKnownRecord(CVR, Record);
+ }
+};
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H
diff --git a/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h b/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h
new file mode 100644
index 000000000000..96a93bf7e576
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbackPipeline.h
@@ -0,0 +1,71 @@
+//===- SymbolVisitorCallbackPipeline.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
+#include "llvm/Support/Error.h"
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class SymbolVisitorCallbackPipeline : public SymbolVisitorCallbacks {
+public:
+ SymbolVisitorCallbackPipeline() = default;
+
+ Error visitUnknownSymbol(CVSymbol &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitUnknownSymbol(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ Error visitSymbolBegin(CVSymbol &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitSymbolBegin(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ Error visitSymbolEnd(CVSymbol &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitSymbolEnd(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ void addCallbackToPipeline(SymbolVisitorCallbacks &Callbacks) {
+ Pipeline.push_back(&Callbacks);
+ }
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownRecord(CVSymbol &CVR, Name &Record) override { \
+ for (auto Visitor : Pipeline) { \
+ if (auto EC = Visitor->visitKnownRecord(CVR, Record)) \
+ return EC; \
+ } \
+ return Error::success(); \
+ }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CVSymbolTypes.def"
+
+private:
+ std::vector<SymbolVisitorCallbacks *> Pipeline;
+};
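+// Illustrative usage (the callback names are assumptions): build a pipeline
+// and hand it to whatever visitor drives the callbacks, e.g. the
+// CVSymbolVisitor befriended by SymbolVisitorCallbacks; every registered
+// callback then sees each record, in registration order.
+//   SymbolVisitorCallbackPipeline Pipeline;
+//   Pipeline.addCallbackToPipeline(Deserializer); // hypothetical callbacks
+//   Pipeline.addCallbackToPipeline(Dumper);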
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H
diff --git a/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h b/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h
new file mode 100644
index 000000000000..aaa9d2e85e13
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h
@@ -0,0 +1,48 @@
+//===- SymbolVisitorCallbacks.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
+#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
+
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
+class SymbolVisitorCallbacks {
+ friend class CVSymbolVisitor;
+
+public:
+ virtual ~SymbolVisitorCallbacks() = default;
+
+ /// Action to take on unknown symbols. By default, they are ignored.
+ virtual Error visitUnknownSymbol(CVSymbol &Record) {
+ return Error::success();
+ }
+
+ /// Paired begin/end actions for all symbols. Receives all record data,
+  /// including the fixed-length record prefix. visitSymbolBegin() should
+  /// return the type of the symbol, or an error if it cannot be determined.
+ virtual Error visitSymbolBegin(CVSymbol &Record) { return Error::success(); }
+ virtual Error visitSymbolEnd(CVSymbol &Record) { return Error::success(); }
+
+#define SYMBOL_RECORD(EnumName, EnumVal, Name) \
+ virtual Error visitKnownRecord(CVSymbol &CVR, Name &Record) { \
+ return Error::success(); \
+ }
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "CVSymbolTypes.def"
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
diff --git a/include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h b/include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h
index a4965168c3db..2b468a289fd8 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h
@@ -10,24 +10,28 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
-
-#include <stdint.h>
+#include <cstdint>
namespace llvm {
+namespace msf {
+class StreamReader;
+} // end namespace msf
+
namespace codeview {
class SymbolVisitorDelegate {
public:
- virtual ~SymbolVisitorDelegate() {}
+ virtual ~SymbolVisitorDelegate() = default;
- virtual uint32_t getRecordOffset(ArrayRef<uint8_t> Record) = 0;
+ virtual uint32_t getRecordOffset(msf::StreamReader Reader) = 0;
virtual StringRef getFileNameForFileOffset(uint32_t FileOffset) = 0;
virtual StringRef getStringTable() = 0;
};
+
} // end namespace codeview
+
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H
diff --git a/include/llvm/DebugInfo/CodeView/TypeDeserializer.h b/include/llvm/DebugInfo/CodeView/TypeDeserializer.h
new file mode 100644
index 000000000000..dc5eaf82845b
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/TypeDeserializer.h
@@ -0,0 +1,136 @@
+//===- TypeDeserializer.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/DebugInfo/MSF/ByteStream.h"
+#include "llvm/DebugInfo/MSF/StreamReader.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace codeview {
+
+class TypeDeserializer : public TypeVisitorCallbacks {
+ struct MappingInfo {
+ explicit MappingInfo(ArrayRef<uint8_t> RecordData)
+ : Stream(RecordData), Reader(Stream), Mapping(Reader) {}
+
+ msf::ByteStream Stream;
+ msf::StreamReader Reader;
+ TypeRecordMapping Mapping;
+ };
+
+public:
+ TypeDeserializer() = default;
+
+ Error visitTypeBegin(CVType &Record) override {
+ assert(!Mapping && "Already in a type mapping!");
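+    // Stand up a fresh byte stream, reader, and mapping over just this
+    // record's payload so the generated visitKnownRecord overloads below can
+    // deserialize fields directly into the concrete record type.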
+ Mapping = llvm::make_unique<MappingInfo>(Record.content());
+ return Mapping->Mapping.visitTypeBegin(Record);
+ }
+
+ Error visitTypeEnd(CVType &Record) override {
+ assert(Mapping && "Not in a type mapping!");
+ auto EC = Mapping->Mapping.visitTypeEnd(Record);
+ Mapping.reset();
+ return EC;
+ }
+
+#define TYPE_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownRecord(CVType &CVR, Name##Record &Record) override { \
+ return visitKnownRecordImpl<Name##Record>(CVR, Record); \
+ }
+#define MEMBER_RECORD(EnumName, EnumVal, Name)
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "TypeRecords.def"
+
+private:
+ template <typename RecordType>
+ Error visitKnownRecordImpl(CVType &CVR, RecordType &Record) {
+ return Mapping->Mapping.visitKnownRecord(CVR, Record);
+ }
+
+ std::unique_ptr<MappingInfo> Mapping;
+};
+
+class FieldListDeserializer : public TypeVisitorCallbacks {
+ struct MappingInfo {
+ explicit MappingInfo(msf::StreamReader &R)
+ : Reader(R), Mapping(Reader), StartOffset(0) {}
+
+ msf::StreamReader &Reader;
+ TypeRecordMapping Mapping;
+ uint32_t StartOffset;
+ };
+
+public:
+ explicit FieldListDeserializer(msf::StreamReader &Reader) : Mapping(Reader) {
+ CVType FieldList;
+ FieldList.Type = TypeLeafKind::LF_FIELDLIST;
+ consumeError(Mapping.Mapping.visitTypeBegin(FieldList));
+ }
+
+ ~FieldListDeserializer() override {
+ CVType FieldList;
+ FieldList.Type = TypeLeafKind::LF_FIELDLIST;
+ consumeError(Mapping.Mapping.visitTypeEnd(FieldList));
+ }
+
+ Error visitMemberBegin(CVMemberRecord &Record) override {
+ Mapping.StartOffset = Mapping.Reader.getOffset();
+ return Mapping.Mapping.visitMemberBegin(Record);
+ }
+
+ Error visitMemberEnd(CVMemberRecord &Record) override {
+ if (auto EC = Mapping.Mapping.visitMemberEnd(Record))
+ return EC;
+ return Error::success();
+ }
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)
+#define MEMBER_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override { \
+ return visitKnownMemberImpl<Name##Record>(CVR, Record); \
+ }
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "TypeRecords.def"
+
+private:
+ template <typename RecordType>
+ Error visitKnownMemberImpl(CVMemberRecord &CVR, RecordType &Record) {
+ if (auto EC = Mapping.Mapping.visitKnownMember(CVR, Record))
+ return EC;
+
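+    // The mapping above has already consumed this member's bytes; rewind the
+    // shared reader and re-read the same span so that CVR.Data holds the raw
+    // record for later consumers.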
+ uint32_t EndOffset = Mapping.Reader.getOffset();
+ uint32_t RecordLength = EndOffset - Mapping.StartOffset;
+ Mapping.Reader.setOffset(Mapping.StartOffset);
+ if (auto EC = Mapping.Reader.readBytes(CVR.Data, RecordLength))
+ return EC;
+ assert(Mapping.Reader.getOffset() == EndOffset);
+ return Error::success();
+ }
+ MappingInfo Mapping;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
diff --git a/include/llvm/DebugInfo/CodeView/TypeDumper.h b/include/llvm/DebugInfo/CodeView/TypeDumper.h
index ca79ab076e5e..5a8b555cec02 100644
--- a/include/llvm/DebugInfo/CodeView/TypeDumper.h
+++ b/include/llvm/DebugInfo/CodeView/TypeDumper.h
@@ -63,18 +63,20 @@ public:
ScopedPrinter *getPrinter() { return W; }
/// Action to take on unknown types. By default, they are ignored.
- Error visitUnknownType(const CVRecord<TypeLeafKind> &Record) override;
- Error visitUnknownMember(const CVRecord<TypeLeafKind> &Record) override;
+ Error visitUnknownType(CVType &Record) override;
+ Error visitUnknownMember(CVMemberRecord &Record) override;
/// Paired begin/end actions for all types. Receives all record data,
/// including the fixed-length record prefix.
- Error visitTypeBegin(const CVRecord<TypeLeafKind> &Record) override;
- Error visitTypeEnd(const CVRecord<TypeLeafKind> &Record) override;
+ Error visitTypeBegin(CVType &Record) override;
+ Error visitTypeEnd(CVType &Record) override;
+ Error visitMemberBegin(CVMemberRecord &Record) override;
+ Error visitMemberEnd(CVMemberRecord &Record) override;
#define TYPE_RECORD(EnumName, EnumVal, Name) \
- Error visit##Name(Name##Record &Record) override;
+ Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
#define MEMBER_RECORD(EnumName, EnumVal, Name) \
- TYPE_RECORD(EnumName, EnumVal, Name)
+ Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "TypeRecords.def"
@@ -86,6 +88,7 @@ private:
ScopedPrinter *W;
+ bool IsInFieldList = false;
bool PrintRecordBytes = false;
/// Name of the current type. Only valid before visitTypeEnd.
diff --git a/include/llvm/DebugInfo/CodeView/TypeIndex.h b/include/llvm/DebugInfo/CodeView/TypeIndex.h
index c2ebf3848892..3c11d248fa72 100644
--- a/include/llvm/DebugInfo/CodeView/TypeIndex.h
+++ b/include/llvm/DebugInfo/CodeView/TypeIndex.h
@@ -93,7 +93,7 @@ public:
static const uint32_t SimpleModeMask = 0x00000700;
public:
- TypeIndex() : Index(0) {}
+ TypeIndex() : Index(static_cast<uint32_t>(SimpleTypeKind::None)) {}
explicit TypeIndex(uint32_t Index) : Index(Index) {}
explicit TypeIndex(SimpleTypeKind Kind)
: Index(static_cast<uint32_t>(Kind)) {}
@@ -101,6 +101,7 @@ public:
: Index(static_cast<uint32_t>(Kind) | static_cast<uint32_t>(Mode)) {}
uint32_t getIndex() const { return Index; }
+ void setIndex(uint32_t I) { Index = I; }
bool isSimple() const { return Index < FirstNonSimpleIndex; }
bool isNoneType() const { return *this == None(); }
diff --git a/include/llvm/DebugInfo/CodeView/TypeRecord.h b/include/llvm/DebugInfo/CodeView/TypeRecord.h
index 42751fbd4af1..4f1c047815d2 100644
--- a/include/llvm/DebugInfo/CodeView/TypeRecord.h
+++ b/include/llvm/DebugInfo/CodeView/TypeRecord.h
@@ -12,27 +12,54 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
-#include "llvm/Support/ErrorOr.h"
-#include <cinttypes>
-#include <utility>
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/Support/Endian.h"
+#include <algorithm>
+#include <cstdint>
+#include <vector>
namespace llvm {
+
+namespace msf {
+class StreamReader;
+} // end namespace msf
+
namespace codeview {
-using llvm::support::little32_t;
-using llvm::support::ulittle16_t;
-using llvm::support::ulittle32_t;
+using support::little32_t;
+using support::ulittle16_t;
+using support::ulittle32_t;
+
+typedef CVRecord<TypeLeafKind> CVType;
+
+struct CVMemberRecord {
+ TypeLeafKind Kind;
+ ArrayRef<uint8_t> Data;
+};
+typedef msf::VarStreamArray<CVType> CVTypeArray;
/// Equivalent to CV_fldattr_t in cvinfo.h.
struct MemberAttributes {
- ulittle16_t Attrs;
+ uint16_t Attrs = 0;
enum {
MethodKindShift = 2,
};
+ MemberAttributes() = default;
+
+ explicit MemberAttributes(MemberAccess Access)
+ : Attrs(static_cast<uint16_t>(Access)) {}
+
+ MemberAttributes(MemberAccess Access, MethodKind Kind, MethodOptions Flags) {
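+    // Pack all three pieces into the single CV_fldattr_t word: access in the
+    // low bits, the method kind above MethodKindShift, and the method options
+    // OR'd in.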
+ Attrs = static_cast<uint16_t>(Access);
+ Attrs |= (static_cast<uint16_t>(Kind) << MethodKindShift);
+ Attrs |= static_cast<uint16_t>(Flags);
+ }
/// Get the access specifier. Valid for any kind of member.
MemberAccess getAccess() const {
@@ -73,7 +100,7 @@ struct MemberAttributes {
// if it represents a member pointer.
class MemberPointerInfo {
public:
- MemberPointerInfo() {}
+ MemberPointerInfo() = default;
MemberPointerInfo(TypeIndex ContainingType,
PointerToMemberRepresentation Representation)
@@ -83,25 +110,18 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<MemberPointerInfo> deserialize(ArrayRef<uint8_t> &Data);
-
TypeIndex getContainingType() const { return ContainingType; }
PointerToMemberRepresentation getRepresentation() const {
return Representation;
}
-private:
- struct Layout {
- TypeIndex ClassType;
- ulittle16_t Representation; // PointerToMemberRepresentation
- };
-
TypeIndex ContainingType;
PointerToMemberRepresentation Representation;
};
class TypeRecord {
protected:
+ TypeRecord() = default;
explicit TypeRecord(TypeRecordKind Kind) : Kind(Kind) {}
public:
@@ -114,6 +134,7 @@ private:
// LF_MODIFIER
class ModifierRecord : public TypeRecord {
public:
+ explicit ModifierRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
ModifierRecord(TypeIndex ModifiedType, ModifierOptions Modifiers)
: TypeRecord(TypeRecordKind::Modifier), ModifiedType(ModifiedType),
Modifiers(Modifiers) {}
@@ -122,18 +143,9 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<ModifierRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getModifiedType() const { return ModifiedType; }
ModifierOptions getModifiers() const { return Modifiers; }
-private:
- struct Layout {
- TypeIndex ModifiedType;
- ulittle16_t Modifiers; // ModifierOptions
- };
-
TypeIndex ModifiedType;
ModifierOptions Modifiers;
};
@@ -141,6 +153,7 @@ private:
// LF_PROCEDURE
class ProcedureRecord : public TypeRecord {
public:
+ explicit ProcedureRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
ProcedureRecord(TypeIndex ReturnType, CallingConvention CallConv,
FunctionOptions Options, uint16_t ParameterCount,
TypeIndex ArgumentList)
@@ -152,26 +165,12 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<ProcedureRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
- static uint32_t getLayoutSize() { return 2 + sizeof(Layout); }
-
TypeIndex getReturnType() const { return ReturnType; }
CallingConvention getCallConv() const { return CallConv; }
FunctionOptions getOptions() const { return Options; }
uint16_t getParameterCount() const { return ParameterCount; }
TypeIndex getArgumentList() const { return ArgumentList; }
-private:
- struct Layout {
- TypeIndex ReturnType;
- CallingConvention CallConv;
- FunctionOptions Options;
- ulittle16_t NumParameters;
- TypeIndex ArgListType;
- };
-
TypeIndex ReturnType;
CallingConvention CallConv;
FunctionOptions Options;
@@ -182,6 +181,8 @@ private:
// LF_MFUNCTION
class MemberFunctionRecord : public TypeRecord {
public:
+ explicit MemberFunctionRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+
MemberFunctionRecord(TypeIndex ReturnType, TypeIndex ClassType,
TypeIndex ThisType, CallingConvention CallConv,
FunctionOptions Options, uint16_t ParameterCount,
@@ -196,9 +197,6 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<MemberFunctionRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getReturnType() const { return ReturnType; }
TypeIndex getClassType() const { return ClassType; }
TypeIndex getThisType() const { return ThisType; }
@@ -208,18 +206,6 @@ public:
TypeIndex getArgumentList() const { return ArgumentList; }
int32_t getThisPointerAdjustment() const { return ThisPointerAdjustment; }
-private:
- struct Layout {
- TypeIndex ReturnType;
- TypeIndex ClassType;
- TypeIndex ThisType;
- CallingConvention CallConv;
- FunctionOptions Options;
- ulittle16_t NumParameters;
- TypeIndex ArgListType;
- little32_t ThisAdjustment;
- };
-
TypeIndex ReturnType;
TypeIndex ClassType;
TypeIndex ThisType;
@@ -233,6 +219,7 @@ private:
// LF_MFUNC_ID
class MemberFuncIdRecord : public TypeRecord {
public:
+ explicit MemberFuncIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
MemberFuncIdRecord(TypeIndex ClassType, TypeIndex FunctionType,
StringRef Name)
: TypeRecord(TypeRecordKind::MemberFuncId), ClassType(ClassType),
@@ -242,18 +229,9 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<MemberFuncIdRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
TypeIndex getClassType() const { return ClassType; }
TypeIndex getFunctionType() const { return FunctionType; }
StringRef getName() const { return Name; }
-
-private:
- struct Layout {
- TypeIndex ClassType;
- TypeIndex FunctionType;
- // Name: The null-terminated name follows.
- };
TypeIndex ClassType;
TypeIndex FunctionType;
StringRef Name;
@@ -262,6 +240,8 @@ private:
// LF_ARGLIST, LF_SUBSTR_LIST
class ArgListRecord : public TypeRecord {
public:
+ explicit ArgListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+
ArgListRecord(TypeRecordKind Kind, ArrayRef<TypeIndex> Indices)
: TypeRecord(Kind), StringIndices(Indices) {}
@@ -269,19 +249,8 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<ArgListRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
ArrayRef<TypeIndex> getIndices() const { return StringIndices; }
- static uint32_t getLayoutSize() { return 2 + sizeof(Layout); }
-
-private:
- struct Layout {
- ulittle32_t NumArgs; // Number of arguments
- // ArgTypes[]: Type indicies of arguments
- };
-
std::vector<TypeIndex> StringIndices;
};
@@ -294,94 +263,96 @@ public:
static const uint32_t PointerModeShift = 5;
static const uint32_t PointerModeMask = 0x07;
+ static const uint32_t PointerOptionMask = 0xFF;
+
static const uint32_t PointerSizeShift = 13;
static const uint32_t PointerSizeMask = 0xFF;
- PointerRecord(TypeIndex ReferentType, PointerKind Kind, PointerMode Mode,
- PointerOptions Options, uint8_t Size)
- : PointerRecord(ReferentType, Kind, Mode, Options, Size,
- MemberPointerInfo()) {}
+ explicit PointerRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
- PointerRecord(TypeIndex ReferentType, PointerKind Kind, PointerMode Mode,
- PointerOptions Options, uint8_t Size,
+ PointerRecord(TypeIndex ReferentType, uint32_t Attrs)
+ : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
+ Attrs(Attrs) {}
+
+ PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
+ PointerOptions PO, uint8_t Size)
+ : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
+ Attrs(calcAttrs(PK, PM, PO, Size)) {}
+
+ PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
+ PointerOptions PO, uint8_t Size,
+ const MemberPointerInfo &Member)
+ : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
+ Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(Member) {}
+
+ PointerRecord(TypeIndex ReferentType, uint32_t Attrs,
const MemberPointerInfo &Member)
: TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
- PtrKind(Kind), Mode(Mode), Options(Options), Size(Size),
- MemberInfo(Member) {}
+ Attrs(Attrs), MemberInfo(Member) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<PointerRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getReferentType() const { return ReferentType; }
- PointerKind getPointerKind() const { return PtrKind; }
- PointerMode getMode() const { return Mode; }
- PointerOptions getOptions() const { return Options; }
- uint8_t getSize() const { return Size; }
- MemberPointerInfo getMemberInfo() const { return MemberInfo; }
- bool isPointerToMember() const {
- return Mode == PointerMode::PointerToDataMember ||
- Mode == PointerMode::PointerToMemberFunction;
+ PointerKind getPointerKind() const {
+ return static_cast<PointerKind>((Attrs >> PointerKindShift) &
+ PointerKindMask);
+ }
+
+ PointerMode getMode() const {
+ return static_cast<PointerMode>((Attrs >> PointerModeShift) &
+ PointerModeMask);
}
- bool isFlat() const {
- return !!(uint32_t(Options) & uint32_t(PointerOptions::Flat32));
+
+ PointerOptions getOptions() const {
+ return static_cast<PointerOptions>(Attrs);
}
- bool isConst() const {
- return !!(uint32_t(Options) & uint32_t(PointerOptions::Const));
+
+ uint8_t getSize() const {
+ return (Attrs >> PointerSizeShift) & PointerSizeMask;
}
+
+ MemberPointerInfo getMemberInfo() const { return *MemberInfo; }
+
+ bool isPointerToMember() const {
+ return getMode() == PointerMode::PointerToDataMember ||
+ getMode() == PointerMode::PointerToMemberFunction;
+ }
+
+ bool isFlat() const { return !!(Attrs & uint32_t(PointerOptions::Flat32)); }
+ bool isConst() const { return !!(Attrs & uint32_t(PointerOptions::Const)); }
+
bool isVolatile() const {
- return !!(uint32_t(Options) & uint32_t(PointerOptions::Volatile));
+ return !!(Attrs & uint32_t(PointerOptions::Volatile));
}
+
bool isUnaligned() const {
- return !!(uint32_t(Options) & uint32_t(PointerOptions::Unaligned));
+ return !!(Attrs & uint32_t(PointerOptions::Unaligned));
}
-private:
- struct Layout {
- TypeIndex PointeeType;
- ulittle32_t Attrs; // pointer attributes
- // if pointer to member:
- // PointerToMemberTail
- PointerKind getPtrKind() const {
- return PointerKind(Attrs & PointerKindMask);
- }
- PointerMode getPtrMode() const {
- return PointerMode((Attrs >> PointerModeShift) & PointerModeMask);
- }
- uint8_t getPtrSize() const {
- return (Attrs >> PointerSizeShift) & PointerSizeMask;
- }
- bool isFlat() const { return Attrs & (1 << 8); }
- bool isVolatile() const { return Attrs & (1 << 9); }
- bool isConst() const { return Attrs & (1 << 10); }
- bool isUnaligned() const { return Attrs & (1 << 11); }
-
- bool isPointerToDataMember() const {
- return getPtrMode() == PointerMode::PointerToDataMember;
- }
- bool isPointerToMemberFunction() const {
- return getPtrMode() == PointerMode::PointerToMemberFunction;
- }
- bool isPointerToMember() const {
- return isPointerToMemberFunction() || isPointerToDataMember();
- }
- };
-
TypeIndex ReferentType;
- PointerKind PtrKind;
- PointerMode Mode;
- PointerOptions Options;
- uint8_t Size;
- MemberPointerInfo MemberInfo;
+ uint32_t Attrs;
+
+ Optional<MemberPointerInfo> MemberInfo;
+
+private:
+ static uint32_t calcAttrs(PointerKind PK, PointerMode PM, PointerOptions PO,
+ uint8_t Size) {
+ uint32_t A = 0;
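+    // Mirror the getters above: kind goes in the low bits, the options are
+    // OR'd on top, the mode is shifted to PointerModeShift, and the byte
+    // size to PointerSizeShift.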
+ A |= static_cast<uint32_t>(PK);
+ A |= static_cast<uint32_t>(PO);
+ A |= (static_cast<uint32_t>(PM) << PointerModeShift);
+ A |= (static_cast<uint32_t>(Size) << PointerSizeShift);
+ return A;
+ }
};
// LF_NESTTYPE
class NestedTypeRecord : public TypeRecord {
public:
+ explicit NestedTypeRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
NestedTypeRecord(TypeIndex Type, StringRef Name)
: TypeRecord(TypeRecordKind::NestedType), Type(Type), Name(Name) {}
@@ -389,26 +360,31 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<NestedTypeRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getNestedType() const { return Type; }
StringRef getName() const { return Name; }
-private:
- struct Layout {
- ulittle16_t Pad0; // Should be zero
- TypeIndex Type; // Type index of nested type
- // Name: Null-terminated string
- };
-
TypeIndex Type;
StringRef Name;
};
+// LF_FIELDLIST
+class FieldListRecord : public TypeRecord {
+public:
+ explicit FieldListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+ explicit FieldListRecord(ArrayRef<uint8_t> Data)
+ : TypeRecord(TypeRecordKind::FieldList), Data(Data) {}
+
+ /// Rewrite member type indices with IndexMap. Returns false if a type index
+ /// is not in the map.
+ bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap) { return false; }
+
+ ArrayRef<uint8_t> Data;
+};
+
// LF_ARRAY
class ArrayRecord : public TypeRecord {
public:
+ explicit ArrayRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
ArrayRecord(TypeIndex ElementType, TypeIndex IndexType, uint64_t Size,
StringRef Name)
: TypeRecord(TypeRecordKind::Array), ElementType(ElementType),
@@ -418,30 +394,20 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<ArrayRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getElementType() const { return ElementType; }
TypeIndex getIndexType() const { return IndexType; }
uint64_t getSize() const { return Size; }
- llvm::StringRef getName() const { return Name; }
-
-private:
- struct Layout {
- TypeIndex ElementType;
- TypeIndex IndexType;
- // SizeOf: LF_NUMERIC encoded size in bytes. Not element count!
- // Name: The null-terminated name follows.
- };
+ StringRef getName() const { return Name; }
TypeIndex ElementType;
TypeIndex IndexType;
uint64_t Size;
- llvm::StringRef Name;
+ StringRef Name;
};
class TagRecord : public TypeRecord {
protected:
+ explicit TagRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
TagRecord(TypeRecordKind Kind, uint16_t MemberCount, ClassOptions Options,
TypeIndex FieldList, StringRef Name, StringRef UniqueName)
: TypeRecord(Kind), MemberCount(MemberCount), Options(Options),
@@ -457,13 +423,16 @@ public:
static const int WinRTKindShift = 14;
static const int WinRTKindMask = 0xC000;
+ bool hasUniqueName() const {
+ return (Options & ClassOptions::HasUniqueName) != ClassOptions::None;
+ }
+
uint16_t getMemberCount() const { return MemberCount; }
ClassOptions getOptions() const { return Options; }
TypeIndex getFieldList() const { return FieldList; }
StringRef getName() const { return Name; }
StringRef getUniqueName() const { return UniqueName; }
-private:
uint16_t MemberCount;
ClassOptions Options;
TypeIndex FieldList;
@@ -474,45 +443,34 @@ private:
// LF_CLASS, LF_STRUCTURE, LF_INTERFACE
class ClassRecord : public TagRecord {
public:
+ explicit ClassRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
ClassRecord(TypeRecordKind Kind, uint16_t MemberCount, ClassOptions Options,
- HfaKind Hfa, WindowsRTClassKind WinRTKind, TypeIndex FieldList,
- TypeIndex DerivationList, TypeIndex VTableShape, uint64_t Size,
- StringRef Name, StringRef UniqueName)
+ TypeIndex FieldList, TypeIndex DerivationList,
+ TypeIndex VTableShape, uint64_t Size, StringRef Name,
+ StringRef UniqueName)
: TagRecord(Kind, MemberCount, Options, FieldList, Name, UniqueName),
- Hfa(Hfa), WinRTKind(WinRTKind), DerivationList(DerivationList),
- VTableShape(VTableShape), Size(Size) {}
+ DerivationList(DerivationList), VTableShape(VTableShape), Size(Size) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<ClassRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
+ HfaKind getHfa() const {
+ uint16_t Value = static_cast<uint16_t>(Options);
+ Value = (Value & HfaKindMask) >> HfaKindShift;
+ return static_cast<HfaKind>(Value);
+ }
+
+ WindowsRTClassKind getWinRTKind() const {
+ uint16_t Value = static_cast<uint16_t>(Options);
+ Value = (Value & WinRTKindMask) >> WinRTKindShift;
+ return static_cast<WindowsRTClassKind>(Value);
+ }
- HfaKind getHfa() const { return Hfa; }
- WindowsRTClassKind getWinRTKind() const { return WinRTKind; }
TypeIndex getDerivationList() const { return DerivationList; }
TypeIndex getVTableShape() const { return VTableShape; }
uint64_t getSize() const { return Size; }
-private:
- struct Layout {
- ulittle16_t MemberCount; // Number of members in FieldList.
- ulittle16_t Properties; // ClassOptions bitset
- TypeIndex FieldList; // LF_FIELDLIST: List of all kinds of members
- TypeIndex DerivedFrom; // LF_DERIVED: List of known derived classes
- TypeIndex VShape; // LF_VTSHAPE: Shape of the vftable
- // SizeOf: The 'sizeof' the UDT in bytes is encoded as an LF_NUMERIC
- // integer.
- // Name: The null-terminated name follows.
-
- bool hasUniqueName() const {
- return Properties & uint16_t(ClassOptions::HasUniqueName);
- }
- };
-
- HfaKind Hfa;
- WindowsRTClassKind WinRTKind;
TypeIndex DerivationList;
TypeIndex VTableShape;
uint64_t Size;
@@ -520,40 +478,28 @@ private:
// LF_UNION
struct UnionRecord : public TagRecord {
- UnionRecord(uint16_t MemberCount, ClassOptions Options, HfaKind Hfa,
- TypeIndex FieldList, uint64_t Size, StringRef Name,
- StringRef UniqueName)
+ explicit UnionRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
+ UnionRecord(uint16_t MemberCount, ClassOptions Options, TypeIndex FieldList,
+ uint64_t Size, StringRef Name, StringRef UniqueName)
: TagRecord(TypeRecordKind::Union, MemberCount, Options, FieldList, Name,
UniqueName),
- Hfa(Hfa), Size(Size) {}
+ Size(Size) {}
- static ErrorOr<UnionRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
+ HfaKind getHfa() const {
+ uint16_t Value = static_cast<uint16_t>(Options);
+ Value = (Value & HfaKindMask) >> HfaKindShift;
+ return static_cast<HfaKind>(Value);
+ }
- HfaKind getHfa() const { return Hfa; }
uint64_t getSize() const { return Size; }
-private:
- struct Layout {
- ulittle16_t MemberCount; // Number of members in FieldList.
- ulittle16_t Properties; // ClassOptions bitset
- TypeIndex FieldList; // LF_FIELDLIST: List of all kinds of members
- // SizeOf: The 'sizeof' the UDT in bytes is encoded as an LF_NUMERIC
- // integer.
- // Name: The null-terminated name follows.
-
- bool hasUniqueName() const {
- return Properties & uint16_t(ClassOptions::HasUniqueName);
- }
- };
-
- HfaKind Hfa;
uint64_t Size;
};
// LF_ENUM
class EnumRecord : public TagRecord {
public:
+ explicit EnumRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
EnumRecord(uint16_t MemberCount, ClassOptions Options, TypeIndex FieldList,
StringRef Name, StringRef UniqueName, TypeIndex UnderlyingType)
: TagRecord(TypeRecordKind::Enum, MemberCount, Options, FieldList, Name,
@@ -563,30 +509,14 @@ public:
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<EnumRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getUnderlyingType() const { return UnderlyingType; }
-
-private:
- struct Layout {
- ulittle16_t NumEnumerators; // Number of enumerators
- ulittle16_t Properties;
- TypeIndex UnderlyingType;
- TypeIndex FieldListType;
- // Name: The null-terminated name follows.
-
- bool hasUniqueName() const {
- return Properties & uint16_t(ClassOptions::HasUniqueName);
- }
- };
-
TypeIndex UnderlyingType;
};
// LF_BITFIELD
class BitFieldRecord : public TypeRecord {
public:
+ explicit BitFieldRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
BitFieldRecord(TypeIndex Type, uint8_t BitSize, uint8_t BitOffset)
: TypeRecord(TypeRecordKind::BitField), Type(Type), BitSize(BitSize),
BitOffset(BitOffset) {}
@@ -595,20 +525,9 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<BitFieldRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getType() const { return Type; }
uint8_t getBitOffset() const { return BitOffset; }
uint8_t getBitSize() const { return BitSize; }
-
-private:
- struct Layout {
- TypeIndex Type;
- uint8_t BitSize;
- uint8_t BitOffset;
- };
-
TypeIndex Type;
uint8_t BitSize;
uint8_t BitOffset;
@@ -617,6 +536,7 @@ private:
// LF_VTSHAPE
class VFTableShapeRecord : public TypeRecord {
public:
+ explicit VFTableShapeRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
explicit VFTableShapeRecord(ArrayRef<VFTableSlotKind> Slots)
: TypeRecord(TypeRecordKind::VFTableShape), SlotsRef(Slots) {}
explicit VFTableShapeRecord(std::vector<VFTableSlotKind> Slots)
@@ -626,26 +546,13 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<VFTableShapeRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
ArrayRef<VFTableSlotKind> getSlots() const {
if (!SlotsRef.empty())
return SlotsRef;
return Slots;
}
- uint32_t getEntryCount() const { return getSlots().size(); }
-
-private:
- struct Layout {
- // Number of vftable entries. Each method may have more than one entry due
- // to
- // things like covariant return types.
- ulittle16_t VFEntryCount;
- // Descriptors[]: 4-bit virtual method descriptors of type CV_VTS_desc_e.
- };
-private:
+ uint32_t getEntryCount() const { return getSlots().size(); }
ArrayRef<VFTableSlotKind> SlotsRef;
std::vector<VFTableSlotKind> Slots;
};
@@ -653,6 +560,7 @@ private:
// LF_TYPESERVER2
class TypeServer2Record : public TypeRecord {
public:
+ explicit TypeServer2Record(TypeRecordKind Kind) : TypeRecord(Kind) {}
TypeServer2Record(StringRef Guid, uint32_t Age, StringRef Name)
: TypeRecord(TypeRecordKind::TypeServer2), Guid(Guid), Age(Age),
Name(Name) {}
@@ -661,22 +569,12 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<TypeServer2Record> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
StringRef getGuid() const { return Guid; }
uint32_t getAge() const { return Age; }
StringRef getName() const { return Name; }
-private:
- struct Layout {
- char Guid[16]; // GUID
- ulittle32_t Age;
- // Name: Name of the PDB as a null-terminated string
- };
-
StringRef Guid;
uint32_t Age;
StringRef Name;
@@ -685,6 +583,7 @@ private:
// LF_STRING_ID
class StringIdRecord : public TypeRecord {
public:
+ explicit StringIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
StringIdRecord(TypeIndex Id, StringRef String)
: TypeRecord(TypeRecordKind::StringId), Id(Id), String(String) {}
@@ -692,19 +591,9 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<StringIdRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getId() const { return Id; }
StringRef getString() const { return String; }
-
-private:
- struct Layout {
- TypeIndex id;
- // Name: Name of the PDB as a null-terminated string
- };
-
TypeIndex Id;
StringRef String;
};
@@ -712,6 +601,7 @@ private:
// LF_FUNC_ID
class FuncIdRecord : public TypeRecord {
public:
+ explicit FuncIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
FuncIdRecord(TypeIndex ParentScope, TypeIndex FunctionType, StringRef Name)
: TypeRecord(TypeRecordKind::FuncId), ParentScope(ParentScope),
FunctionType(FunctionType), Name(Name) {}
@@ -720,22 +610,12 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<FuncIdRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getParentScope() const { return ParentScope; }
TypeIndex getFunctionType() const { return FunctionType; }
StringRef getName() const { return Name; }
-private:
- struct Layout {
- TypeIndex ParentScope;
- TypeIndex FunctionType;
- // Name: The null-terminated name follows.
- };
-
TypeIndex ParentScope;
TypeIndex FunctionType;
StringRef Name;
@@ -744,6 +624,7 @@ private:
// LF_UDT_SRC_LINE
class UdtSourceLineRecord : public TypeRecord {
public:
+ explicit UdtSourceLineRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
UdtSourceLineRecord(TypeIndex UDT, TypeIndex SourceFile, uint32_t LineNumber)
: TypeRecord(TypeRecordKind::UdtSourceLine), UDT(UDT),
SourceFile(SourceFile), LineNumber(LineNumber) {}
@@ -752,20 +633,10 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<UdtSourceLineRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getUDT() const { return UDT; }
TypeIndex getSourceFile() const { return SourceFile; }
uint32_t getLineNumber() const { return LineNumber; }
-private:
- struct Layout {
- TypeIndex UDT; // The user-defined type
- TypeIndex SourceFile; // StringID containing the source filename
- ulittle32_t LineNumber;
- };
-
TypeIndex UDT;
TypeIndex SourceFile;
uint32_t LineNumber;
@@ -774,6 +645,7 @@ private:
// LF_UDT_MOD_SRC_LINE
class UdtModSourceLineRecord : public TypeRecord {
public:
+ explicit UdtModSourceLineRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
UdtModSourceLineRecord(TypeIndex UDT, TypeIndex SourceFile,
uint32_t LineNumber, uint16_t Module)
: TypeRecord(TypeRecordKind::UdtSourceLine), UDT(UDT),
@@ -781,28 +653,11 @@ public:
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<UdtModSourceLineRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data) {
- const Layout *L = nullptr;
- CV_DESERIALIZE(Data, L);
-
- return UdtModSourceLineRecord(L->UDT, L->SourceFile, L->LineNumber,
- L->Module);
- }
-
TypeIndex getUDT() const { return UDT; }
TypeIndex getSourceFile() const { return SourceFile; }
uint32_t getLineNumber() const { return LineNumber; }
uint16_t getModule() const { return Module; }
-private:
- struct Layout {
- TypeIndex UDT; // The user-defined type
- TypeIndex SourceFile; // StringID containing the source filename
- ulittle32_t LineNumber;
- ulittle16_t Module; // Module that contributes this UDT definition
- };
-
TypeIndex UDT;
TypeIndex SourceFile;
uint32_t LineNumber;
@@ -812,6 +667,7 @@ private:
// LF_BUILDINFO
class BuildInfoRecord : public TypeRecord {
public:
+ explicit BuildInfoRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
BuildInfoRecord(ArrayRef<TypeIndex> ArgIndices)
: TypeRecord(TypeRecordKind::BuildInfo),
ArgIndices(ArgIndices.begin(), ArgIndices.end()) {}
@@ -820,111 +676,73 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<BuildInfoRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
ArrayRef<TypeIndex> getArgs() const { return ArgIndices; }
-
-private:
- struct Layout {
- ulittle16_t NumArgs; // Number of arguments
- // ArgTypes[]: Type indicies of arguments
- };
SmallVector<TypeIndex, 4> ArgIndices;
};
// LF_VFTABLE
class VFTableRecord : public TypeRecord {
public:
+ explicit VFTableRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
VFTableRecord(TypeIndex CompleteClass, TypeIndex OverriddenVFTable,
uint32_t VFPtrOffset, StringRef Name,
ArrayRef<StringRef> Methods)
- : TypeRecord(TypeRecordKind::VFTable),
- CompleteClass(CompleteClass), OverriddenVFTable(OverriddenVFTable),
- VFPtrOffset(VFPtrOffset), Name(Name), MethodNamesRef(Methods) {}
- VFTableRecord(TypeIndex CompleteClass, TypeIndex OverriddenVFTable,
- uint32_t VFPtrOffset, StringRef Name,
- const std::vector<StringRef> &Methods)
- : TypeRecord(TypeRecordKind::VFTable),
- CompleteClass(CompleteClass), OverriddenVFTable(OverriddenVFTable),
- VFPtrOffset(VFPtrOffset), Name(Name), MethodNames(Methods) {}
+ : TypeRecord(TypeRecordKind::VFTable), CompleteClass(CompleteClass),
+ OverriddenVFTable(OverriddenVFTable), VFPtrOffset(VFPtrOffset) {
+ MethodNames.push_back(Name);
+ MethodNames.insert(MethodNames.end(), Methods.begin(), Methods.end());
+ }
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<VFTableRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getCompleteClass() const { return CompleteClass; }
TypeIndex getOverriddenVTable() const { return OverriddenVFTable; }
uint32_t getVFPtrOffset() const { return VFPtrOffset; }
- StringRef getName() const { return Name; }
+ StringRef getName() const { return makeArrayRef(MethodNames).front(); }
ArrayRef<StringRef> getMethodNames() const {
- if (!MethodNamesRef.empty())
- return MethodNamesRef;
- return MethodNames;
+ return makeArrayRef(MethodNames).drop_front();
}
-private:
- struct Layout {
- TypeIndex CompleteClass; // Class that owns this vftable.
- TypeIndex OverriddenVFTable; // VFTable that this overrides.
- ulittle32_t VFPtrOffset; // VFPtr offset in CompleteClass
- ulittle32_t NamesLen; // Length of subsequent names array in bytes.
- // Names: A sequence of null-terminated strings. First string is vftable
- // names.
- };
-
TypeIndex CompleteClass;
TypeIndex OverriddenVFTable;
- ulittle32_t VFPtrOffset;
- StringRef Name;
- ArrayRef<StringRef> MethodNamesRef;
+ uint32_t VFPtrOffset;
std::vector<StringRef> MethodNames;
};
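The new VFTableRecord constructor stores the vftable's own name and the method names in one vector: the table name goes in slot 0 and the methods follow, so getName() reads the front element and getMethodNames() drops it. A small standalone sketch of the same convention, using plain std::vector in place of ArrayRef/makeArrayRef:

#include <cassert>
#include <string>
#include <vector>

struct NamePool {
  std::vector<std::string> Names;  // [0] = table name, [1..] = method names

  NamePool(std::string TableName, std::vector<std::string> Methods) {
    Names.push_back(std::move(TableName));
    Names.insert(Names.end(), Methods.begin(), Methods.end());
  }

  const std::string &tableName() const { return Names.front(); }
  std::vector<std::string> methodNames() const {
    return {Names.begin() + 1, Names.end()};  // drop_front() equivalent
  }
};

int main() {
  NamePool P("vftable_for_Foo", {"method1", "method2"});  // illustrative names
  assert(P.tableName() == "vftable_for_Foo");
  assert(P.methodNames().size() == 2);
  return 0;
}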
// LF_ONEMETHOD
class OneMethodRecord : public TypeRecord {
public:
- OneMethodRecord(TypeIndex Type, MethodKind Kind, MethodOptions Options,
- MemberAccess Access, int32_t VFTableOffset, StringRef Name)
- : TypeRecord(TypeRecordKind::OneMethod), Type(Type), Kind(Kind),
- Options(Options), Access(Access), VFTableOffset(VFTableOffset),
- Name(Name) {}
+ OneMethodRecord() : TypeRecord(TypeRecordKind::OneMethod) {}
+ explicit OneMethodRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+ OneMethodRecord(TypeIndex Type, MemberAttributes Attrs, int32_t VFTableOffset,
+ StringRef Name)
+ : TypeRecord(TypeRecordKind::OneMethod), Type(Type), Attrs(Attrs),
+ VFTableOffset(VFTableOffset), Name(Name) {}
+ OneMethodRecord(TypeIndex Type, MemberAccess Access, MethodKind MK,
+ MethodOptions Options, int32_t VFTableOffset, StringRef Name)
+ : TypeRecord(TypeRecordKind::OneMethod), Type(Type),
+ Attrs(Access, MK, Options), VFTableOffset(VFTableOffset), Name(Name) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<OneMethodRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getType() const { return Type; }
- MethodKind getKind() const { return Kind; }
- MethodOptions getOptions() const { return Options; }
- MemberAccess getAccess() const { return Access; }
+ MethodKind getMethodKind() const { return Attrs.getMethodKind(); }
+ MethodOptions getOptions() const { return Attrs.getFlags(); }
+ MemberAccess getAccess() const { return Attrs.getAccess(); }
int32_t getVFTableOffset() const { return VFTableOffset; }
StringRef getName() const { return Name; }
bool isIntroducingVirtual() const {
- return Kind == MethodKind::IntroducingVirtual ||
- Kind == MethodKind::PureIntroducingVirtual;
+ return getMethodKind() == MethodKind::IntroducingVirtual ||
+ getMethodKind() == MethodKind::PureIntroducingVirtual;
}
-private:
- struct Layout {
- MemberAttributes Attrs;
- TypeIndex Type;
- // If is introduced virtual method:
- // VFTableOffset: int32_t offset in vftable
- // Name: Null-terminated string
- };
-
TypeIndex Type;
- MethodKind Kind;
- MethodOptions Options;
- MemberAccess Access;
+ MemberAttributes Attrs;
int32_t VFTableOffset;
StringRef Name;
};
@@ -932,6 +750,7 @@ private:
// LF_METHODLIST
class MethodOverloadListRecord : public TypeRecord {
public:
+ explicit MethodOverloadListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
MethodOverloadListRecord(ArrayRef<OneMethodRecord> Methods)
: TypeRecord(TypeRecordKind::MethodOverloadList), Methods(Methods) {}
@@ -939,27 +758,14 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<MethodOverloadListRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
ArrayRef<OneMethodRecord> getMethods() const { return Methods; }
-
-private:
- struct Layout {
- MemberAttributes Attrs;
- ulittle16_t Padding;
-
- TypeIndex Type;
- // If is introduced virtual method:
- // VFTableOffset: int32_t offset in vftable
- };
-
std::vector<OneMethodRecord> Methods;
};
/// For method overload sets. LF_METHOD
class OverloadedMethodRecord : public TypeRecord {
public:
+ explicit OverloadedMethodRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
OverloadedMethodRecord(uint16_t NumOverloads, TypeIndex MethodList,
StringRef Name)
: TypeRecord(TypeRecordKind::OverloadedMethod),
@@ -969,20 +775,9 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<OverloadedMethodRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
uint16_t getNumOverloads() const { return NumOverloads; }
TypeIndex getMethodList() const { return MethodList; }
StringRef getName() const { return Name; }
-
-private:
- struct Layout {
- ulittle16_t MethodCount; // Size of overload set
- TypeIndex MethList; // Type index of methods in overload set
- // Name: Null-terminated string
- };
-
uint16_t NumOverloads;
TypeIndex MethodList;
StringRef Name;
@@ -991,32 +786,26 @@ private:
// LF_MEMBER
class DataMemberRecord : public TypeRecord {
public:
+ explicit DataMemberRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+ DataMemberRecord(MemberAttributes Attrs, TypeIndex Type, uint64_t Offset,
+ StringRef Name)
+ : TypeRecord(TypeRecordKind::DataMember), Attrs(Attrs), Type(Type),
+ FieldOffset(Offset), Name(Name) {}
DataMemberRecord(MemberAccess Access, TypeIndex Type, uint64_t Offset,
StringRef Name)
- : TypeRecord(TypeRecordKind::DataMember), Access(Access), Type(Type),
+ : TypeRecord(TypeRecordKind::DataMember), Attrs(Access), Type(Type),
FieldOffset(Offset), Name(Name) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<DataMemberRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
- MemberAccess getAccess() const { return Access; }
+ MemberAccess getAccess() const { return Attrs.getAccess(); }
TypeIndex getType() const { return Type; }
uint64_t getFieldOffset() const { return FieldOffset; }
StringRef getName() const { return Name; }
-private:
- struct Layout {
- MemberAttributes Attrs; // Access control attributes, etc
- TypeIndex Type;
- // FieldOffset: LF_NUMERIC encoded byte offset
- // Name: Null-terminated string
- };
-
- MemberAccess Access;
+ MemberAttributes Attrs;
TypeIndex Type;
uint64_t FieldOffset;
StringRef Name;
@@ -1025,29 +814,23 @@ private:
// LF_STMEMBER
class StaticDataMemberRecord : public TypeRecord {
public:
+ explicit StaticDataMemberRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+ StaticDataMemberRecord(MemberAttributes Attrs, TypeIndex Type, StringRef Name)
+ : TypeRecord(TypeRecordKind::StaticDataMember), Attrs(Attrs), Type(Type),
+ Name(Name) {}
StaticDataMemberRecord(MemberAccess Access, TypeIndex Type, StringRef Name)
- : TypeRecord(TypeRecordKind::StaticDataMember), Access(Access),
- Type(Type), Name(Name) {}
+ : TypeRecord(TypeRecordKind::StaticDataMember), Attrs(Access), Type(Type),
+ Name(Name) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<StaticDataMemberRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
- MemberAccess getAccess() const { return Access; }
+ MemberAccess getAccess() const { return Attrs.getAccess(); }
TypeIndex getType() const { return Type; }
StringRef getName() const { return Name; }
-private:
- struct Layout {
- MemberAttributes Attrs; // Access control attributes, etc
- TypeIndex Type;
- // Name: Null-terminated string
- };
-
- MemberAccess Access;
+ MemberAttributes Attrs;
TypeIndex Type;
StringRef Name;
};
@@ -1055,29 +838,23 @@ private:
// LF_ENUMERATE
class EnumeratorRecord : public TypeRecord {
public:
+ explicit EnumeratorRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+ EnumeratorRecord(MemberAttributes Attrs, APSInt Value, StringRef Name)
+ : TypeRecord(TypeRecordKind::Enumerator), Attrs(Attrs),
+ Value(std::move(Value)), Name(Name) {}
EnumeratorRecord(MemberAccess Access, APSInt Value, StringRef Name)
- : TypeRecord(TypeRecordKind::Enumerator), Access(Access),
+ : TypeRecord(TypeRecordKind::Enumerator), Attrs(Access),
Value(std::move(Value)), Name(Name) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<EnumeratorRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
- MemberAccess getAccess() const { return Access; }
+ MemberAccess getAccess() const { return Attrs.getAccess(); }
APSInt getValue() const { return Value; }
StringRef getName() const { return Name; }
-private:
- struct Layout {
- MemberAttributes Attrs; // Access control attributes, etc
- // EnumValue: LF_NUMERIC encoded enumerator value
- // Name: Null-terminated string
- };
-
- MemberAccess Access;
+ MemberAttributes Attrs;
APSInt Value;
StringRef Name;
};
@@ -1085,6 +862,7 @@ private:
// LF_VFUNCTAB
class VFPtrRecord : public TypeRecord {
public:
+ explicit VFPtrRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
VFPtrRecord(TypeIndex Type)
: TypeRecord(TypeRecordKind::VFPtr), Type(Type) {}
@@ -1092,44 +870,31 @@ public:
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<VFPtrRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
TypeIndex getType() const { return Type; }
-private:
- struct Layout {
- ulittle16_t Pad0;
- TypeIndex Type; // Type of vfptr
- };
TypeIndex Type;
};
// LF_BCLASS, LF_BINTERFACE
class BaseClassRecord : public TypeRecord {
public:
+ explicit BaseClassRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+ BaseClassRecord(MemberAttributes Attrs, TypeIndex Type, uint64_t Offset)
+ : TypeRecord(TypeRecordKind::BaseClass), Attrs(Attrs), Type(Type),
+ Offset(Offset) {}
BaseClassRecord(MemberAccess Access, TypeIndex Type, uint64_t Offset)
- : TypeRecord(TypeRecordKind::BaseClass), Access(Access), Type(Type),
+ : TypeRecord(TypeRecordKind::BaseClass), Attrs(Access), Type(Type),
Offset(Offset) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<BaseClassRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
- MemberAccess getAccess() const { return Access; }
+ MemberAccess getAccess() const { return Attrs.getAccess(); }
TypeIndex getBaseType() const { return Type; }
uint64_t getBaseOffset() const { return Offset; }
-private:
- struct Layout {
- MemberAttributes Attrs; // Access control attributes, etc
- TypeIndex BaseType; // Base class type
- // BaseOffset: LF_NUMERIC encoded byte offset of base from derived.
- };
- MemberAccess Access;
+ MemberAttributes Attrs;
TypeIndex Type;
uint64_t Offset;
};
@@ -1137,34 +902,29 @@ private:
// LF_VBCLASS, LF_IVBCLASS
class VirtualBaseClassRecord : public TypeRecord {
public:
- VirtualBaseClassRecord(MemberAccess Access, TypeIndex BaseType,
- TypeIndex VBPtrType, uint64_t Offset, uint64_t Index)
- : TypeRecord(TypeRecordKind::VirtualBaseClass), Access(Access),
- BaseType(BaseType), VBPtrType(VBPtrType), VBPtrOffset(Offset),
- VTableIndex(Index) {}
+ explicit VirtualBaseClassRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
+ VirtualBaseClassRecord(TypeRecordKind Kind, MemberAttributes Attrs,
+ TypeIndex BaseType, TypeIndex VBPtrType,
+ uint64_t Offset, uint64_t Index)
+ : TypeRecord(Kind), Attrs(Attrs), BaseType(BaseType),
+ VBPtrType(VBPtrType), VBPtrOffset(Offset), VTableIndex(Index) {}
+ VirtualBaseClassRecord(TypeRecordKind Kind, MemberAccess Access,
+ TypeIndex BaseType, TypeIndex VBPtrType,
+ uint64_t Offset, uint64_t Index)
+ : TypeRecord(Kind), Attrs(Access), BaseType(BaseType),
+ VBPtrType(VBPtrType), VBPtrOffset(Offset), VTableIndex(Index) {}
/// Rewrite member type indices with IndexMap. Returns false if a type index
/// is not in the map.
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<VirtualBaseClassRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
- MemberAccess getAccess() const { return Access; }
+ MemberAccess getAccess() const { return Attrs.getAccess(); }
TypeIndex getBaseType() const { return BaseType; }
TypeIndex getVBPtrType() const { return VBPtrType; }
uint64_t getVBPtrOffset() const { return VBPtrOffset; }
uint64_t getVTableIndex() const { return VTableIndex; }
-private:
- struct Layout {
- MemberAttributes Attrs; // Access control attributes, etc.
- TypeIndex BaseType; // Base class type
- TypeIndex VBPtrType; // Virtual base pointer type
- // VBPtrOffset: Offset of vbptr from vfptr encoded as LF_NUMERIC.
- // VBTableIndex: Index of vbase within vbtable encoded as LF_NUMERIC.
- };
- MemberAccess Access;
+ MemberAttributes Attrs;
TypeIndex BaseType;
TypeIndex VBPtrType;
uint64_t VBPtrOffset;
@@ -1175,6 +935,7 @@ private:
/// together. The first will end in an LF_INDEX record that points to the next.
class ListContinuationRecord : public TypeRecord {
public:
+ explicit ListContinuationRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
ListContinuationRecord(TypeIndex ContinuationIndex)
: TypeRecord(TypeRecordKind::ListContinuation),
ContinuationIndex(ContinuationIndex) {}
@@ -1183,20 +944,11 @@ public:
bool remapTypeIndices(ArrayRef<TypeIndex> IndexMap);
- static ErrorOr<ListContinuationRecord> deserialize(TypeRecordKind Kind,
- ArrayRef<uint8_t> &Data);
-
-private:
- struct Layout {
- ulittle16_t Pad0;
- TypeIndex ContinuationIndex;
- };
TypeIndex ContinuationIndex;
};
-typedef CVRecord<TypeLeafKind> CVType;
-typedef VarStreamArray<CVType> CVTypeArray;
-}
-}
+} // end namespace codeview
+
+} // end namespace llvm
-#endif
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H
diff --git a/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h b/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
index eb7993baab89..5a6507ee7f5b 100644
--- a/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
+++ b/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
@@ -47,6 +47,12 @@ public:
llvm::StringRef str();
uint64_t size() const { return Stream.tell(); }
+ TypeRecordKind kind() const { return Kind; }
+
+ /// Returns the number of bytes remaining before this record is larger than
+ /// the maximum record length. Accounts for the extra two byte size field in
+ /// the header.
+ size_t maxBytesRemaining() const { return MaxRecordLength - size() - 2; }
void truncate(uint64_t Size) {
// This works because raw_svector_ostream is not buffered.
@@ -56,10 +62,12 @@ public:
void reset(TypeRecordKind K) {
Buffer.clear();
+ Kind = K;
writeTypeRecordKind(K);
}
private:
+ TypeRecordKind Kind;
llvm::SmallVector<char, 256> Buffer;
llvm::raw_svector_ostream Stream;
llvm::support::endian::Writer<llvm::support::endianness::little> Writer;
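The new maxBytesRemaining() is simple budget arithmetic: a record may not exceed the maximum record length, and two of those bytes are taken by the length field in the record header. A sketch of that calculation, assuming a MaxRecordLength of 0xFF00 (the concrete constant is defined elsewhere in the CodeView headers):

#include <cassert>
#include <cstddef>

constexpr size_t MaxRecordLength = 0xFF00;  // assumed value for illustration

// Bytes still available in a record whose payload is CurrentSize bytes long,
// accounting for the two-byte size field in the record header.
static size_t maxBytesRemaining(size_t CurrentSize) {
  return MaxRecordLength - CurrentSize - 2;
}

int main() {
  assert(maxBytesRemaining(0) == MaxRecordLength - 2);
  assert(maxBytesRemaining(100) == MaxRecordLength - 102);
  return 0;
}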
diff --git a/include/llvm/DebugInfo/CodeView/TypeRecordMapping.h b/include/llvm/DebugInfo/CodeView/TypeRecordMapping.h
new file mode 100644
index 000000000000..fe470a72abbb
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/TypeRecordMapping.h
@@ -0,0 +1,52 @@
+//===- TypeRecordMapping.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORDMAPPING_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORDMAPPING_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace msf {
+class StreamReader;
+class StreamWriter;
+}
+namespace codeview {
+class TypeRecordMapping : public TypeVisitorCallbacks {
+public:
+ explicit TypeRecordMapping(msf::StreamReader &Reader) : IO(Reader) {}
+ explicit TypeRecordMapping(msf::StreamWriter &Writer) : IO(Writer) {}
+
+ Error visitTypeBegin(CVType &Record) override;
+ Error visitTypeEnd(CVType &Record) override;
+
+ Error visitMemberBegin(CVMemberRecord &Record) override;
+ Error visitMemberEnd(CVMemberRecord &Record) override;
+
+#define TYPE_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
+#define MEMBER_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "TypeRecords.def"
+
+private:
+ Optional<TypeLeafKind> TypeKind;
+ Optional<TypeLeafKind> MemberKind;
+
+ CodeViewRecordIO IO;
+};
+}
+}
+
+#endif
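The TYPE_RECORD/MEMBER_RECORD macro block above is an X-macro: TypeRecords.def expands each known record once, so one visitKnownRecord/visitKnownMember override per record type is declared without listing them by hand. A self-contained sketch of the pattern with a made-up record list (the real list lives in TypeRecords.def):

#include <iostream>

// Stand-ins for the real record classes; the names here are hypothetical.
struct PointerRecord {};
struct ArrayRecord {};

// The ".def" contents, inlined for the sketch. In LLVM this is a separate
// file that is #include'd after TYPE_RECORD has been defined.
#define TYPE_RECORD_LIST(X) \
  X(Pointer)                \
  X(Array)

class Visitor {
public:
#define TYPE_RECORD(Name)                      \
  void visitKnownRecord(Name##Record &) {      \
    std::cout << "visiting " #Name "Record\n"; \
  }
  TYPE_RECORD_LIST(TYPE_RECORD)
#undef TYPE_RECORD
};

int main() {
  Visitor V;
  PointerRecord P;
  ArrayRecord A;
  V.visitKnownRecord(P);  // prints "visiting PointerRecord"
  V.visitKnownRecord(A);  // prints "visiting ArrayRecord"
  return 0;
}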
diff --git a/include/llvm/DebugInfo/CodeView/TypeRecords.def b/include/llvm/DebugInfo/CodeView/TypeRecords.def
index 0959f4bf19c7..c98dbac21a7a 100644
--- a/include/llvm/DebugInfo/CodeView/TypeRecords.def
+++ b/include/llvm/DebugInfo/CodeView/TypeRecords.def
@@ -43,6 +43,8 @@ TYPE_RECORD(LF_PROCEDURE, 0x1008, Procedure)
TYPE_RECORD(LF_MFUNCTION, 0x1009, MemberFunction)
TYPE_RECORD(LF_ARGLIST, 0x1201, ArgList)
+TYPE_RECORD(LF_FIELDLIST, 0x1203, FieldList)
+
TYPE_RECORD(LF_ARRAY, 0x1503, Array)
TYPE_RECORD(LF_CLASS, 0x1504, Class)
TYPE_RECORD_ALIAS(LF_STRUCTURE, 0x1505, Struct, Class)
@@ -159,7 +161,6 @@ CV_TYPE(LF_OEM2, 0x1011)
CV_TYPE(LF_SKIP, 0x1200)
CV_TYPE(LF_DEFARG_ST, 0x1202)
-CV_TYPE(LF_FIELDLIST, 0x1203)
CV_TYPE(LF_DERIVED, 0x1204)
CV_TYPE(LF_DIMCONU, 0x1207)
CV_TYPE(LF_DIMCONLU, 0x1208)
diff --git a/include/llvm/DebugInfo/CodeView/TypeSerializer.h b/include/llvm/DebugInfo/CodeView/TypeSerializer.h
new file mode 100644
index 000000000000..e05922194638
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/TypeSerializer.h
@@ -0,0 +1,140 @@
+//===- TypeSerializer.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESERIALIZER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPESERIALIZER_H
+
+#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/DebugInfo/MSF/ByteStream.h"
+#include "llvm/DebugInfo/MSF/StreamWriter.h"
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+namespace codeview {
+
+class TypeSerializer : public TypeVisitorCallbacks {
+ struct SubRecord {
+ SubRecord(TypeLeafKind K, uint32_t S) : Kind(K), Size(S) {}
+
+ TypeLeafKind Kind;
+ uint32_t Size = 0;
+ };
+ struct RecordSegment {
+ SmallVector<SubRecord, 16> SubRecords;
+
+ uint32_t length() const {
+ uint32_t L = sizeof(RecordPrefix);
+ for (const auto &R : SubRecords) {
+ L += R.Size;
+ }
+ return L;
+ }
+ };
+
+ typedef SmallVector<MutableArrayRef<uint8_t>, 2> RecordList;
+
+ static constexpr uint8_t ContinuationLength = 8;
+ BumpPtrAllocator &RecordStorage;
+ RecordSegment CurrentSegment;
+ RecordList FieldListSegments;
+
+ TypeIndex LastTypeIndex;
+ Optional<TypeLeafKind> TypeKind;
+ Optional<TypeLeafKind> MemberKind;
+ std::vector<uint8_t> RecordBuffer;
+ msf::MutableByteStream Stream;
+ msf::StreamWriter Writer;
+ TypeRecordMapping Mapping;
+
+ RecordList SeenRecords;
+ StringMap<TypeIndex> HashedRecords;
+
+ bool isInFieldList() const;
+ TypeIndex calcNextTypeIndex() const;
+ TypeIndex incrementTypeIndex();
+ MutableArrayRef<uint8_t> getCurrentSubRecordData();
+ MutableArrayRef<uint8_t> getCurrentRecordData();
+ Error writeRecordPrefix(TypeLeafKind Kind);
+ TypeIndex insertRecordBytesPrivate(MutableArrayRef<uint8_t> Record);
+
+ Expected<MutableArrayRef<uint8_t>>
+ addPadding(MutableArrayRef<uint8_t> Record);
+
+public:
+ explicit TypeSerializer(BumpPtrAllocator &Storage);
+
+ ArrayRef<MutableArrayRef<uint8_t>> records() const;
+ TypeIndex getLastTypeIndex() const;
+ TypeIndex insertRecordBytes(MutableArrayRef<uint8_t> Record);
+ Expected<TypeIndex> visitTypeEndGetIndex(CVType &Record);
+
+ Error visitTypeBegin(CVType &Record) override;
+ Error visitTypeEnd(CVType &Record) override;
+ Error visitMemberBegin(CVMemberRecord &Record) override;
+ Error visitMemberEnd(CVMemberRecord &Record) override;
+
+#define TYPE_RECORD(EnumName, EnumVal, Name) \
+ virtual Error visitKnownRecord(CVType &CVR, Name##Record &Record) override { \
+ return visitKnownRecordImpl(CVR, Record); \
+ }
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override { \
+ return visitKnownMemberImpl<Name##Record>(CVR, Record); \
+ }
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/TypeRecords.def"
+
+private:
+ template <typename RecordKind>
+ Error visitKnownRecordImpl(CVType &CVR, RecordKind &Record) {
+ return Mapping.visitKnownRecord(CVR, Record);
+ }
+
+ template <typename RecordType>
+ Error visitKnownMemberImpl(CVMemberRecord &CVR, RecordType &Record) {
+ assert(CVR.Kind == static_cast<TypeLeafKind>(Record.getKind()));
+
+ if (auto EC = Writer.writeEnum(CVR.Kind))
+ return EC;
+
+ if (auto EC = Mapping.visitKnownMember(CVR, Record))
+ return EC;
+
+ // Get all the data that was just written and is yet to be committed to
+ // the current segment. Then pad it to 4 bytes.
+ MutableArrayRef<uint8_t> ThisRecord = getCurrentSubRecordData();
+ auto ExpectedRecord = addPadding(ThisRecord);
+ if (!ExpectedRecord)
+ return ExpectedRecord.takeError();
+ ThisRecord = *ExpectedRecord;
+
+ CurrentSegment.SubRecords.emplace_back(CVR.Kind, ThisRecord.size());
+ CVR.Data = ThisRecord;
+
+ // Both the last subrecord and the total length of this segment should be
+ // multiples of 4.
+ assert(ThisRecord.size() % 4 == 0);
+ assert(CurrentSegment.length() % 4 == 0);
+
+ return Error::success();
+ }
+};
+}
+}
+
+#endif
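visitKnownMemberImpl() above relies on addPadding() to round each serialized member up to a 4-byte boundary before it is appended to the current field-list segment. A sketch of that alignment step; the 0xF0-based pad-byte values follow the usual CodeView LF_PAD convention but should be read as an assumption here, since addPadding() owns the real encoding:

#include <cassert>
#include <cstdint>
#include <vector>

// Round Record up to the next multiple of four bytes.
static void padToFourBytes(std::vector<uint8_t> &Record) {
  uint32_t Align = Record.size() % 4;
  if (Align == 0)
    return;
  uint32_t PadBytes = 4 - Align;
  for (uint32_t I = PadBytes; I > 0; --I)
    Record.push_back(0xF0 + I);  // e.g. F3 F2 F1 for three pad bytes
}

int main() {
  std::vector<uint8_t> R = {1, 2, 3, 4, 5};  // 5 bytes -> needs 3 pad bytes
  padToFourBytes(R);
  assert(R.size() == 8);
  assert(R.size() % 4 == 0);
  return 0;
}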
diff --git a/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h b/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
index 5b2aa6186147..4e6d81ece318 100644
--- a/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
+++ b/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
@@ -10,61 +10,120 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeSerializer.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <type_traits>
namespace llvm {
-
-class StringRef;
-
namespace codeview {
-class FieldListRecordBuilder;
-class MethodListRecordBuilder;
-class TypeRecordBuilder;
-
class TypeTableBuilder {
private:
+ TypeIndex handleError(Error EC) const {
+ assert(false && "Couldn't write Type!");
+ consumeError(std::move(EC));
+ return TypeIndex();
+ }
+
+ BumpPtrAllocator &Allocator;
+ TypeSerializer Serializer;
+
+public:
+ explicit TypeTableBuilder(BumpPtrAllocator &Allocator)
+ : Allocator(Allocator), Serializer(Allocator) {}
TypeTableBuilder(const TypeTableBuilder &) = delete;
TypeTableBuilder &operator=(const TypeTableBuilder &) = delete;
-protected:
- TypeTableBuilder();
+ bool empty() const { return Serializer.records().empty(); }
-public:
- virtual ~TypeTableBuilder();
+ BumpPtrAllocator &getAllocator() const { return Allocator; }
+
+ template <typename T> TypeIndex writeKnownType(T &Record) {
+ static_assert(!std::is_same<T, FieldListRecord>::value,
+ "Can't serialize FieldList!");
+
+ CVType Type;
+ Type.Type = static_cast<TypeLeafKind>(Record.getKind());
+ if (auto EC = Serializer.visitTypeBegin(Type))
+ return handleError(std::move(EC));
+ if (auto EC = Serializer.visitKnownRecord(Type, Record))
+ return handleError(std::move(EC));
+
+ auto ExpectedIndex = Serializer.visitTypeEndGetIndex(Type);
+ if (!ExpectedIndex)
+ return handleError(ExpectedIndex.takeError());
+
+ return *ExpectedIndex;
+ }
+
+ TypeIndex writeSerializedRecord(MutableArrayRef<uint8_t> Record) {
+ return Serializer.insertRecordBytes(Record);
+ }
+
+ template <typename TFunc> void ForEachRecord(TFunc Func) {
+ uint32_t Index = TypeIndex::FirstNonSimpleIndex;
+
+ for (auto Record : Serializer.records()) {
+ Func(TypeIndex(Index), Record);
+ ++Index;
+ }
+ }
+
+ ArrayRef<MutableArrayRef<uint8_t>> records() const {
+ return Serializer.records();
+ }
+};
+
+class FieldListRecordBuilder {
+ TypeTableBuilder &TypeTable;
+ TypeSerializer TempSerializer;
+ CVType Type;
public:
- TypeIndex writeModifier(const ModifierRecord &Record);
- TypeIndex writeProcedure(const ProcedureRecord &Record);
- TypeIndex writeMemberFunction(const MemberFunctionRecord &Record);
- TypeIndex writeArgList(const ArgListRecord &Record);
- TypeIndex writePointer(const PointerRecord &Record);
- TypeIndex writeArray(const ArrayRecord &Record);
- TypeIndex writeClass(const ClassRecord &Record);
- TypeIndex writeUnion(const UnionRecord &Record);
- TypeIndex writeEnum(const EnumRecord &Record);
- TypeIndex writeBitField(const BitFieldRecord &Record);
- TypeIndex writeVFTableShape(const VFTableShapeRecord &Record);
- TypeIndex writeStringId(const StringIdRecord &Record);
- TypeIndex writeVFTable(const VFTableRecord &Record);
- TypeIndex writeUdtSourceLine(const UdtSourceLineRecord &Record);
- TypeIndex writeUdtModSourceLine(const UdtModSourceLineRecord &Record);
- TypeIndex writeFuncId(const FuncIdRecord &Record);
- TypeIndex writeMemberFuncId(const MemberFuncIdRecord &Record);
- TypeIndex writeBuildInfo(const BuildInfoRecord &Record);
- TypeIndex writeMethodOverloadList(const MethodOverloadListRecord &Record);
- TypeIndex writeTypeServer2(const TypeServer2Record &Record);
-
- TypeIndex writeFieldList(FieldListRecordBuilder &FieldList);
-
- TypeIndex writeRecord(TypeRecordBuilder &builder);
-
- virtual TypeIndex writeRecord(llvm::StringRef record) = 0;
+ explicit FieldListRecordBuilder(TypeTableBuilder &TypeTable)
+ : TypeTable(TypeTable), TempSerializer(TypeTable.getAllocator()) {
+ Type.Type = TypeLeafKind::LF_FIELDLIST;
+ }
+
+ void begin() {
+ if (auto EC = TempSerializer.visitTypeBegin(Type))
+ consumeError(std::move(EC));
+ }
+
+ template <typename T> void writeMemberType(T &Record) {
+ CVMemberRecord CVMR;
+ CVMR.Kind = static_cast<TypeLeafKind>(Record.getKind());
+ if (auto EC = TempSerializer.visitMemberBegin(CVMR))
+ consumeError(std::move(EC));
+ if (auto EC = TempSerializer.visitKnownMember(CVMR, Record))
+ consumeError(std::move(EC));
+ if (auto EC = TempSerializer.visitMemberEnd(CVMR))
+ consumeError(std::move(EC));
+ }
+
+ TypeIndex end() {
+ if (auto EC = TempSerializer.visitTypeEnd(Type)) {
+ consumeError(std::move(EC));
+ return TypeIndex();
+ }
+
+ TypeIndex Index;
+ for (auto Record : TempSerializer.records()) {
+ Index = TypeTable.writeSerializedRecord(Record);
+ }
+ return Index;
+ }
};
-}
-}
-#endif
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
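Putting the two builders above together: ordinary records are written through TypeTableBuilder::writeKnownType(), while field-list members go through a FieldListRecordBuilder, which serializes into a temporary buffer and commits the whole list with end(). A usage sketch under the assumption that the record constructors behave as declared in TypeRecord.h; the type indices and MemberAccess value below are illustrative placeholders:

#include "llvm/DebugInfo/CodeView/TypeTableBuilder.h"
#include "llvm/Support/Allocator.h"

using namespace llvm;
using namespace llvm::codeview;

static void buildExample() {
  BumpPtrAllocator Allocator;
  TypeTableBuilder Table(Allocator);

  // Members accumulate in a FieldListRecordBuilder and are flushed at end().
  FieldListRecordBuilder FLRB(Table);
  FLRB.begin();
  DataMemberRecord Member(MemberAccess::Public, TypeIndex(0x1000),
                          /*Offset=*/0, "X");
  FLRB.writeMemberType(Member);
  TypeIndex FieldListIndex = FLRB.end();

  // Non-field-list records go straight through writeKnownType().
  ArrayRecord Array(TypeIndex(0x1000), TypeIndex(0x1001), /*Size=*/16, "Arr");
  TypeIndex ArrayIndex = Table.writeKnownType(Array);
  (void)FieldListIndex;
  (void)ArrayIndex;
}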
diff --git a/include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h b/include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h
new file mode 100644
index 000000000000..f25129691041
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h
@@ -0,0 +1,114 @@
+//===- TypeVisitorCallbackPipeline.h ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/Error.h"
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class TypeVisitorCallbackPipeline : public TypeVisitorCallbacks {
+public:
+ TypeVisitorCallbackPipeline() = default;
+
+ Error visitUnknownType(CVRecord<TypeLeafKind> &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitUnknownType(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ Error visitUnknownMember(CVMemberRecord &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitUnknownMember(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ Error visitTypeBegin(CVType &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitTypeBegin(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ Error visitTypeEnd(CVType &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitTypeEnd(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ Error visitMemberBegin(CVMemberRecord &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitMemberBegin(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ Error visitMemberEnd(CVMemberRecord &Record) override {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitMemberEnd(Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ void addCallbackToPipeline(TypeVisitorCallbacks &Callbacks) {
+ Pipeline.push_back(&Callbacks);
+ }
+
+#define TYPE_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownRecord(CVType &CVR, Name##Record &Record) override { \
+ return visitKnownRecordImpl(CVR, Record); \
+ }
+#define MEMBER_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownMember(CVMemberRecord &CVMR, Name##Record &Record) \
+ override { \
+ return visitKnownMemberImpl(CVMR, Record); \
+ }
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/TypeRecords.def"
+
+private:
+ template <typename T> Error visitKnownRecordImpl(CVType &CVR, T &Record) {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitKnownRecord(CVR, Record))
+ return EC;
+ }
+ return Error::success();
+ }
+
+ template <typename T>
+ Error visitKnownMemberImpl(CVMemberRecord &CVMR, T &Record) {
+ for (auto Visitor : Pipeline) {
+ if (auto EC = Visitor->visitKnownMember(CVMR, Record))
+ return EC;
+ }
+ return Error::success();
+ }
+ std::vector<TypeVisitorCallbacks *> Pipeline;
+};
+
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H
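The pipeline simply forwards every callback, in registration order, to each visitor added with addCallbackToPipeline(), stopping at the first error. A sketch of wiring a custom counting callback into a pipeline; the RecordCounter class is hypothetical, and only the pipeline API comes from the header above:

#include "llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h"

using namespace llvm;
using namespace llvm::codeview;

// Hypothetical callback that counts how many type records were visited.
class RecordCounter : public TypeVisitorCallbacks {
public:
  Error visitTypeBegin(CVType &Record) override {
    ++Count;
    return Error::success();
  }
  unsigned Count = 0;
};

static void buildPipeline(TypeVisitorCallbacks &Deserializer) {
  RecordCounter Counter;
  TypeVisitorCallbackPipeline Pipeline;
  // Callbacks run in the order they are added; a deserializing visitor
  // typically goes first so later stages see populated records.
  Pipeline.addCallbackToPipeline(Deserializer);
  Pipeline.addCallbackToPipeline(Counter);
  // The pipeline is then handed to whatever drives visitation, e.g. a
  // CVTypeVisitor (not shown here).
}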
diff --git a/include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h b/include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h
index 310847ec5d2d..5e27df346b00 100644
--- a/include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h
+++ b/include/llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h
@@ -10,54 +10,53 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/DebugInfo/CodeView/CodeView.h"
-#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"
namespace llvm {
namespace codeview {
+
class TypeVisitorCallbacks {
friend class CVTypeVisitor;
public:
- virtual ~TypeVisitorCallbacks() {}
+ virtual ~TypeVisitorCallbacks() = default;
/// Action to take on unknown types. By default, they are ignored.
- virtual Error visitUnknownType(const CVRecord<TypeLeafKind> &Record) {
- return Error::success();
- }
- virtual Error visitUnknownMember(const CVRecord<TypeLeafKind> &Record) {
- return Error::success();
- }
-
+ virtual Error visitUnknownType(CVType &Record) { return Error::success(); }
/// Paired begin/end actions for all types. Receives all record data,
- /// including the fixed-length record prefix.
- virtual Error visitTypeBegin(const CVRecord<TypeLeafKind> &Record) {
- return Error::success();
- }
- virtual Error visitTypeEnd(const CVRecord<TypeLeafKind> &Record) {
+ /// including the fixed-length record prefix. visitTypeBegin() should return
+ /// the type of the Record, or an error if it cannot be determined.
+ virtual Error visitTypeBegin(CVType &Record) { return Error::success(); }
+ virtual Error visitTypeEnd(CVType &Record) { return Error::success(); }
+
+ virtual Error visitUnknownMember(CVMemberRecord &Record) {
return Error::success();
}
- virtual Error visitFieldListBegin(const CVRecord<TypeLeafKind> &Record) {
+ virtual Error visitMemberBegin(CVMemberRecord &Record) {
return Error::success();
}
- virtual Error visitFieldListEnd(const CVRecord<TypeLeafKind> &Record) {
+ virtual Error visitMemberEnd(CVMemberRecord &Record) {
return Error::success();
}
#define TYPE_RECORD(EnumName, EnumVal, Name) \
- virtual Error visit##Name(Name##Record &Record) { return Error::success(); }
+ virtual Error visitKnownRecord(CVType &CVR, Name##Record &Record) { \
+ return Error::success(); \
+ }
#define MEMBER_RECORD(EnumName, EnumVal, Name) \
- TYPE_RECORD(EnumName, EnumVal, Name)
+ virtual Error visitKnownMember(CVMemberRecord &CVM, Name##Record &Record) { \
+ return Error::success(); \
+ }
+
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "TypeRecords.def"
};
-}
-}
-#endif
+} // end namespace codeview
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
diff --git a/include/llvm/DebugInfo/DIContext.h b/include/llvm/DebugInfo/DIContext.h
index 2f88371979ea..804419c517df 100644
--- a/include/llvm/DebugInfo/DIContext.h
+++ b/include/llvm/DebugInfo/DIContext.h
@@ -17,9 +17,12 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Object/ObjectFile.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
#include <string>
+#include <tuple>
+#include <utility>
namespace llvm {
@@ -32,8 +35,12 @@ struct DILineInfo {
uint32_t Line;
uint32_t Column;
+ // DWARF-specific.
+ uint32_t Discriminator;
+
DILineInfo()
- : FileName("<invalid>"), FunctionName("<invalid>"), Line(0), Column(0) {}
+ : FileName("<invalid>"), FunctionName("<invalid>"), Line(0), Column(0),
+ Discriminator(0) {}
bool operator==(const DILineInfo &RHS) const {
return Line == RHS.Line && Column == RHS.Column &&
@@ -42,6 +49,10 @@ struct DILineInfo {
bool operator!=(const DILineInfo &RHS) const {
return !(*this == RHS);
}
+ bool operator<(const DILineInfo &RHS) const {
+ return std::tie(FileName, FunctionName, Line, Column) <
+ std::tie(RHS.FileName, RHS.FunctionName, RHS.Line, RHS.Column);
+ }
};
typedef SmallVector<std::pair<uint64_t, DILineInfo>, 16> DILineInfoTable;
@@ -49,19 +60,24 @@ typedef SmallVector<std::pair<uint64_t, DILineInfo>, 16> DILineInfoTable;
/// DIInliningInfo - a format-neutral container for inlined code description.
class DIInliningInfo {
SmallVector<DILineInfo, 4> Frames;
- public:
- DIInliningInfo() {}
+
+public:
+ DIInliningInfo() = default;
+
DILineInfo getFrame(unsigned Index) const {
assert(Index < Frames.size());
return Frames[Index];
}
+
DILineInfo *getMutableFrame(unsigned Index) {
assert(Index < Frames.size());
return &Frames[Index];
}
+
uint32_t getNumberOfFrames() const {
return Frames.size();
}
+
void addFrame(const DILineInfo &Frame) {
Frames.push_back(Frame);
}
@@ -124,6 +140,7 @@ enum DIDumpType {
DIDT_AppleNamespaces,
DIDT_AppleObjC,
DIDT_CUIndex,
+ DIDT_GdbIndex,
DIDT_TUIndex,
};
@@ -133,13 +150,14 @@ public:
CK_DWARF,
CK_PDB
};
- DIContextKind getKind() const { return Kind; }
DIContext(DIContextKind K) : Kind(K) {}
- virtual ~DIContext() {}
+ virtual ~DIContext() = default;
+
+ DIContextKind getKind() const { return Kind; }
virtual void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All,
- bool DumpEH = false) = 0;
+ bool DumpEH = false, bool SummarizeTypes = false) = 0;
virtual DILineInfo getLineInfoForAddress(uint64_t Address,
DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
@@ -147,6 +165,7 @@ public:
uint64_t Size, DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address,
DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+
private:
const DIContextKind Kind;
};
@@ -192,6 +211,6 @@ public:
virtual std::unique_ptr<LoadedObjectInfo> clone() const = 0;
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_DEBUGINFO_DICONTEXT_H
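The new DILineInfo::operator< orders entries lexicographically by tying the members into tuples, and the Discriminator field is deliberately left out of the ordering. A minimal illustration of the std::tie idiom on a simplified struct:

#include <cassert>
#include <cstdint>
#include <string>
#include <tuple>

struct LineInfo {
  std::string FileName;
  uint32_t Line;
  uint32_t Column;

  // Lexicographic ordering: FileName first, then Line, then Column.
  bool operator<(const LineInfo &RHS) const {
    return std::tie(FileName, Line, Column) <
           std::tie(RHS.FileName, RHS.Line, RHS.Column);
  }
};

int main() {
  LineInfo A{"a.cpp", 10, 1};
  LineInfo B{"a.cpp", 10, 2};
  LineInfo C{"b.cpp", 1, 1};
  assert(A < B);  // same file and line, smaller column wins
  assert(B < C);  // file name dominates line and column
  return 0;
}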
diff --git a/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h b/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h
index 6ab5d5ce6f6e..778817f57bf5 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h
@@ -10,26 +10,41 @@
#ifndef LLVM_LIB_DEBUGINFO_DWARFABBREVIATIONDECLARATION_H
#define LLVM_LIB_DEBUGINFO_DWARFABBREVIATIONDECLARATION_H
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataExtractor.h"
-
+#include "llvm/Support/Dwarf.h"
namespace llvm {
+class DWARFUnit;
+class DWARFFormValue;
class raw_ostream;
class DWARFAbbreviationDeclaration {
public:
struct AttributeSpec {
- AttributeSpec(uint16_t Attr, uint16_t Form) : Attr(Attr), Form(Form) {}
- uint16_t Attr;
- uint16_t Form;
+ AttributeSpec(dwarf::Attribute A, dwarf::Form F, Optional<uint8_t> S)
+ : Attr(A), Form(F), ByteSize(S) {}
+ dwarf::Attribute Attr;
+ dwarf::Form Form;
+ /// If ByteSize has a value, then it contains the fixed size in bytes for
+ /// the Form in this object. If ByteSize doesn't have a value, then the
+ /// byte size of Form either varies according to the DWARFUnit that it is
+ /// contained in or the value size varies and must be decoded from the
+ /// debug information in order to determine its size.
+ Optional<uint8_t> ByteSize;
+ /// Get the fixed byte size of this Form if possible. This function might
+ /// use the DWARFUnit to calculate the size of the Form, like for
+ /// DW_AT_address and DW_AT_ref_addr, so this isn't just an accessor for
+ /// the ByteSize member.
+ Optional<uint8_t> getByteSize(const DWARFUnit &U) const;
};
typedef SmallVector<AttributeSpec, 8> AttributeSpecVector;
DWARFAbbreviationDeclaration();
uint32_t getCode() const { return Code; }
- uint32_t getTag() const { return Tag; }
+ dwarf::Tag getTag() const { return Tag; }
bool hasChildren() const { return HasChildren; }
typedef iterator_range<AttributeSpecVector::const_iterator>
@@ -39,22 +54,77 @@ public:
return attr_iterator_range(AttributeSpecs.begin(), AttributeSpecs.end());
}
- uint16_t getFormByIndex(uint32_t idx) const {
- return idx < AttributeSpecs.size() ? AttributeSpecs[idx].Form : 0;
+ dwarf::Form getFormByIndex(uint32_t idx) const {
+ if (idx < AttributeSpecs.size())
+ return AttributeSpecs[idx].Form;
+ return dwarf::Form(0);
}
- uint32_t findAttributeIndex(uint16_t attr) const;
+ /// Get the index of the specified attribute.
+ ///
+ /// Searches this abbreviation declaration for the index of the specified
+ /// attribute.
+ ///
+ /// \param attr DWARF attribute to search for.
+ /// \returns Optional index of the attribute if found, None otherwise.
+ Optional<uint32_t> findAttributeIndex(dwarf::Attribute attr) const;
+
+ /// Extract a DWARF form value from a DIE specified by DIE offset.
+ ///
+ /// Extract an attribute value for a DWARFUnit given the DIE offset and the
+ /// attribute.
+ ///
+ /// \param DIEOffset the DIE offset that points to the ULEB128 abbreviation
+ /// code in the .debug_info data.
+ /// \param Attr DWARF attribute to search for.
+ /// \param U the DWARFUnit that contains the DIE.
+ /// \returns Optional DWARF form value if the attribute was extracted.
+ Optional<DWARFFormValue> getAttributeValue(const uint32_t DIEOffset,
+ const dwarf::Attribute Attr,
+ const DWARFUnit &U) const;
+
bool extract(DataExtractor Data, uint32_t* OffsetPtr);
void dump(raw_ostream &OS) const;
+ // Return an optional byte size of all attribute data in this abbreviation
+ // if a constant byte size can be calculated given a DWARFUnit. This allows
+ // DWARF parsing to be faster as many DWARF DIEs have a fixed byte size.
+ Optional<size_t> getFixedAttributesByteSize(const DWARFUnit &U) const;
+
private:
void clear();
+ /// A helper structure that can quickly determine the size in bytes of an
+ /// abbreviation declaration.
+ struct FixedSizeInfo {
+ /// The fixed byte size for fixed size forms.
+ uint16_t NumBytes;
+ /// Number of DW_FORM_address forms in this abbreviation declaration.
+ uint8_t NumAddrs;
+ /// Number of DW_FORM_ref_addr forms in this abbreviation declaration.
+ uint8_t NumRefAddrs;
+ /// Number of 4 byte in DWARF32 and 8 byte in DWARF64 forms.
+ uint8_t NumDwarfOffsets;
+ /// Constructor
+ FixedSizeInfo()
+ : NumBytes(0), NumAddrs(0), NumRefAddrs(0), NumDwarfOffsets(0) {}
+ /// Calculate the fixed size in bytes given a DWARFUnit.
+ ///
+ /// \param U the DWARFUnit to use when determining the byte size.
+ /// \returns the size in bytes for all attribute data in this abbreviation.
+ /// The returned size does not include bytes for the ULEB128 abbreviation
+ /// code.
+ size_t getByteSize(const DWARFUnit &U) const;
+ };
+
uint32_t Code;
- uint32_t Tag;
+ dwarf::Tag Tag;
+ uint8_t CodeByteSize;
bool HasChildren;
-
AttributeSpecVector AttributeSpecs;
+ /// If this abbreviation has a fixed byte size then FixedAttributeSize member
+ /// variable below will have a value.
+ Optional<FixedSizeInfo> FixedAttributeSize;
};
}
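
Usage sketch (not part of the diff): with an already-extracted DWARFAbbreviationDeclaration `AbbrevDecl`, a hypothetical DIE offset `DIEOffset`, and a DWARFUnit `U` (plus the usual raw_ostream/Dwarf.h includes), the new Optional-based API reads roughly as:

  // Look up DW_AT_name in the abbreviation and, if the abbreviation carries
  // it, extract and print the form value for the DIE at DIEOffset.
  if (Optional<uint32_t> Idx = AbbrevDecl.findAttributeIndex(dwarf::DW_AT_name)) {
    dwarf::Form Form = AbbrevDecl.getFormByIndex(*Idx);
    (void)Form; // e.g. check the form class before extracting
    if (Optional<DWARFFormValue> Value =
            AbbrevDecl.getAttributeValue(DIEOffset, dwarf::DW_AT_name, U))
      Value->dump(llvm::outs());
  }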
diff --git a/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h b/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
index 47dbf5fd4f56..63343728fa99 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
@@ -13,6 +13,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/Support/Dwarf.h"
#include <cstdint>
namespace llvm {
@@ -30,7 +31,7 @@ class DWARFAcceleratorTable {
struct HeaderData {
typedef uint16_t AtomType;
- typedef uint16_t Form;
+ typedef dwarf::Form Form;
uint32_t DIEOffsetBase;
SmallVector<std::pair<AtomType, Form>, 3> Atoms;
};
diff --git a/include/llvm/DebugInfo/DWARF/DWARFContext.h b/include/llvm/DebugInfo/DWARF/DWARFContext.h
index 741a31cb582b..ef310e704005 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -10,18 +10,31 @@
#ifndef LLVM_LIB_DEBUGINFO_DWARFCONTEXT_H
#define LLVM_LIB_DEBUGINFO_DWARFCONTEXT_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFCompileUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugAranges.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugFrame.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugLoc.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugMacro.h"
-#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include "llvm/DebugInfo/DWARF/DWARFGdbIndex.h"
#include "llvm/DebugInfo/DWARF/DWARFSection.h"
#include "llvm/DebugInfo/DWARF/DWARFTypeUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
+#include "llvm/Object/ObjectFile.h"
+#include <cstdint>
+#include <deque>
+#include <map>
+#include <memory>
+#include <utility>
namespace llvm {
@@ -30,17 +43,17 @@ namespace llvm {
// dwarf where we expect relocated values. This adds a bit of complexity to the
// dwarf parsing/extraction with the benefit of not allocating memory for the
// entire size of the debug info sections.
-typedef DenseMap<uint64_t, std::pair<uint8_t, int64_t> > RelocAddrMap;
+typedef DenseMap<uint64_t, std::pair<uint8_t, int64_t>> RelocAddrMap;
/// DWARFContext
/// This data structure is the top level entity that deals with dwarf debug
/// information parsing. The actual data is supplied through pure virtual
/// methods that a concrete implementation provides.
class DWARFContext : public DIContext {
-
DWARFUnitSection<DWARFCompileUnit> CUs;
std::deque<DWARFUnitSection<DWARFTypeUnit>> TUs;
std::unique_ptr<DWARFUnitIndex> CUIndex;
+ std::unique_ptr<DWARFGdbIndex> GdbIndex;
std::unique_ptr<DWARFUnitIndex> TUIndex;
std::unique_ptr<DWARFDebugAbbrev> Abbrev;
std::unique_ptr<DWARFDebugLoc> Loc;
@@ -55,9 +68,6 @@ class DWARFContext : public DIContext {
std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
std::unique_ptr<DWARFDebugLocDWO> LocDWO;
- DWARFContext(DWARFContext &) = delete;
- DWARFContext &operator=(DWARFContext &) = delete;
-
/// Read compile units from the debug_info section (if necessary)
/// and store them in CUs.
void parseCompileUnits();
@@ -76,13 +86,15 @@ class DWARFContext : public DIContext {
public:
DWARFContext() : DIContext(CK_DWARF) {}
+ DWARFContext(DWARFContext &) = delete;
+ DWARFContext &operator=(DWARFContext &) = delete;
static bool classof(const DIContext *DICtx) {
return DICtx->getKind() == CK_DWARF;
}
void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All,
- bool DumpEH = false) override;
+ bool DumpEH = false, bool SummarizeTypes = false) override;
typedef DWARFUnitSection<DWARFCompileUnit>::iterator_range cu_iterator_range;
typedef DWARFUnitSection<DWARFTypeUnit>::iterator_range tu_iterator_range;
@@ -149,6 +161,7 @@ public:
}
const DWARFUnitIndex &getCUIndex();
+ DWARFGdbIndex &getGdbIndex();
const DWARFUnitIndex &getTUIndex();
/// Get a pointer to the parsed DebugAbbrev object.
@@ -220,11 +233,13 @@ public:
virtual const DWARFSection& getAppleNamespacesSection() = 0;
virtual const DWARFSection& getAppleObjCSection() = 0;
virtual StringRef getCUIndexSection() = 0;
+ virtual StringRef getGdbIndexSection() = 0;
virtual StringRef getTUIndexSection() = 0;
static bool isSupportedVersion(unsigned version) {
return version == 2 || version == 3 || version == 4 || version == 5;
}
+
private:
/// Return the compile unit that includes an offset (relative to .debug_info).
DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset);
@@ -239,6 +254,7 @@ private:
/// pointers to it.
class DWARFContextInMemory : public DWARFContext {
virtual void anchor();
+
bool IsLittleEndian;
uint8_t AddressSize;
DWARFSection InfoSection;
@@ -272,6 +288,7 @@ class DWARFContextInMemory : public DWARFContext {
DWARFSection AppleNamespacesSection;
DWARFSection AppleObjCSection;
StringRef CUIndexSection;
+ StringRef GdbIndexSection;
StringRef TUIndexSection;
SmallVector<SmallString<32>, 4> UncompressedSections;
@@ -279,6 +296,7 @@ class DWARFContextInMemory : public DWARFContext {
public:
DWARFContextInMemory(const object::ObjectFile &Obj,
const LoadedObjectInfo *L = nullptr);
+
bool isLittleEndian() const override { return IsLittleEndian; }
uint8_t getAddressSize() const override { return AddressSize; }
const DWARFSection &getInfoSection() override { return InfoSection; }
@@ -318,9 +336,10 @@ public:
return AddrSection;
}
StringRef getCUIndexSection() override { return CUIndexSection; }
+ StringRef getGdbIndexSection() override { return GdbIndexSection; }
StringRef getTUIndexSection() override { return TUIndexSection; }
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_LIB_DEBUGINFO_DWARFCONTEXT_H
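
Usage sketch (not part of the diff), assuming `Obj` is an object::ObjectFile that has already been loaded: build an in-memory context, dump .debug_info with the new SummarizeTypes flag, and pull the freshly added .gdb_index parser.

  DWARFContextInMemory DICtx(Obj);
  // Dump only the .debug_info portion; SummarizeTypes shortens type output.
  DICtx.dump(llvm::outs(), DIDT_Info, /*DumpEH=*/false, /*SummarizeTypes=*/true);
  DWARFGdbIndex &GdbIdx = DICtx.getGdbIndex();
  GdbIdx.dump(llvm::outs());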
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h b/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h
index 67c4a2bb3e67..f732deef548c 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h
@@ -23,6 +23,9 @@ class DWARFAbbreviationDeclarationSet {
uint32_t FirstAbbrCode;
std::vector<DWARFAbbreviationDeclaration> Decls;
+ typedef std::vector<DWARFAbbreviationDeclaration>::const_iterator
+ const_iterator;
+
public:
DWARFAbbreviationDeclarationSet();
@@ -33,6 +36,14 @@ public:
const DWARFAbbreviationDeclaration *
getAbbreviationDeclaration(uint32_t AbbrCode) const;
+ const_iterator begin() const {
+ return Decls.begin();
+ }
+
+ const_iterator end() const {
+ return Decls.end();
+ }
+
private:
void clear();
};
@@ -53,6 +64,14 @@ public:
void dump(raw_ostream &OS) const;
void extract(DataExtractor Data);
+ DWARFAbbreviationDeclarationSetMap::const_iterator begin() const {
+ return AbbrDeclSets.begin();
+ }
+
+ DWARFAbbreviationDeclarationSetMap::const_iterator end() const {
+ return AbbrDeclSets.end();
+ }
+
private:
void clear();
};
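
Usage sketch (not part of the diff), assuming `DebugAbbrev` is a parsed DWARFDebugAbbrev and that the set map iterates (section offset, DWARFAbbreviationDeclarationSet) pairs: the new begin()/end() members allow range-based iteration over every declaration.

  for (const auto &AbbrDeclSet : DebugAbbrev) {
    // AbbrDeclSet.second is a DWARFAbbreviationDeclarationSet.
    for (const auto &AbbrDecl : AbbrDeclSet.second)
      llvm::outs() << dwarf::TagString(AbbrDecl.getTag()) << "\n";
  }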
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h b/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h
index 837a8e63469e..5a602392add8 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h
@@ -59,6 +59,8 @@ public:
uint32_t getCompileUnitDIEOffset() const { return HeaderData.CuOffset; }
+ const Header &getHeader() const { return HeaderData; }
+
desc_iterator_range descriptors() const {
return desc_iterator_range(ArangeDescriptors.begin(),
ArangeDescriptors.end());
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h b/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
index f29d5fe9ecde..f36f470980b1 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
@@ -15,6 +15,7 @@
#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Dwarf.h"
namespace llvm {
@@ -25,134 +26,39 @@ class DWARFContext;
class DWARFFormValue;
struct DWARFDebugInfoEntryInlinedChain;
-/// DWARFDebugInfoEntryMinimal - A DIE with only the minimum required data.
-class DWARFDebugInfoEntryMinimal {
+/// DWARFDebugInfoEntry - A DIE with only the minimum required data.
+class DWARFDebugInfoEntry {
/// Offset within the .debug_info of the start of this entry.
uint32_t Offset;
- /// How many to add to "this" to get the sibling.
- uint32_t SiblingIdx;
+ /// The integer depth of this DIE within the compile unit DIEs where the
+ /// compile/type unit DIE has a depth of zero.
+ uint32_t Depth;
const DWARFAbbreviationDeclaration *AbbrevDecl;
public:
- DWARFDebugInfoEntryMinimal()
- : Offset(0), SiblingIdx(0), AbbrevDecl(nullptr) {}
-
- void dump(raw_ostream &OS, DWARFUnit *u, unsigned recurseDepth,
- unsigned indent = 0) const;
- void dumpAttribute(raw_ostream &OS, DWARFUnit *u, uint32_t *offset_ptr,
- uint16_t attr, uint16_t form, unsigned indent = 0) const;
+ DWARFDebugInfoEntry()
+ : Offset(0), Depth(0), AbbrevDecl(nullptr) {}
/// Extracts a debug info entry, which is a child of a given unit,
/// starting at a given offset. If DIE can't be extracted, returns false and
/// doesn't change OffsetPtr.
- bool extractFast(const DWARFUnit *U, uint32_t *OffsetPtr);
-
- uint32_t getTag() const { return AbbrevDecl ? AbbrevDecl->getTag() : 0; }
- bool isNULL() const { return AbbrevDecl == nullptr; }
-
- /// Returns true if DIE represents a subprogram (not inlined).
- bool isSubprogramDIE() const;
- /// Returns true if DIE represents a subprogram or an inlined
- /// subroutine.
- bool isSubroutineDIE() const;
+ bool extractFast(const DWARFUnit &U, uint32_t *OffsetPtr);
+ /// High performance extraction should use this call.
+ bool extractFast(const DWARFUnit &U, uint32_t *OffsetPtr,
+ const DataExtractor &DebugInfoData,
+ uint32_t UEndOffset,
+ uint32_t Depth);
uint32_t getOffset() const { return Offset; }
- bool hasChildren() const { return !isNULL() && AbbrevDecl->hasChildren(); }
-
- // We know we are kept in a vector of contiguous entries, so we know
- // our sibling will be some index after "this".
- const DWARFDebugInfoEntryMinimal *getSibling() const {
- return SiblingIdx > 0 ? this + SiblingIdx : nullptr;
+ uint32_t getDepth() const { return Depth; }
+ dwarf::Tag getTag() const {
+ return AbbrevDecl ? AbbrevDecl->getTag() : dwarf::DW_TAG_null;
}
-
- // We know we are kept in a vector of contiguous entries, so we know
- // we don't need to store our child pointer, if we have a child it will
- // be the next entry in the list...
- const DWARFDebugInfoEntryMinimal *getFirstChild() const {
- return hasChildren() ? this + 1 : nullptr;
- }
-
- void setSibling(const DWARFDebugInfoEntryMinimal *Sibling) {
- if (Sibling) {
- // We know we are kept in a vector of contiguous entries, so we know
- // our sibling will be some index after "this".
- SiblingIdx = Sibling - this;
- } else
- SiblingIdx = 0;
- }
-
+ bool hasChildren() const { return AbbrevDecl && AbbrevDecl->hasChildren(); }
const DWARFAbbreviationDeclaration *getAbbreviationDeclarationPtr() const {
return AbbrevDecl;
}
-
- bool getAttributeValue(const DWARFUnit *U, const uint16_t Attr,
- DWARFFormValue &FormValue) const;
-
- const char *getAttributeValueAsString(const DWARFUnit *U, const uint16_t Attr,
- const char *FailValue) const;
-
- uint64_t getAttributeValueAsAddress(const DWARFUnit *U, const uint16_t Attr,
- uint64_t FailValue) const;
-
- uint64_t getAttributeValueAsUnsignedConstant(const DWARFUnit *U,
- const uint16_t Attr,
- uint64_t FailValue) const;
-
- uint64_t getAttributeValueAsReference(const DWARFUnit *U, const uint16_t Attr,
- uint64_t FailValue) const;
-
- uint64_t getAttributeValueAsSectionOffset(const DWARFUnit *U,
- const uint16_t Attr,
- uint64_t FailValue) const;
-
- uint64_t getRangesBaseAttribute(const DWARFUnit *U, uint64_t FailValue) const;
-
- /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
- /// Returns true if both attributes are present.
- bool getLowAndHighPC(const DWARFUnit *U, uint64_t &LowPC,
- uint64_t &HighPC) const;
-
- DWARFAddressRangesVector getAddressRanges(const DWARFUnit *U) const;
-
- void collectChildrenAddressRanges(const DWARFUnit *U,
- DWARFAddressRangesVector &Ranges) const;
-
- bool addressRangeContainsAddress(const DWARFUnit *U,
- const uint64_t Address) const;
-
- /// If a DIE represents a subprogram (or inlined subroutine),
- /// returns its mangled name (or short name, if mangled is missing).
- /// This name may be fetched from specification or abstract origin
- /// for this subprogram. Returns null if no name is found.
- const char *getSubroutineName(const DWARFUnit *U, DINameKind Kind) const;
-
- /// Return the DIE name resolving DW_AT_sepcification or
- /// DW_AT_abstract_origin references if necessary.
- /// Returns null if no name is found.
- const char *getName(const DWARFUnit *U, DINameKind Kind) const;
-
- /// Retrieves values of DW_AT_call_file, DW_AT_call_line and
- /// DW_AT_call_column from DIE (or zeroes if they are missing).
- void getCallerFrame(const DWARFUnit *U, uint32_t &CallFile,
- uint32_t &CallLine, uint32_t &CallColumn) const;
-
- /// Get inlined chain for a given address, rooted at the current DIE.
- /// Returns empty chain if address is not contained in address range
- /// of current DIE.
- DWARFDebugInfoEntryInlinedChain
- getInlinedChainForAddress(const DWARFUnit *U, const uint64_t Address) const;
-};
-
-/// DWARFDebugInfoEntryInlinedChain - represents a chain of inlined_subroutine
-/// DIEs, (possibly ending with subprogram DIE), all of which are contained
-/// in some concrete inlined instance tree. Address range for each DIE
-/// (except the last DIE) in this chain is contained in address
-/// range for next DIE in the chain.
-struct DWARFDebugInfoEntryInlinedChain {
- DWARFDebugInfoEntryInlinedChain() : U(nullptr) {}
- SmallVector<DWARFDebugInfoEntryMinimal, 4> DIEs;
- const DWARFUnit *U;
};
}
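
Usage sketch (not part of the diff): the slimmed-down DWARFDebugInfoEntry now records only offset, depth, and abbreviation. Assuming a DWARFUnit `U` and a hypothetical `FirstDIEOffset` pointing at the first DIE after the unit header:

  // Extract a single DIE header and report its tag and depth.
  DWARFDebugInfoEntry Entry;
  uint32_t Offset = FirstDIEOffset;
  if (Entry.extractFast(U, &Offset))
    llvm::outs() << dwarf::TagString(Entry.getTag()) << " at depth "
                 << Entry.getDepth() << "\n";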
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h b/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
index 760950b726b3..ca9a6c822876 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
@@ -188,6 +188,8 @@ public:
bool lookupAddressRange(uint64_t address, uint64_t size,
std::vector<uint32_t> &result) const;
+ bool hasFileAtIndex(uint64_t FileIndex) const;
+
// Extracts filename by its index in filename table in prologue.
// Returns true on success.
bool getFileNameByIndex(uint64_t FileIndex, const char *CompDir,
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h b/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h
index d13c7f553ba3..5a0352dacdb9 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h
@@ -12,7 +12,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataExtractor.h"
-#include "llvm/Support/Dwarf.h"
+#include <cstdint>
namespace llvm {
@@ -46,13 +46,14 @@ class DWARFDebugMacro {
MacroList Macros;
public:
- DWARFDebugMacro() {}
+ DWARFDebugMacro() = default;
+
/// Print the macro list found within the debug_macinfo section.
void dump(raw_ostream &OS) const;
/// Parse the debug_macinfo section accessible via the 'data' parameter.
void parse(DataExtractor data);
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h b/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h
new file mode 100644
index 000000000000..2b23837e32d6
--- /dev/null
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugPubTable.h
@@ -0,0 +1,77 @@
+//===-- DWARFDebugPubTable.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGPUBTABLE_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGPUBTABLE_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Dwarf.h"
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// Represents structure for holding and parsing .debug_pub* tables.
+class DWARFDebugPubTable {
+public:
+ struct Entry {
+ /// Section offset from the beginning of the compilation unit.
+ uint32_t SecOffset;
+
+ /// An entry of the various gnu_pub* debug sections.
+ llvm::dwarf::PubIndexEntryDescriptor Descriptor;
+
+ /// The name of the object as given by the DW_AT_name attribute of the
+ /// referenced DIE.
+ const char *Name;
+ };
+
+ /// Each table consists of sets of variable length entries. Each set describes
+ /// the names of global objects and functions, or global types, respectively,
+ /// whose definitions are represented by debugging information entries owned
+ /// by a single compilation unit.
+ struct Set {
+ /// The total length of the entries for that set, not including the length
+ /// field itself.
+ uint32_t Length;
+
+ /// This number is specific to the name lookup table and is independent of
+ /// the DWARF version number.
+ uint16_t Version;
+
+ /// The offset from the beginning of the .debug_info section of the
+ /// compilation unit header referenced by the set.
+ uint32_t Offset;
+
+ /// The size in bytes of the contents of the .debug_info section generated
+ /// to represent that compilation unit.
+ uint32_t Size;
+
+ std::vector<Entry> Entries;
+ };
+
+private:
+ std::vector<Set> Sets;
+
+ /// GNU-style tables contain additional information.
+ /// This flag determines whether the section being parsed is a debug_gnu* table.
+ bool GnuStyle;
+
+public:
+ DWARFDebugPubTable(StringRef Data, bool LittleEndian, bool GnuStyle);
+ void dump(StringRef Name, raw_ostream &OS) const;
+
+ ArrayRef<Set> getData() { return Sets; }
+};
+}
+
+#endif
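
Usage sketch (not part of the diff), assuming `PubNamesData` is a StringRef holding the raw .debug_pubnames section contents of a little-endian object:

  // Parse a (non-GNU) pubnames table and print a per-set summary.
  DWARFDebugPubTable PubNames(PubNamesData, /*LittleEndian=*/true,
                              /*GnuStyle=*/false);
  PubNames.dump("debug_pubnames", llvm::outs());
  for (const DWARFDebugPubTable::Set &S : PubNames.getData())
    llvm::outs() << "CU at offset " << S.Offset << ": " << S.Entries.size()
                 << " entries\n";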
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDie.h b/include/llvm/DebugInfo/DWARF/DWARFDie.h
new file mode 100644
index 000000000000..f33758de6a55
--- /dev/null
+++ b/include/llvm/DebugInfo/DWARF/DWARFDie.h
@@ -0,0 +1,369 @@
+//===-- DWARFDie.h --------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDIE_H
+#define LLVM_LIB_DEBUGINFO_DWARFDIE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
+
+namespace llvm {
+
+class DWARFUnit;
+class DWARFDebugInfoEntry;
+class raw_ostream;
+
+//===----------------------------------------------------------------------===//
+/// Utility class that carries the DWARF compile/type unit and the debug info
+/// entry in an object.
+///
+/// When accessing information from a debug info entry we always need the DWARF
+/// compile/type unit in order to extract the info correctly, as some information
+/// is relative to the compile/type unit. Prior to this class the DWARFUnit and
+/// the DWARFDebugInfoEntry were passed around separately and there was the
+/// possibility for error if the wrong DWARFUnit was used to extract a unit
+/// relative offset. This class helps to ensure that this doesn't happen and
+/// also simplifies the attribute extraction calls by not having to specify the
+/// DWARFUnit for each call.
+class DWARFDie {
+ DWARFUnit *U;
+ const DWARFDebugInfoEntry *Die;
+public:
+ DWARFDie() : U(nullptr), Die(nullptr) {}
+ DWARFDie(DWARFUnit *Unit, const DWARFDebugInfoEntry * D) : U(Unit), Die(D) {}
+
+ bool isValid() const { return U && Die; }
+ explicit operator bool() const { return isValid(); }
+ bool operator ==(const DWARFDie &RHS) const {
+ return Die == RHS.Die && U == RHS.U;
+ }
+ const DWARFDebugInfoEntry *getDebugInfoEntry() const { return Die; }
+ DWARFUnit *getDwarfUnit() const { return U; }
+
+
+ /// Get the abbreviation declaration for this DIE.
+ ///
+ /// \returns the abbreviation declaration or NULL for null tags.
+ const DWARFAbbreviationDeclaration *getAbbreviationDeclarationPtr() const {
+ assert(isValid() && "must check validity prior to calling");
+ return Die->getAbbreviationDeclarationPtr();
+ }
+
+ /// Get the absolute offset into the debug info or types section.
+ ///
+ /// \returns the DIE offset or -1U if invalid.
+ uint32_t getOffset() const {
+ assert(isValid() && "must check validity prior to calling");
+ return Die->getOffset();
+ }
+
+ dwarf::Tag getTag() const {
+ auto AbbrevDecl = getAbbreviationDeclarationPtr();
+ if (AbbrevDecl)
+ return AbbrevDecl->getTag();
+ return dwarf::DW_TAG_null;
+ }
+
+ bool hasChildren() const {
+ assert(isValid() && "must check validity prior to calling");
+ return Die->hasChildren();
+ }
+
+ /// Returns true for a valid DIE that terminates a sibling chain.
+ bool isNULL() const {
+ return getAbbreviationDeclarationPtr() == nullptr;
+ }
+ /// Returns true if DIE represents a subprogram (not inlined).
+ bool isSubprogramDIE() const;
+
+ /// Returns true if DIE represents a subprogram or an inlined subroutine.
+ bool isSubroutineDIE() const;
+
+ /// Get the parent of this DIE object.
+ ///
+ /// \returns a valid DWARFDie instance if this object has a parent or an
+ /// invalid DWARFDie instance if it doesn't.
+ DWARFDie getParent() const;
+
+ /// Get the sibling of this DIE object.
+ ///
+ /// \returns a valid DWARFDie instance if this object has a sibling or an
+ /// invalid DWARFDie instance if it doesn't.
+ DWARFDie getSibling() const;
+
+ /// Get the first child of this DIE object.
+ ///
+ /// \returns a valid DWARFDie instance if this object has children or an
+ /// invalid DWARFDie instance if it doesn't.
+ DWARFDie getFirstChild() const {
+ if (isValid() && Die->hasChildren())
+ return DWARFDie(U, Die + 1);
+ return DWARFDie();
+ }
+
+ /// Dump the DIE and all of its attributes to the supplied stream.
+ ///
+ /// \param OS the stream to use for output.
+ /// \param recurseDepth the depth to recurse to when dumping this DIE and its
+ /// children.
+ /// \param indent the number of characters to indent each line that is output.
+ void dump(raw_ostream &OS, unsigned recurseDepth, unsigned indent = 0) const;
+
+ /// Extract the specified attribute from this DIE.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \returns an optional DWARFFormValue that will have the form value if the
+ /// attribute was successfully extracted.
+ Optional<DWARFFormValue> getAttributeValue(dwarf::Attribute Attr) const;
+
+ /// Extract the specified attribute from this DIE as a C string.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \param FailValue the value to return if this DIE doesn't have this
+ /// attribute.
+ /// \returns the NULL terminated C string value owned by the DWARF section
+ /// that contains the string or FailValue if the attribute doesn't exist or
+ /// if the attribute's form isn't a form that describes a string.
+ const char *getAttributeValueAsString(dwarf::Attribute Attr,
+ const char *FailValue) const;
+
+ /// Extract the specified attribute from this DIE as an address.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \param FailValue the value to return if this DIE doesn't have this
+ /// attribute.
+ /// \returns the address value of the attribute or FailValue if the
+ /// attribute doesn't exist or if the attribute's form isn't a form that
+ /// describes an address.
+ uint64_t getAttributeValueAsAddress(dwarf::Attribute Attr,
+ uint64_t FailValue) const;
+
+ /// Extract the specified attribute from this DIE as an address.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \returns an optional value for the attribute.
+ Optional<uint64_t> getAttributeValueAsAddress(dwarf::Attribute Attr) const;
+
+ /// Extract the specified attribute from this DIE as a signed integer.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \param FailValue the value to return if this DIE doesn't have this
+ /// attribute.
+ /// \returns the signed integer constant value of the attribute or FailValue
+ /// if the attribute doesn't exist or if the attribute's form isn't a form
+ /// that describes a signed integer.
+ int64_t getAttributeValueAsSignedConstant(dwarf::Attribute Attr,
+ int64_t FailValue) const;
+
+ /// Extract the specified attribute from this DIE as a signed integer.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \returns an optional value for the attribute.
+ Optional<int64_t>
+ getAttributeValueAsSignedConstant(dwarf::Attribute Attr) const;
+
+ /// Extract the specified attribute from this DIE as an unsigned integer.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \param FailValue the value to return if this DIE doesn't have this
+ /// attribute.
+ /// \returns the unsigned integer constant value of the attribute or FailValue
+ /// if the attribute doesn't exist or if the attribute's form isn't a form
+ /// that describes an unsigned integer.
+ uint64_t getAttributeValueAsUnsignedConstant(dwarf::Attribute Attr,
+ uint64_t FailValue) const;
+
+ /// Extract the specified attribute from this DIE as an unsigned integer.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \returns an optional value for the attribute.
+ Optional<uint64_t>
+ getAttributeValueAsUnsignedConstant(dwarf::Attribute Attr) const;
+
+ /// Extract the specified attribute from this DIE as absolute DIE Offset.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \param FailValue the value to return if this DIE doesn't have this
+ /// attribute.
+ /// \returns the unsigned integer constant value of the attribute or FailValue
+ /// if the attribute doesn't exist or if the attribute's form isn't a form
+ /// that describes a reference.
+ uint64_t getAttributeValueAsReference(dwarf::Attribute Attr,
+ uint64_t FailValue) const;
+
+ /// Extract the specified attribute from this DIE as absolute DIE Offset.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \returns an optional value for the attribute.
+ Optional<uint64_t> getAttributeValueAsReference(dwarf::Attribute Attr) const;
+
+ /// Extract the specified attribute from this DIE as absolute section offset.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \param FailValue the value to return if this DIE doesn't have this
+ /// attribute.
+ /// \returns the unsigned integer constant value of the attribute or FailValue
+ /// if the attribute doesn't exist or if the attribute's form isn't a form
+ /// that describes a section offset.
+ uint64_t getAttributeValueAsSectionOffset(dwarf::Attribute Attr,
+ uint64_t FailValue) const;
+ /// Extract the specified attribute from this DIE as absolute section offset.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \returns an optional value for the attribute.
+ Optional<uint64_t>
+ getAttributeValueAsSectionOffset(dwarf::Attribute Attr) const;
+
+ /// Extract the specified attribute from this DIE as the referenced DIE.
+ ///
+ /// Regardless of the reference type, return the correct DWARFDie instance if
+ /// the attribute exists. The returned DWARFDie object might be from another
+ /// DWARFUnit, but that is all encapsulated in the new DWARFDie object.
+ ///
+ /// Extract an attribute value from this DIE only. This call doesn't look
+ /// for the attribute value in any DW_AT_specification or
+ /// DW_AT_abstract_origin referenced DIEs.
+ ///
+ /// \param Attr the attribute to extract.
+ /// \returns a valid DWARFDie instance if the attribute exists, or an invalid
+ /// DWARFDie object if it doesn't.
+ DWARFDie getAttributeValueAsReferencedDie(dwarf::Attribute Attr) const;
+
+ /// Extract the range base attribute from this DIE as absolute section offset.
+ ///
+ /// This is a utility function that checks for either the DW_AT_rnglists_base
+ /// or DW_AT_GNU_ranges_base attribute.
+ ///
+ /// \returns an optional absolute section offset value for the attribute.
+ Optional<uint64_t> getRangesBaseAttribute() const;
+
+ /// Get the DW_AT_high_pc attribute value as an address.
+ ///
+ /// In DWARF version 4 and later the high PC can be encoded as an offset from
+ /// the DW_AT_low_pc. This function takes care of extracting the value as an
+ /// address or offset and adds it to the low PC if needed and returns the
+ /// value as an optional in case the DIE doesn't have a DW_AT_high_pc
+ /// attribute.
+ ///
+ /// \param LowPC the low PC that might be needed to calculate the high PC.
+ /// \returns an optional address value for the attribute.
+ Optional<uint64_t> getHighPC(uint64_t LowPC) const;
+
+ /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
+ /// Returns true if both attributes are present.
+ bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC) const;
+
+ /// Get the address ranges for this DIE.
+ ///
+ /// Get the hi/low PC range if both attributes are available, or extract the
+ /// non-contiguous address ranges from the DW_AT_ranges attribute.
+ ///
+ /// Extracts the range information from this DIE only. This call doesn't look
+ /// for the range in any DW_AT_specification or DW_AT_abstract_origin DIEs.
+ ///
+ /// \returns an address range vector that might be empty if no address range
+ /// information is available.
+ DWARFAddressRangesVector getAddressRanges() const;
+
+ /// Get all address ranges for any DW_TAG_subprogram DIEs in this DIE or any
+ /// of its children.
+ ///
+ /// Get the hi/low PC range if both attributes are available, or extract the
+ /// non-contiguous address ranges from the DW_AT_ranges attribute for this DIE
+ /// and all children.
+ ///
+ /// \param Ranges the address range vector to fill in.
+ void collectChildrenAddressRanges(DWARFAddressRangesVector &Ranges) const;
+
+ bool addressRangeContainsAddress(const uint64_t Address) const;
+
+ /// If a DIE represents a subprogram (or inlined subroutine), returns its
+ /// mangled name (or short name, if mangled is missing). This name may be
+ /// fetched from specification or abstract origin for this subprogram.
+ /// Returns null if no name is found.
+ const char *getSubroutineName(DINameKind Kind) const;
+
+ /// Return the DIE name resolving DW_AT_specification or DW_AT_abstract_origin
+ /// references if necessary. Returns null if no name is found.
+ const char *getName(DINameKind Kind) const;
+
+ /// Retrieves values of DW_AT_call_file, DW_AT_call_line and DW_AT_call_column
+ /// from DIE (or zeroes if they are missing). This function looks for
+ /// DW_AT_call attributes in this DIE only, it will not resolve the attribute
+ /// values in any DW_AT_specification or DW_AT_abstract_origin DIEs.
+ /// \param CallFile filled in with non-zero if successful, zero if there is no
+ /// DW_AT_call_file attribute in this DIE.
+ /// \param CallLine filled in with non-zero if successful, zero if there is no
+ /// DW_AT_call_line attribute in this DIE.
+ /// \param CallColumn filled in with non-zero if successful, zero if there is
+ /// no DW_AT_call_column attribute in this DIE.
+ void getCallerFrame(uint32_t &CallFile, uint32_t &CallLine,
+ uint32_t &CallColumn) const;
+
+ /// Get inlined chain for a given address, rooted at the current DIE.
+ /// Returns empty chain if address is not contained in address range
+ /// of current DIE.
+ void
+ getInlinedChainForAddress(const uint64_t Address,
+ SmallVectorImpl<DWARFDie> &InlinedChain) const;
+
+};
+
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_DEBUGINFO_DWARFDIE_H
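
Usage sketch (not part of the diff), assuming `CU` is a DWARFCompileUnit* whose DIEs can be extracted: walk the unit DIE's immediate children through the new DWARFDie navigation API and print any names.

  DWARFDie UnitDie = CU->getUnitDIE(/*ExtractUnitDIEOnly=*/false);
  for (DWARFDie Child = UnitDie.getFirstChild(); Child && !Child.isNULL();
       Child = Child.getSibling()) {
    if (const char *Name =
            Child.getAttributeValueAsString(dwarf::DW_AT_name, nullptr))
      llvm::outs() << dwarf::TagString(Child.getTag()) << ": " << Name << "\n";
  }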
diff --git a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index b2f750dd7945..920880cea10c 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Dwarf.h"
namespace llvm {
@@ -48,15 +49,17 @@ private:
const uint8_t* data;
};
- uint16_t Form; // Form for this value.
+ dwarf::Form Form; // Form for this value.
ValueType Value; // Contains all data for the form.
+ const DWARFUnit *U; // Remember the DWARFUnit at extract time.
public:
- DWARFFormValue(uint16_t Form = 0) : Form(Form) {}
- uint16_t getForm() const { return Form; }
+ DWARFFormValue(dwarf::Form F = dwarf::Form(0)) : Form(F), U(nullptr) {}
+ dwarf::Form getForm() const { return Form; }
+ void setForm(dwarf::Form F) { Form = F; }
bool isFormClass(FormClass FC) const;
-
- void dump(raw_ostream &OS, const DWARFUnit *U) const;
+ const DWARFUnit *getUnit() const { return U; }
+ void dump(raw_ostream &OS) const;
/// \brief extracts a value in data at offset *offset_ptr.
///
@@ -64,34 +67,98 @@ public:
/// case no relocation processing will be performed and some
/// kind of forms that depend on Unit information are disallowed.
/// \returns whether the extraction succeeded.
- bool extractValue(DataExtractor data, uint32_t *offset_ptr,
- const DWARFUnit *u);
+ bool extractValue(const DataExtractor &Data, uint32_t *OffsetPtr,
+ const DWARFUnit *U);
bool isInlinedCStr() const {
return Value.data != nullptr && Value.data == (const uint8_t*)Value.cstr;
}
/// getAsFoo functions below return the extracted value as Foo if the
/// DWARFFormValue's form class is suitable for representing Foo.
- Optional<uint64_t> getAsReference(const DWARFUnit *U) const;
+ Optional<uint64_t> getAsReference() const;
Optional<uint64_t> getAsUnsignedConstant() const;
Optional<int64_t> getAsSignedConstant() const;
- Optional<const char *> getAsCString(const DWARFUnit *U) const;
- Optional<uint64_t> getAsAddress(const DWARFUnit *U) const;
+ Optional<const char *> getAsCString() const;
+ Optional<uint64_t> getAsAddress() const;
Optional<uint64_t> getAsSectionOffset() const;
Optional<ArrayRef<uint8_t>> getAsBlock() const;
-
+ Optional<uint64_t> getAsCStringOffset() const;
+ Optional<uint64_t> getAsReferenceUVal() const;
+ /// Get the fixed byte size for a given form.
+ ///
+ /// If the form always has a fixed valid byte size that doesn't depend on a
+ /// DWARFUnit, then an Optional with a value will be returned. If the form
+ /// can vary in size depending on the DWARFUnit (DWARF version, address byte
+ /// size, or DWARF 32/64) and the DWARFUnit is valid, then an Optional with a
+ /// valid value is returned. If the form is always encoded using a variable
+ /// length storage format (ULEB or SLEB numbers or blocks) or the size
+ /// depends on a DWARFUnit and the DWARFUnit is NULL, then None will be
+ /// returned.
+ /// \param Form The DWARF form to get the fixed byte size for
+ /// \param U The DWARFUnit that can be used to help determine the byte size.
+ ///
+ /// \returns Optional<uint8_t> value with the fixed byte size or None if
+ /// \p Form doesn't have a fixed byte size or a DWARFUnit wasn't supplied
+ /// and was needed to calculate the byte size.
+ static Optional<uint8_t> getFixedByteSize(dwarf::Form Form,
+ const DWARFUnit *U = nullptr);
+ /// Get the fixed byte size for a given form.
+ ///
+ /// If the form has a fixed byte size given a valid DWARF version and address
+ /// byte size, then an Optional with a valid value is returned. If the form
+ /// is always encoded using a variable length storage format (ULEB or SLEB
+ /// numbers or blocks) then None will be returned.
+ ///
+ /// \param Form DWARF form to get the fixed byte size for
+ /// \param Version DWARF version number.
+ /// \param AddrSize size of an address in bytes.
+ /// \param Format enum value from llvm::dwarf::DwarfFormat.
+ /// \returns Optional<uint8_t> value with the fixed byte size or None if
+ /// \p Form doesn't have a fixed byte size.
+ static Optional<uint8_t> getFixedByteSize(dwarf::Form Form, uint16_t Version,
+ uint8_t AddrSize,
+ llvm::dwarf::DwarfFormat Format);
+
+ /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ ///
+ /// Skips the bytes for this form in the debug info and updates the offset.
+ ///
+ /// \param debug_info_data the .debug_info data to use to skip the value.
+ /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param U the DWARFUnit to use when skipping the form in case the form
+ /// size differs according to data in the DWARFUnit.
+ /// \returns true on success, false if the form was not skipped.
bool skipValue(DataExtractor debug_info_data, uint32_t *offset_ptr,
- const DWARFUnit *u) const;
- static bool skipValue(uint16_t form, DataExtractor debug_info_data,
- uint32_t *offset_ptr, const DWARFUnit *u);
- static bool skipValue(uint16_t form, DataExtractor debug_info_data,
+ const DWARFUnit *U) const;
+ /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ ///
+ /// Skips the bytes for this form in the debug info and updates the offset.
+ ///
+ /// \param form the DW_FORM enumeration that indicates the form to skip.
+ /// \param debug_info_data the .debug_info data to use to skip the value.
+ /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param U the DWARFUnit to use when skipping the form in case the form
+ /// size differs according to data in the DWARFUnit.
+ /// \returns true on success, false if the form was not skipped.
+ static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
+ uint32_t *offset_ptr, const DWARFUnit *U);
+ /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ ///
+ /// Skips the bytes for this form in the debug info and updates the offset.
+ ///
+ /// \param form the DW_FORM enumeration that indicates the form to skip.
+ /// \param debug_info_data the .debug_info data to use to skip the value.
+ /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param Version DWARF version number.
+ /// \param AddrSize size of an address in bytes.
+ /// \param Format enum value from llvm::dwarf::DwarfFormat.
+ /// \returns true on success, false if the form was not skipped.
+ static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
uint32_t *offset_ptr, uint16_t Version,
- uint8_t AddrSize);
+ uint8_t AddrSize, llvm::dwarf::DwarfFormat Format);
- static ArrayRef<uint8_t> getFixedFormSizes(uint8_t AddrSize,
- uint16_t Version);
private:
- void dumpString(raw_ostream &OS, const DWARFUnit *U) const;
+ void dumpString(raw_ostream &OS) const;
};
}
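
Usage sketch (not part of the diff), assuming a DWARFUnit `U`, a DataExtractor `DebugInfoData` over .debug_info, and a uint32_t `Offset` positioned at an attribute value: skip one DW_FORM_strp value via the fixed-size fast path when possible, otherwise fall back to skipValue().

  if (Optional<uint8_t> Size =
          DWARFFormValue::getFixedByteSize(dwarf::DW_FORM_strp, &U))
    Offset += *Size;                 // fixed-size fast path
  else
    DWARFFormValue::skipValue(dwarf::DW_FORM_strp, DebugInfoData, &Offset, &U);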
diff --git a/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h b/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h
new file mode 100644
index 000000000000..66041be96566
--- /dev/null
+++ b/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h
@@ -0,0 +1,68 @@
+//===-- DWARFGdbIndex.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFGDBINDEX_H
+#define LLVM_LIB_DEBUGINFO_DWARFGDBINDEX_H
+
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+class DWARFGdbIndex {
+ uint32_t Version;
+
+ uint32_t CuListOffset;
+ uint32_t AddressAreaOffset;
+ uint32_t SymbolTableOffset;
+ uint32_t ConstantPoolOffset;
+
+ struct CompUnitEntry {
+ uint64_t Offset; // Offset of a CU in the .debug_info section.
+ uint64_t Length; // Length of that CU.
+ };
+ SmallVector<CompUnitEntry, 0> CuList;
+
+ struct AddressEntry {
+ uint64_t LowAddress; // The low address.
+ uint64_t HighAddress; // The high address.
+ uint32_t CuIndex; // The CU index.
+ };
+ SmallVector<AddressEntry, 0> AddressArea;
+
+ struct SymTableEntry {
+ uint32_t NameOffset; // Offset of the symbol's name in the constant pool.
+ uint32_t VecOffset; // Offset of the CU vector in the constant pool.
+ };
+ SmallVector<SymTableEntry, 0> SymbolTable;
+
+ // Each value is CU index + attributes.
+ SmallVector<std::pair<uint32_t, SmallVector<uint32_t, 0>>, 0>
+ ConstantPoolVectors;
+
+ StringRef ConstantPoolStrings;
+ uint32_t StringPoolOffset;
+
+ void dumpCUList(raw_ostream &OS) const;
+ void dumpAddressArea(raw_ostream &OS) const;
+ void dumpSymbolTable(raw_ostream &OS) const;
+ void dumpConstantPool(raw_ostream &OS) const;
+
+ bool parseImpl(DataExtractor Data);
+
+public:
+ void dump(raw_ostream &OS);
+ void parse(DataExtractor Data);
+
+ bool HasContent = false;
+ bool HasError = false;
+};
+}
+
+#endif // LLVM_LIB_DEBUGINFO_DWARFGDBINDEX_H
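
Usage sketch (not part of the diff), assuming `GdbIndexData` is a StringRef with the raw .gdb_index section bytes of a little-endian, 64-bit object:

  DWARFGdbIndex GdbIndex;
  GdbIndex.parse(DataExtractor(GdbIndexData, /*IsLittleEndian=*/true,
                               /*AddressSize=*/8));
  if (GdbIndex.HasContent && !GdbIndex.HasError)
    GdbIndex.dump(llvm::outs());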
diff --git a/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h b/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
index a697edd32072..4f1e1292a1f1 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
@@ -29,7 +29,7 @@ public:
uint32_t getHeaderSize() const override {
return DWARFUnit::getHeaderSize() + 12;
}
- void dump(raw_ostream &OS);
+ void dump(raw_ostream &OS, bool Brief = false);
static const DWARFSectionKind Section = DW_SECT_TYPES;
protected:
diff --git a/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index 9c3fe3be6aa6..db7b59be90c2 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -10,10 +10,13 @@
#ifndef LLVM_LIB_DEBUGINFO_DWARFUNIT_H
#define LLVM_LIB_DEBUGINFO_DWARFUNIT_H
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
#include "llvm/DebugInfo/DWARF/DWARFSection.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
@@ -124,7 +127,9 @@ class DWARFUnit {
uint8_t AddrSize;
uint64_t BaseAddr;
// The compile unit debug information entry items.
- std::vector<DWARFDebugInfoEntryMinimal> DieArray;
+ std::vector<DWARFDebugInfoEntry> DieArray;
+ typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>
+ die_iterator_range;
class DWOHolder {
object::OwningBinary<object::ObjectFile> DWOFile;
@@ -138,6 +143,12 @@ class DWARFUnit {
const DWARFUnitIndex::Entry *IndexEntry;
+ uint32_t getDIEIndex(const DWARFDebugInfoEntry *Die) {
+ auto First = DieArray.data();
+ assert(Die >= First && Die < First + DieArray.size());
+ return Die - First;
+ }
+
protected:
virtual bool extractImpl(DataExtractor debug_info, uint32_t *offset_ptr);
/// Size in bytes of the unit header.
@@ -191,30 +202,46 @@ public:
uint32_t getNextUnitOffset() const { return Offset + Length + 4; }
uint32_t getLength() const { return Length; }
uint16_t getVersion() const { return Version; }
+ dwarf::DwarfFormat getFormat() const {
+ return dwarf::DwarfFormat::DWARF32; // FIXME: Support DWARF64.
+ }
const DWARFAbbreviationDeclarationSet *getAbbreviations() const {
return Abbrevs;
}
uint8_t getAddressByteSize() const { return AddrSize; }
+ uint8_t getRefAddrByteSize() const {
+ if (Version == 2)
+ return AddrSize;
+ return getDwarfOffsetByteSize();
+ }
+ uint8_t getDwarfOffsetByteSize() const {
+ if (getFormat() == dwarf::DwarfFormat::DWARF64)
+ return 8;
+ return 4;
+ }
uint64_t getBaseAddress() const { return BaseAddr; }
void setBaseAddress(uint64_t base_addr) {
BaseAddr = base_addr;
}
- const DWARFDebugInfoEntryMinimal *getUnitDIE(bool ExtractUnitDIEOnly = true) {
+ DWARFDie getUnitDIE(bool ExtractUnitDIEOnly = true) {
extractDIEsIfNeeded(ExtractUnitDIEOnly);
- return DieArray.empty() ? nullptr : &DieArray[0];
+ if (DieArray.empty())
+ return DWARFDie();
+ return DWARFDie(this, &DieArray[0]);
}
const char *getCompilationDir();
- uint64_t getDWOId();
+ Optional<uint64_t> getDWOId();
void collectAddressRanges(DWARFAddressRangesVector &CURanges);
/// getInlinedChainForAddress - fetches inlined chain for a given address.
/// Returns empty chain if there is no subprogram containing address. The
/// chain is valid as long as parsed compile unit DIEs are not cleared.
- DWARFDebugInfoEntryInlinedChain getInlinedChainForAddress(uint64_t Address);
+ void getInlinedChainForAddress(uint64_t Address,
+ SmallVectorImpl<DWARFDie> &InlinedChain);
/// getUnitSection - Return the DWARFUnitSection containing this unit.
const DWARFUnitSectionBase &getUnitSection() const { return UnitSection; }
@@ -232,30 +259,34 @@ public:
/// created by this unit. In other words, it's illegal to call this
/// method on a DIE that isn't accessible by following
/// children/sibling links starting from this unit's getUnitDIE().
- uint32_t getDIEIndex(const DWARFDebugInfoEntryMinimal *DIE) {
- assert(!DieArray.empty() && DIE >= &DieArray[0] &&
- DIE < &DieArray[0] + DieArray.size());
- return DIE - &DieArray[0];
+ uint32_t getDIEIndex(const DWARFDie &D) {
+ return getDIEIndex(D.getDebugInfoEntry());
}
/// \brief Return the DIE object at the given index.
- const DWARFDebugInfoEntryMinimal *getDIEAtIndex(unsigned Index) const {
+ DWARFDie getDIEAtIndex(unsigned Index) {
assert(Index < DieArray.size());
- return &DieArray[Index];
+ return DWARFDie(this, &DieArray[Index]);
}
+ DWARFDie getParent(const DWARFDebugInfoEntry *Die);
+ DWARFDie getSibling(const DWARFDebugInfoEntry *Die);
+
/// \brief Return the DIE object for a given offset inside the
/// unit's DIE vector.
///
/// The unit needs to have its DIEs extracted for this method to work.
- const DWARFDebugInfoEntryMinimal *getDIEForOffset(uint32_t Offset) const {
+ DWARFDie getDIEForOffset(uint32_t Offset) {
+ extractDIEsIfNeeded(false);
assert(!DieArray.empty());
auto it = std::lower_bound(
DieArray.begin(), DieArray.end(), Offset,
- [](const DWARFDebugInfoEntryMinimal &LHS, uint32_t Offset) {
+ [](const DWARFDebugInfoEntry &LHS, uint32_t Offset) {
return LHS.getOffset() < Offset;
});
- return it == DieArray.end() ? nullptr : &*it;
+ if (it == DieArray.end())
+ return DWARFDie();
+ return DWARFDie(this, &*it);
}
uint32_t getLineTableOffset() const {
@@ -265,6 +296,11 @@ public:
return 0;
}
+ die_iterator_range dies() {
+ extractDIEsIfNeeded(false);
+ return die_iterator_range(DieArray.begin(), DieArray.end());
+ }
+
private:
/// Size in bytes of the .debug_info data associated with this compile unit.
size_t getDebugInfoSize() const { return Length + 4 - getHeaderSize(); }
@@ -274,11 +310,7 @@ private:
size_t extractDIEsIfNeeded(bool CUDieOnly);
/// extractDIEsToVector - Appends all parsed DIEs to a vector.
void extractDIEsToVector(bool AppendCUDie, bool AppendNonCUDIEs,
- std::vector<DWARFDebugInfoEntryMinimal> &DIEs) const;
- /// setDIERelations - We read in all of the DIE entries into our flat list
- /// of DIE entries and now we need to go back through all of them and set the
- /// parent, sibling and child pointers for quick DIE navigation.
- void setDIERelations();
+ std::vector<DWARFDebugInfoEntry> &DIEs) const;
/// clearDIEs - Clear parsed DIEs to keep memory usage low.
void clearDIEs(bool KeepCUDie);
@@ -289,7 +321,7 @@ private:
/// getSubprogramForAddress - Returns subprogram DIE with address range
/// encompassing the provided address. The pointer is alive as long as parsed
/// compile unit DIEs are not cleared.
- const DWARFDebugInfoEntryMinimal *getSubprogramForAddress(uint64_t Address);
+ DWARFDie getSubprogramForAddress(uint64_t Address);
};
}
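
Usage sketch (not part of the diff), assuming `CU` is a DWARFCompileUnit*: the new dies() range walks every extracted DWARFDebugInfoEntry, and each entry can be wrapped in a DWARFDie for attribute queries.

  for (DWARFDebugInfoEntry &Entry : CU->dies()) {
    DWARFDie Die(CU, &Entry);
    if (Die.getTag() == dwarf::DW_TAG_subprogram)
      llvm::outs() << "subprogram DIE at offset 0x"
                   << llvm::utohexstr(Die.getOffset()) << "\n";
  }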
diff --git a/include/llvm/DebugInfo/MSF/ByteStream.h b/include/llvm/DebugInfo/MSF/ByteStream.h
new file mode 100644
index 000000000000..547844be5e5d
--- /dev/null
+++ b/include/llvm/DebugInfo/MSF/ByteStream.h
@@ -0,0 +1,169 @@
+//===- ByteStream.h - Reads stream data from a byte sequence ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_BYTESTREAM_H
+#define LLVM_DEBUGINFO_MSF_BYTESTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/MSF/MSFError.h"
+#include "llvm/DebugInfo/MSF/StreamInterface.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileOutputBuffer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+namespace llvm {
+namespace msf {
+
+class ByteStream : public ReadableStream {
+public:
+ ByteStream() = default;
+ explicit ByteStream(ArrayRef<uint8_t> Data) : Data(Data) {}
+ explicit ByteStream(StringRef Data)
+ : Data(Data.bytes_begin(), Data.bytes_end()) {}
+
+ Error readBytes(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const override {
+ if (Offset > Data.size())
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ if (Data.size() < Size + Offset)
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ Buffer = Data.slice(Offset, Size);
+ return Error::success();
+ }
+
+ Error readLongestContiguousChunk(uint32_t Offset,
+ ArrayRef<uint8_t> &Buffer) const override {
+ if (Offset >= Data.size())
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ Buffer = Data.slice(Offset);
+ return Error::success();
+ }
+
+ uint32_t getLength() const override { return Data.size(); }
+
+ ArrayRef<uint8_t> data() const { return Data; }
+
+ StringRef str() const {
+ const char *CharData = reinterpret_cast<const char *>(Data.data());
+ return StringRef(CharData, Data.size());
+ }
+
+protected:
+ ArrayRef<uint8_t> Data;
+};
+
+// MemoryBufferByteStream behaves like a read-only ByteStream, but has its data
+// backed by an llvm::MemoryBuffer. It also owns the underlying MemoryBuffer.
+class MemoryBufferByteStream : public ByteStream {
+public:
+ explicit MemoryBufferByteStream(std::unique_ptr<MemoryBuffer> Buffer)
+ : ByteStream(ArrayRef<uint8_t>(Buffer->getBuffer().bytes_begin(),
+ Buffer->getBuffer().bytes_end())),
+ MemBuffer(std::move(Buffer)) {}
+
+ std::unique_ptr<MemoryBuffer> MemBuffer;
+};
+
+class MutableByteStream : public WritableStream {
+public:
+ MutableByteStream() = default;
+ explicit MutableByteStream(MutableArrayRef<uint8_t> Data)
+ : Data(Data), ImmutableStream(Data) {}
+
+ Error readBytes(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const override {
+ return ImmutableStream.readBytes(Offset, Size, Buffer);
+ }
+
+ Error readLongestContiguousChunk(uint32_t Offset,
+ ArrayRef<uint8_t> &Buffer) const override {
+ return ImmutableStream.readLongestContiguousChunk(Offset, Buffer);
+ }
+
+ uint32_t getLength() const override { return ImmutableStream.getLength(); }
+
+ Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) const override {
+ if (Buffer.empty())
+ return Error::success();
+
+ if (Data.size() < Buffer.size())
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ if (Offset > Data.size() - Buffer.size())
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+
+ uint8_t *DataPtr = const_cast<uint8_t *>(Data.data());
+ ::memcpy(DataPtr + Offset, Buffer.data(), Buffer.size());
+ return Error::success();
+ }
+
+ Error commit() const override { return Error::success(); }
+
+ MutableArrayRef<uint8_t> data() const { return Data; }
+
+private:
+ MutableArrayRef<uint8_t> Data;
+ ByteStream ImmutableStream;
+};
+
+// A simple adapter that acts like a ByteStream but holds ownership over
+// an underlying FileOutputBuffer.
+class FileBufferByteStream : public WritableStream {
+private:
+ class StreamImpl : public MutableByteStream {
+ public:
+ StreamImpl(std::unique_ptr<FileOutputBuffer> Buffer)
+ : MutableByteStream(MutableArrayRef<uint8_t>(Buffer->getBufferStart(),
+ Buffer->getBufferEnd())),
+ FileBuffer(std::move(Buffer)) {}
+
+ Error commit() const override {
+ if (FileBuffer->commit())
+ return llvm::make_error<MSFError>(msf_error_code::not_writable);
+ return Error::success();
+ }
+
+ private:
+ std::unique_ptr<FileOutputBuffer> FileBuffer;
+ };
+
+public:
+ explicit FileBufferByteStream(std::unique_ptr<FileOutputBuffer> Buffer)
+ : Impl(std::move(Buffer)) {}
+
+ Error readBytes(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const override {
+ return Impl.readBytes(Offset, Size, Buffer);
+ }
+
+ Error readLongestContiguousChunk(uint32_t Offset,
+ ArrayRef<uint8_t> &Buffer) const override {
+ return Impl.readLongestContiguousChunk(Offset, Buffer);
+ }
+
+ uint32_t getLength() const override { return Impl.getLength(); }
+
+ Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const override {
+ return Impl.writeBytes(Offset, Data);
+ }
+
+ Error commit() const override { return Impl.commit(); }
+
+private:
+ StreamImpl Impl;
+};
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_BYTESTREAM_H
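
Usage sketch (not part of the diff): wrap a small in-memory buffer in a msf::ByteStream and read a slice back through the ReadableStream interface.

  uint8_t Bytes[] = {1, 2, 3, 4, 5, 6, 7, 8};
  msf::ByteStream Stream(makeArrayRef(Bytes));
  ArrayRef<uint8_t> Chunk;
  if (Error E = Stream.readBytes(/*Offset=*/2, /*Size=*/4, Chunk))
    consumeError(std::move(E));      // out-of-range request
  else
    llvm::outs() << "read " << Chunk.size() << " bytes starting with "
                 << int(Chunk[0]) << "\n";   // prints: read 4 bytes starting with 3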
diff --git a/include/llvm/DebugInfo/PDB/Raw/IPDBFile.h b/include/llvm/DebugInfo/MSF/IMSFFile.h
index fccea2ac2470..f98e715e6b15 100644
--- a/include/llvm/DebugInfo/PDB/Raw/IPDBFile.h
+++ b/include/llvm/DebugInfo/MSF/IMSFFile.h
@@ -1,4 +1,4 @@
-//===- IPDBFile.h - Abstract base class for a PDB file ----------*- C++ -*-===//
+//===- IMSFFile.h - Abstract base class for an MSF file ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,23 +7,20 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_PDB_RAW_IPDBFILE_H
-#define LLVM_DEBUGINFO_PDB_RAW_IPDBFILE_H
+#ifndef LLVM_DEBUGINFO_MSF_IMSFFILE_H
+#define LLVM_DEBUGINFO_MSF_IMSFFILE_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
-
-#include <stdint.h>
+#include <cstdint>
namespace llvm {
-namespace pdb {
+namespace msf {
-class IPDBFile {
+class IMSFFile {
public:
- virtual ~IPDBFile() {}
+ virtual ~IMSFFile() = default;
virtual uint32_t getBlockSize() const = 0;
virtual uint32_t getBlockCount() const = 0;
@@ -38,7 +35,8 @@ public:
virtual Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
ArrayRef<uint8_t> Data) const = 0;
};
-}
-}
-#endif // LLVM_DEBUGINFO_PDB_RAW_IPDBFILE_H
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_IMSFFILE_H
diff --git a/include/llvm/DebugInfo/PDB/Raw/MsfBuilder.h b/include/llvm/DebugInfo/MSF/MSFBuilder.h
index 92d9bc042cce..6d067cc1c238 100644
--- a/include/llvm/DebugInfo/PDB/Raw/MsfBuilder.h
+++ b/include/llvm/DebugInfo/MSF/MSFBuilder.h
@@ -7,14 +7,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_PDB_RAW_MSFBUILDER_H
-#define LLVM_DEBUGINFO_PDB_RAW_MSFBUILDER_H
+#ifndef LLVM_DEBUGINFO_MSF_MSFBUILDER_H
+#define LLVM_DEBUGINFO_MSF_MSFBUILDER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
-#include "llvm/DebugInfo/PDB/Raw/MsfCommon.h"
-#include "llvm/DebugInfo/PDB/Raw/PDBFile.h"
+#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
@@ -24,19 +23,19 @@
#include <vector>
namespace llvm {
-namespace pdb {
-class MsfBuilder {
+namespace msf {
+class MSFBuilder {
public:
- /// \brief Create a new `MsfBuilder`.
+ /// \brief Create a new `MSFBuilder`.
///
/// \param BlockSize The internal block size used by the PDB file. See
/// isValidBlockSize() for a list of valid block sizes.
///
/// \param MinBlockCount Causes the builder to reserve up front space for
- /// at least `MinBlockCount` blocks. This is useful when using `MsfBuilder`
- /// to read an existing PDB that you want to write back out later. The
- /// original PDB file's SuperBlock contains the exact number of blocks used
- /// by the file, so is a good hint as to how many blocks the new PDB file
+ /// at least `MinBlockCount` blocks. This is useful when using `MSFBuilder`
+ /// to read an existing MSF that you want to write back out later. The
+ /// original MSF file's SuperBlock contains the exact number of blocks used
+ /// by the file, so is a good hint as to how many blocks the new MSF file
/// will contain. Furthermore, it is actually necessary in this case. To
/// preserve stability of the file's layout, it is helpful to try to keep
/// all streams mapped to their original block numbers. To ensure that this
@@ -45,7 +44,7 @@ public:
///
/// \param CanGrow If true, any operation which results in an attempt to
/// locate a free block when all available blocks have been exhausted will
- /// allocate a new block, thereby growing the size of the final PDB file.
+ /// allocate a new block, thereby growing the size of the final MSF file.
/// When false, any such attempt will result in an error. This is especially
/// useful in testing scenarios when you know your test isn't going to do
/// anything to increase the size of the file, so having an Error returned if
@@ -55,34 +54,34 @@ public:
/// failed. Currently the only way this can fail is if an invalid block size
/// is specified, or `MinBlockCount` does not leave enough room for the
/// mandatory reserved blocks required by an MSF file.
- static Expected<MsfBuilder> create(BumpPtrAllocator &Allocator,
+ static Expected<MSFBuilder> create(BumpPtrAllocator &Allocator,
uint32_t BlockSize,
uint32_t MinBlockCount = 0,
bool CanGrow = true);
/// Request the block map to be at a specific block address. This is useful
- /// when editing a PDB and you want the layout to be as stable as possible.
+ /// when editing an MSF and you want the layout to be as stable as possible.
Error setBlockMapAddr(uint32_t Addr);
Error setDirectoryBlocksHint(ArrayRef<uint32_t> DirBlocks);
void setFreePageMap(uint32_t Fpm);
void setUnknown1(uint32_t Unk1);
/// Add a stream to the MSF file with the given size, occupying the given
- /// list of blocks. This is useful when reading a PDB file and you want a
+ /// list of blocks. This is useful when reading an MSF file and you want a
/// particular stream to occupy the original set of blocks. If the given
/// blocks are already allocated, or if the number of blocks specified is
/// incorrect for the given stream size, this function will return an Error.
- Error addStream(uint32_t Size, ArrayRef<uint32_t> Blocks);
+ Expected<uint32_t> addStream(uint32_t Size, ArrayRef<uint32_t> Blocks);
/// Add a stream to the MSF file with the given size, occupying any available
/// blocks that the builder decides to use. This is useful when building a
/// new PDB file from scratch and you don't care what blocks a stream occupies
/// but you just want it to work.
- Error addStream(uint32_t Size);
+ Expected<uint32_t> addStream(uint32_t Size);
/// Update the size of an existing stream. This will allocate or deallocate
/// blocks as needed to match the requested size. This can fail if `CanGrow`
- /// was set to false when initializing the `MsfBuilder`.
+ /// was set to false when initializing the `MSFBuilder`.
Error setStreamSize(uint32_t Idx, uint32_t Size);
/// Get the total number of streams in the MSF layout. This should return 1
@@ -112,10 +111,12 @@ public:
/// Finalize the layout and build the headers and structures that describe the
/// MSF layout and can be written directly to the MSF file.
- Expected<msf::Layout> build();
+ Expected<MSFLayout> build();
+
+ BumpPtrAllocator &getAllocator() { return Allocator; }
private:
- MsfBuilder(uint32_t BlockSize, uint32_t MinBlockCount, bool CanGrow,
+ MSFBuilder(uint32_t BlockSize, uint32_t MinBlockCount, bool CanGrow,
BumpPtrAllocator &Allocator);
Error allocateBlocks(uint32_t NumBlocks, MutableArrayRef<uint32_t> Blocks);
@@ -127,7 +128,7 @@ private:
bool IsGrowable;
uint32_t FreePageMap;
- uint32_t Unknown1;
+ uint32_t Unknown1 = 0;
uint32_t BlockSize;
uint32_t MininumBlocks;
uint32_t BlockMapAddr;
@@ -135,7 +136,7 @@ private:
std::vector<uint32_t> DirectoryBlocks;
std::vector<std::pair<uint32_t, BlockList>> StreamData;
};
-}
-}
+} // namespace msf
+} // namespace llvm
-#endif
+#endif // LLVM_DEBUGINFO_MSF_MSFBUILDER_H
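Taken together, create/addStream/build describe the usual life cycle of the builder. A minimal sketch using only the signatures declared above; buildTwoStreamLayout is a hypothetical helper and the block size and stream sizes are illustrative.

#include "llvm/DebugInfo/MSF/MSFBuilder.h"
#include "llvm/Support/Allocator.h"

using namespace llvm;
using namespace llvm::msf;

static Expected<MSFLayout> buildTwoStreamLayout(BumpPtrAllocator &Allocator) {
  auto ExpectedMsf = MSFBuilder::create(Allocator, /*BlockSize=*/4096);
  if (!ExpectedMsf)
    return ExpectedMsf.takeError();
  MSFBuilder &Msf = *ExpectedMsf;

  // Let the builder pick blocks for a 16 KiB stream...
  Expected<uint32_t> SN0 = Msf.addStream(16 * 1024);
  if (!SN0)
    return SN0.takeError();
  // ...and a second, 100-byte stream.
  Expected<uint32_t> SN1 = Msf.addStream(100);
  if (!SN1)
    return SN1.takeError();

  return Msf.build();   // finalize the layout and return the resulting MSFLayout
}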
diff --git a/include/llvm/DebugInfo/PDB/Raw/MsfCommon.h b/include/llvm/DebugInfo/MSF/MSFCommon.h
index 2f6a6986eba9..93a9c808b736 100644
--- a/include/llvm/DebugInfo/PDB/Raw/MsfCommon.h
+++ b/include/llvm/DebugInfo/MSF/MSFCommon.h
@@ -1,4 +1,4 @@
-//===- MsfCommon.h - Common types and functions for MSF files ---*- C++ -*-===//
+//===- MSFCommon.h - Common types and functions for MSF files ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,10 +7,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_PDB_RAW_MSFCOMMON_H
-#define LLVM_DEBUGINFO_PDB_RAW_MSFCOMMON_H
+#ifndef LLVM_DEBUGINFO_MSF_MSFCOMMON_H
+#define LLVM_DEBUGINFO_MSF_MSFCOMMON_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
@@ -19,7 +20,6 @@
#include <vector>
namespace llvm {
-namespace pdb {
namespace msf {
static const char Magic[] = {'M', 'i', 'c', 'r', 'o', 's', 'o', 'f',
't', ' ', 'C', '/', 'C', '+', '+', ' ',
@@ -38,7 +38,7 @@ struct SuperBlock {
// The index of the free block map.
support::ulittle32_t FreeBlockMapBlock;
// This contains the number of blocks resident in the file system. In
- // practice, NumBlocks * BlockSize is equivalent to the size of the PDB
+ // practice, NumBlocks * BlockSize is equivalent to the size of the MSF
// file.
support::ulittle32_t NumBlocks;
// This contains the number of bytes which make up the directory.
@@ -49,8 +49,10 @@ struct SuperBlock {
support::ulittle32_t BlockMapAddr;
};
-struct Layout {
- SuperBlock *SB;
+struct MSFLayout {
+ MSFLayout() : SB(nullptr) {}
+ const SuperBlock *SB;
+ BitVector FreePageMap;
ArrayRef<support::ulittle32_t> DirectoryBlocks;
ArrayRef<support::ulittle32_t> StreamSizes;
std::vector<ArrayRef<support::ulittle32_t>> StreamMap;
@@ -82,9 +84,21 @@ inline uint64_t blockToOffset(uint64_t BlockNumber, uint64_t BlockSize) {
return BlockNumber * BlockSize;
}
-Error validateSuperBlock(const SuperBlock &SB);
+inline uint32_t getFpmIntervalLength(const MSFLayout &L) {
+ return L.SB->BlockSize;
}
+
+inline uint32_t getNumFpmIntervals(const MSFLayout &L) {
+ uint32_t Length = getFpmIntervalLength(L);
+ return llvm::alignTo(L.SB->NumBlocks, Length) / Length;
}
+
+inline uint32_t getFullFpmByteSize(const MSFLayout &L) {
+ return llvm::alignTo(L.SB->NumBlocks, 8) / 8;
}
-#endif
+Error validateSuperBlock(const SuperBlock &SB);
+} // namespace msf
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MSFCOMMON_H
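The free page map helpers above are pure arithmetic over the SuperBlock fields. A worked example with illustrative numbers:

// Illustrative numbers only; L is an MSFLayout whose SuperBlock reports
// BlockSize = 4096 and NumBlocks = 10000.
//   getFpmIntervalLength(L) == 4096
//   getNumFpmIntervals(L)   == alignTo(10000, 4096) / 4096 == 12288 / 4096 == 3
//   getFullFpmByteSize(L)   == alignTo(10000, 8) / 8 == 1250   // one bit per block, rounded up to a byte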
diff --git a/include/llvm/DebugInfo/MSF/MSFError.h b/include/llvm/DebugInfo/MSF/MSFError.h
new file mode 100644
index 000000000000..e66aeca3cd45
--- /dev/null
+++ b/include/llvm/DebugInfo/MSF/MSFError.h
@@ -0,0 +1,47 @@
+//===- MSFError.h - Error extensions for MSF Files --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_MSFERROR_H
+#define LLVM_DEBUGINFO_MSF_MSFERROR_H
+
+#include "llvm/Support/Error.h"
+
+#include <string>
+
+namespace llvm {
+namespace msf {
+enum class msf_error_code {
+ unspecified = 1,
+ insufficient_buffer,
+ not_writable,
+ no_stream,
+ invalid_format,
+ block_in_use
+};
+
+/// Base class for errors originating while parsing MSF files.
+class MSFError : public ErrorInfo<MSFError> {
+public:
+ static char ID;
+ MSFError(msf_error_code C);
+ MSFError(const std::string &Context);
+ MSFError(msf_error_code C, const std::string &Context);
+
+ void log(raw_ostream &OS) const override;
+ const std::string &getErrorMessage() const;
+ std::error_code convertToErrorCode() const override;
+
+private:
+ std::string ErrMsg;
+ msf_error_code Code;
+};
+} // namespace msf
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MSFERROR_H
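MSFError plugs into the standard llvm::Error machinery, so callers typically create one with make_error and consume it with handleErrors. A small sketch; reportTruncation and the message text are illustrative.

#include "llvm/DebugInfo/MSF/MSFError.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::msf;

static void reportTruncation() {
  Error E = make_error<MSFError>(msf_error_code::insufficient_buffer,
                                 "stream directory is truncated");
  // Handle the MSF error; anything the handler does not recognise is returned.
  Error Remaining = handleErrors(std::move(E), [](const MSFError &Info) {
    errs() << "MSF error: " << Info.getErrorMessage() << "\n";
  });
  consumeError(std::move(Remaining));
}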
diff --git a/include/llvm/DebugInfo/MSF/MSFStreamLayout.h b/include/llvm/DebugInfo/MSF/MSFStreamLayout.h
new file mode 100644
index 000000000000..bdde98f52662
--- /dev/null
+++ b/include/llvm/DebugInfo/MSF/MSFStreamLayout.h
@@ -0,0 +1,35 @@
+//===- MSFStreamLayout.h - Describes the layout of a stream -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_MSFSTREAMLAYOUT_H
+#define LLVM_DEBUGINFO_MSF_MSFSTREAMLAYOUT_H
+
+#include "llvm/Support/Endian.h"
+
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace msf {
+
+/// \brief Describes the layout of a stream within an MSF file. A "stream" here
+/// is defined as any logical unit of data which may be arranged inside the MSF
+/// file as a sequence of (possibly discontiguous) blocks. When we want to read
+/// from a particular MSF Stream, we fill out a stream layout structure and the
+/// reader uses it to determine which blocks in the underlying MSF file contain
+/// the data, so that it can be pieced together in the right order.
+class MSFStreamLayout {
+public:
+ uint32_t Length;
+ std::vector<support::ulittle32_t> Blocks;
+};
+} // namespace msf
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MSFSTREAMLAYOUT_H
diff --git a/include/llvm/DebugInfo/MSF/MappedBlockStream.h b/include/llvm/DebugInfo/MSF/MappedBlockStream.h
new file mode 100644
index 000000000000..fff4e9cecef5
--- /dev/null
+++ b/include/llvm/DebugInfo/MSF/MappedBlockStream.h
@@ -0,0 +1,144 @@
+//===- MappedBlockStream.h - Discontiguous stream data in an MSF -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
+#define LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/DebugInfo/MSF/MSFStreamLayout.h"
+#include "llvm/DebugInfo/MSF/StreamInterface.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace msf {
+
+struct MSFLayout;
+
+/// MappedBlockStream represents data stored in an MSF file that is broken up
+/// into chunks of a particular size (the block size), and whose chunks may
+/// not be contiguous. The arrangement of these chunks within the MSF file is
+/// described by some other metadata contained within the MSF file. In
+/// the case of a standard MSF Stream, the layout of the stream's blocks
+/// is described by the MSF "directory", but in the case of the directory
+/// itself, the layout is described by an array at a fixed location within
+/// the MSF. MappedBlockStream provides methods for reading from and writing
+/// to one of these streams transparently, as if it were a contiguous sequence
+/// of bytes.
+class MappedBlockStream : public ReadableStream {
+ friend class WritableMappedBlockStream;
+public:
+ static std::unique_ptr<MappedBlockStream>
+ createStream(uint32_t BlockSize, uint32_t NumBlocks,
+ const MSFStreamLayout &Layout, const ReadableStream &MsfData);
+
+ static std::unique_ptr<MappedBlockStream>
+ createIndexedStream(const MSFLayout &Layout, const ReadableStream &MsfData,
+ uint32_t StreamIndex);
+
+ static std::unique_ptr<MappedBlockStream>
+ createFpmStream(const MSFLayout &Layout, const ReadableStream &MsfData);
+
+ static std::unique_ptr<MappedBlockStream>
+ createDirectoryStream(const MSFLayout &Layout, const ReadableStream &MsfData);
+
+ Error readBytes(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const override;
+ Error readLongestContiguousChunk(uint32_t Offset,
+ ArrayRef<uint8_t> &Buffer) const override;
+
+ uint32_t getLength() const override;
+
+ uint32_t getNumBytesCopied() const;
+
+ llvm::BumpPtrAllocator &getAllocator() { return Pool; }
+
+ void invalidateCache();
+
+ uint32_t getBlockSize() const { return BlockSize; }
+ uint32_t getNumBlocks() const { return NumBlocks; }
+ uint32_t getStreamLength() const { return StreamLayout.Length; }
+
+protected:
+ MappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks,
+ const MSFStreamLayout &StreamLayout,
+ const ReadableStream &MsfData);
+
+private:
+ const MSFStreamLayout &getStreamLayout() const { return StreamLayout; }
+ void fixCacheAfterWrite(uint32_t Offset, ArrayRef<uint8_t> Data) const;
+
+ Error readBytes(uint32_t Offset, MutableArrayRef<uint8_t> Buffer) const;
+ bool tryReadContiguously(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const;
+
+ const uint32_t BlockSize;
+ const uint32_t NumBlocks;
+ const MSFStreamLayout StreamLayout;
+ const ReadableStream &MsfData;
+
+ typedef MutableArrayRef<uint8_t> CacheEntry;
+ mutable llvm::BumpPtrAllocator Pool;
+ mutable DenseMap<uint32_t, std::vector<CacheEntry>> CacheMap;
+};
+
+class WritableMappedBlockStream : public WritableStream {
+public:
+ static std::unique_ptr<WritableMappedBlockStream>
+ createStream(uint32_t BlockSize, uint32_t NumBlocks,
+ const MSFStreamLayout &Layout, const WritableStream &MsfData);
+
+ static std::unique_ptr<WritableMappedBlockStream>
+ createIndexedStream(const MSFLayout &Layout, const WritableStream &MsfData,
+ uint32_t StreamIndex);
+
+ static std::unique_ptr<WritableMappedBlockStream>
+ createDirectoryStream(const MSFLayout &Layout, const WritableStream &MsfData);
+
+ static std::unique_ptr<WritableMappedBlockStream>
+ createFpmStream(const MSFLayout &Layout, const WritableStream &MsfData);
+
+ Error readBytes(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const override;
+ Error readLongestContiguousChunk(uint32_t Offset,
+ ArrayRef<uint8_t> &Buffer) const override;
+ uint32_t getLength() const override;
+
+ Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) const override;
+
+ Error commit() const override;
+
+ const MSFStreamLayout &getStreamLayout() const {
+ return ReadInterface.getStreamLayout();
+ }
+ uint32_t getBlockSize() const { return ReadInterface.getBlockSize(); }
+ uint32_t getNumBlocks() const { return ReadInterface.getNumBlocks(); }
+ uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); }
+
+protected:
+ WritableMappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks,
+ const MSFStreamLayout &StreamLayout,
+ const WritableStream &MsfData);
+
+private:
+ MappedBlockStream ReadInterface;
+
+ const WritableStream &WriteInterface;
+};
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
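The static factory functions above are the only way to obtain a MappedBlockStream. A minimal sketch of reading through a discontiguous stream, reusing the MutableByteStream from ByteStream.h as the backing ReadableStream; readAcrossBlocks, the block numbers, and the sizes are all made up for illustration.

#include "llvm/DebugInfo/MSF/ByteStream.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include <vector>

using namespace llvm;
using namespace llvm::msf;

static Error readAcrossBlocks() {
  // Pretend MSF image: 4 blocks of 64 bytes each.
  std::vector<uint8_t> File(4 * 64, 0);
  MutableByteStream MsfData{MutableArrayRef<uint8_t>(File)};

  // A 100-byte stream stored in blocks 2 and 1, in that order.
  MSFStreamLayout Layout;
  Layout.Length = 100;
  Layout.Blocks.resize(2);
  Layout.Blocks[0] = 2;
  Layout.Blocks[1] = 1;

  auto Stream = MappedBlockStream::createStream(/*BlockSize=*/64,
                                                /*NumBlocks=*/4, Layout, MsfData);

  // This read spans the seam between the two blocks, so the stream stitches
  // the pieces together in its internal pool and hands back a stable view.
  ArrayRef<uint8_t> Bytes;
  return Stream->readBytes(60, 8, Bytes);
}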
diff --git a/include/llvm/DebugInfo/MSF/SequencedItemStream.h b/include/llvm/DebugInfo/MSF/SequencedItemStream.h
new file mode 100644
index 000000000000..1949beef9fff
--- /dev/null
+++ b/include/llvm/DebugInfo/MSF/SequencedItemStream.h
@@ -0,0 +1,93 @@
+//===- SequencedItemStream.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_SEQUENCEDITEMSTREAM_H
+#define LLVM_DEBUGINFO_MSF_SEQUENCEDITEMSTREAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/DebugInfo/MSF/MSFError.h"
+#include "llvm/DebugInfo/MSF/StreamInterface.h"
+#include "llvm/Support/Error.h"
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+namespace msf {
+
+template <typename T> struct SequencedItemTraits {
+ static size_t length(const T &Item) = delete;
+ static ArrayRef<uint8_t> bytes(const T &Item) = delete;
+};
+
+/// SequencedItemStream represents a sequence of objects stored in a
+/// standard container but for which it is useful to view as a stream of
+/// contiguous bytes. An example of this might be if you have a std::vector
+/// of TPI records, where each record contains a byte sequence that
+/// represents that one record serialized, but where each consecutive item
+/// might not be allocated immediately after the previous item. Using a
+/// SequencedItemStream, we can adapt the VarStreamArray class to trivially
+/// extract one item at a time, allowing the data to be used anywhere a
+/// VarStreamArray could be used.
+template <typename T, typename Traits = SequencedItemTraits<T>>
+class SequencedItemStream : public ReadableStream {
+public:
+ SequencedItemStream() = default;
+
+ Error readBytes(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const override {
+ auto ExpectedIndex = translateOffsetIndex(Offset);
+ if (!ExpectedIndex)
+ return ExpectedIndex.takeError();
+ const auto &Item = Items[*ExpectedIndex];
+ if (Size > Traits::length(Item))
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ Buffer = Traits::bytes(Item).take_front(Size);
+ return Error::success();
+ }
+
+ Error readLongestContiguousChunk(uint32_t Offset,
+ ArrayRef<uint8_t> &Buffer) const override {
+ auto ExpectedIndex = translateOffsetIndex(Offset);
+ if (!ExpectedIndex)
+ return ExpectedIndex.takeError();
+ Buffer = Traits::bytes(Items[*ExpectedIndex]);
+ return Error::success();
+ }
+
+ void setItems(ArrayRef<T> ItemArray) { Items = ItemArray; }
+
+ uint32_t getLength() const override {
+ uint32_t Size = 0;
+ for (const auto &Item : Items)
+ Size += Traits::length(Item);
+ return Size;
+ }
+
+private:
+ Expected<uint32_t> translateOffsetIndex(uint32_t Offset) const {
+ uint32_t CurrentOffset = 0;
+ uint32_t CurrentIndex = 0;
+ for (const auto &Item : Items) {
+ if (CurrentOffset >= Offset)
+ break;
+ CurrentOffset += Traits::length(Item);
+ ++CurrentIndex;
+ }
+ if (CurrentOffset != Offset)
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ return CurrentIndex;
+ }
+
+ ArrayRef<T> Items;
+};
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_SEQUENCEDITEMSTREAM_H
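The SequencedItemTraits primary template is deliberately left unimplemented; each element type supplies its own specialization. A sketch for a hypothetical record type (OwnedRecord is invented for illustration) that simply owns its serialized bytes:

#include "llvm/DebugInfo/MSF/SequencedItemStream.h"
#include <vector>

// Hypothetical record type: each element owns its serialized form.
struct OwnedRecord {
  std::vector<uint8_t> Bytes;
};

namespace llvm {
namespace msf {
template <> struct SequencedItemTraits<OwnedRecord> {
  static size_t length(const OwnedRecord &R) { return R.Bytes.size(); }
  static ArrayRef<uint8_t> bytes(const OwnedRecord &R) { return R.Bytes; }
};
} // end namespace msf
} // end namespace llvm

// With the traits in place, a container of records can be viewed as a stream:
//   std::vector<OwnedRecord> Records = ...;
//   llvm::msf::SequencedItemStream<OwnedRecord> Stream;
//   Stream.setItems(Records);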
diff --git a/include/llvm/DebugInfo/CodeView/StreamArray.h b/include/llvm/DebugInfo/MSF/StreamArray.h
index 0b9349aac753..d8b74bc75c94 100644
--- a/include/llvm/DebugInfo/CodeView/StreamArray.h
+++ b/include/llvm/DebugInfo/MSF/StreamArray.h
@@ -1,4 +1,4 @@
-//===- StreamArray.h - Array backed by an arbitrary stream ----------------===//
+//===- StreamArray.h - Array backed by an arbitrary stream ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,17 +7,17 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_CODEVIEW_STREAMARRAY_H
-#define LLVM_DEBUGINFO_CODEVIEW_STREAMARRAY_H
+#ifndef LLVM_DEBUGINFO_MSF_STREAMARRAY_H
+#define LLVM_DEBUGINFO_MSF_STREAMARRAY_H
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/Support/Error.h"
-
-#include <functional>
-#include <type_traits>
+#include <cassert>
+#include <cstdint>
namespace llvm {
-namespace codeview {
+namespace msf {
/// VarStreamArrayExtractor is intended to be specialized to provide customized
/// extraction logic. On input it receives a StreamRef pointing to the
@@ -34,7 +34,8 @@ namespace codeview {
template <typename T> struct VarStreamArrayExtractor {
// Method intentionally deleted. You must provide an explicit specialization
// with the following method implemented.
- Error operator()(StreamRef Stream, uint32_t &Len, T &Item) const = delete;
+ Error operator()(ReadableStreamRef Stream, uint32_t &Len,
+ T &Item) const = delete;
};
/// VarStreamArray represents an array of variable length records backed by a
@@ -74,17 +75,19 @@ template <typename ValueType, typename Extractor> class VarStreamArrayIterator;
template <typename ValueType,
typename Extractor = VarStreamArrayExtractor<ValueType>>
+
class VarStreamArray {
friend class VarStreamArrayIterator<ValueType, Extractor>;
public:
typedef VarStreamArrayIterator<ValueType, Extractor> Iterator;
- VarStreamArray() {}
+ VarStreamArray() = default;
explicit VarStreamArray(const Extractor &E) : E(E) {}
- explicit VarStreamArray(StreamRef Stream) : Stream(Stream) {}
- VarStreamArray(StreamRef Stream, const Extractor &E) : Stream(Stream), E(E) {}
+ explicit VarStreamArray(ReadableStreamRef Stream) : Stream(Stream) {}
+ VarStreamArray(ReadableStreamRef Stream, const Extractor &E)
+ : Stream(Stream), E(E) {}
VarStreamArray(const VarStreamArray<ValueType, Extractor> &Other)
: Stream(Other.Stream), E(Other.E) {}
@@ -97,10 +100,10 @@ public:
const Extractor &getExtractor() const { return E; }
- StreamRef getUnderlyingStream() const { return Stream; }
+ ReadableStreamRef getUnderlyingStream() const { return Stream; }
private:
- StreamRef Stream;
+ ReadableStreamRef Stream;
Extractor E;
};
@@ -122,9 +125,9 @@ public:
}
}
}
- VarStreamArrayIterator() {}
+ VarStreamArrayIterator() = default;
explicit VarStreamArrayIterator(const Extractor &E) : Extract(E) {}
- ~VarStreamArrayIterator() {}
+ ~VarStreamArrayIterator() = default;
bool operator==(const IterType &R) const {
if (Array && R.Array) {
@@ -189,7 +192,7 @@ private:
}
ValueType ThisValue;
- StreamRef IterRef;
+ ReadableStreamRef IterRef;
const ArrayType *Array{nullptr};
uint32_t ThisLen{0};
bool HasError{false};
@@ -203,8 +206,8 @@ template <typename T> class FixedStreamArray {
friend class FixedStreamArrayIterator<T>;
public:
- FixedStreamArray() : Stream() {}
- FixedStreamArray(StreamRef Stream) : Stream(Stream) {
+ FixedStreamArray() = default;
+ FixedStreamArray(ReadableStreamRef Stream) : Stream(Stream) {
assert(Stream.getLength() % sizeof(T) == 0);
}
@@ -226,14 +229,15 @@ public:
FixedStreamArrayIterator<T> begin() const {
return FixedStreamArrayIterator<T>(*this, 0);
}
+
FixedStreamArrayIterator<T> end() const {
return FixedStreamArrayIterator<T>(*this, size());
}
- StreamRef getUnderlyingStream() const { return Stream; }
+ ReadableStreamRef getUnderlyingStream() const { return Stream; }
private:
- StreamRef Stream;
+ ReadableStreamRef Stream;
};
template <typename T> class FixedStreamArrayIterator {
@@ -269,7 +273,7 @@ private:
uint32_t Index;
};
-} // namespace codeview
+} // namespace msf
} // namespace llvm
-#endif // LLVM_DEBUGINFO_CODEVIEW_STREAMARRAY_H
+#endif // LLVM_DEBUGINFO_MSF_STREAMARRAY_H
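VarStreamArrayExtractor follows the same specialization pattern. A sketch for a hypothetical record stored as a 4-byte little-endian length followed by a payload; PrefixedRecord and its on-disk shape are invented for illustration.

#include "llvm/DebugInfo/MSF/StreamArray.h"
#include "llvm/Support/Endian.h"

// Hypothetical on-disk shape: [u32 payload size][payload bytes].
struct PrefixedRecord {
  llvm::ArrayRef<uint8_t> Payload;
};

namespace llvm {
namespace msf {
template <> struct VarStreamArrayExtractor<PrefixedRecord> {
  Error operator()(ReadableStreamRef Stream, uint32_t &Len,
                   PrefixedRecord &Item) const {
    ArrayRef<uint8_t> Header;
    if (auto EC = Stream.readBytes(0, sizeof(uint32_t), Header))
      return EC;
    uint32_t PayloadSize = support::endian::read32le(Header.data());
    if (auto EC = Stream.readBytes(sizeof(uint32_t), PayloadSize, Item.Payload))
      return EC;
    Len = sizeof(uint32_t) + PayloadSize;  // total bytes this record occupies
    return Error::success();
  }
};
} // end namespace msf
} // end namespace llvm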
diff --git a/include/llvm/DebugInfo/CodeView/StreamInterface.h b/include/llvm/DebugInfo/MSF/StreamInterface.h
index 241aec457870..09782d8e3b30 100644
--- a/include/llvm/DebugInfo/CodeView/StreamInterface.h
+++ b/include/llvm/DebugInfo/MSF/StreamInterface.h
@@ -7,26 +7,19 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_CODEVIEW_STREAMINTERFACE_H
-#define LLVM_DEBUGINFO_CODEVIEW_STREAMINTERFACE_H
+#ifndef LLVM_DEBUGINFO_MSF_STREAMINTERFACE_H
+#define LLVM_DEBUGINFO_MSF_STREAMINTERFACE_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>
namespace llvm {
-namespace codeview {
-
-/// StreamInterface abstracts the notion of a data stream. This way, an
-/// implementation could implement trivial reading from a contiguous memory
-/// buffer or, as in the case of PDB files, reading from a set of possibly
-/// discontiguous blocks. The implementation is required to return references
-/// to stable memory, so if this is not possible (for example in the case of
-/// a PDB file with discontiguous blocks, it must keep its own pool of temp
-/// storage.
-class StreamInterface {
+namespace msf {
+
+class ReadableStream {
public:
- virtual ~StreamInterface() {}
+ virtual ~ReadableStream() = default;
// Given an offset into the stream and a number of bytes, attempt to read
// the bytes and set the output ArrayRef to point to a reference into the
@@ -39,17 +32,22 @@ public:
virtual Error readLongestContiguousChunk(uint32_t Offset,
ArrayRef<uint8_t> &Buffer) const = 0;
+ virtual uint32_t getLength() const = 0;
+};
+
+class WritableStream : public ReadableStream {
+public:
+ ~WritableStream() override = default;
+
// Attempt to write the given bytes into the stream at the desired offset.
// This will always necessitate a copy. Cannot shrink or grow the stream,
// only writes into existing allocated space.
virtual Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const = 0;
- virtual uint32_t getLength() const = 0;
-
virtual Error commit() const = 0;
};
-} // end namespace codeview
+} // end namespace msf
} // end namespace llvm
-#endif // LLVM_DEBUGINFO_CODEVIEW_STREAMINTERFACE_H
+#endif // LLVM_DEBUGINFO_MSF_STREAMINTERFACE_H
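ReadableStream is the minimal contract every backing store has to satisfy: hand out stable references and never copy on read. A sketch of the smallest possible implementation over caller-owned memory (essentially what ByteStream does); ArrayRefStream is a hypothetical name.

#include "llvm/DebugInfo/MSF/MSFError.h"
#include "llvm/DebugInfo/MSF/StreamInterface.h"

using namespace llvm;
using namespace llvm::msf;

// Minimal sketch: serve reads directly out of one contiguous buffer.
class ArrayRefStream : public ReadableStream {
public:
  explicit ArrayRefStream(ArrayRef<uint8_t> Data) : Data(Data) {}

  Error readBytes(uint32_t Offset, uint32_t Size,
                  ArrayRef<uint8_t> &Buffer) const override {
    if (uint64_t(Offset) + Size > Data.size())
      return make_error<MSFError>(msf_error_code::insufficient_buffer);
    Buffer = Data.slice(Offset, Size);     // a reference, not a copy
    return Error::success();
  }

  Error readLongestContiguousChunk(uint32_t Offset,
                                   ArrayRef<uint8_t> &Buffer) const override {
    if (Offset >= Data.size())
      return make_error<MSFError>(msf_error_code::insufficient_buffer);
    Buffer = Data.drop_front(Offset);      // everything is contiguous here
    return Error::success();
  }

  uint32_t getLength() const override {
    return static_cast<uint32_t>(Data.size());
  }

private:
  ArrayRef<uint8_t> Data;
};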
diff --git a/include/llvm/DebugInfo/CodeView/StreamReader.h b/include/llvm/DebugInfo/MSF/StreamReader.h
index 2f497c2c43f1..fc2ca78dc18f 100644
--- a/include/llvm/DebugInfo/CodeView/StreamReader.h
+++ b/include/llvm/DebugInfo/MSF/StreamReader.h
@@ -7,35 +7,40 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_CODEVIEW_STREAMREADER_H
-#define LLVM_DEBUGINFO_CODEVIEW_STREAMREADER_H
+#ifndef LLVM_DEBUGINFO_MSF_STREAMREADER_H
+#define LLVM_DEBUGINFO_MSF_STREAMREADER_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/DebugInfo/CodeView/CodeViewError.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
+#include "llvm/DebugInfo/MSF/MSFError.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamInterface.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <string>
namespace llvm {
-namespace codeview {
-
-class StreamRef;
+namespace msf {
class StreamReader {
public:
- StreamReader(StreamRef Stream);
+ StreamReader(ReadableStreamRef Stream);
Error readLongestContiguousChunk(ArrayRef<uint8_t> &Buffer);
Error readBytes(ArrayRef<uint8_t> &Buffer, uint32_t Size);
+ Error readInteger(uint8_t &Dest);
Error readInteger(uint16_t &Dest);
Error readInteger(uint32_t &Dest);
+ Error readInteger(uint64_t &Dest);
+ Error readInteger(int8_t &Dest);
+ Error readInteger(int16_t &Dest);
+ Error readInteger(int32_t &Dest);
+ Error readInteger(int64_t &Dest);
Error readZeroString(StringRef &Dest);
Error readFixedString(StringRef &Dest, uint32_t Length);
- Error readStreamRef(StreamRef &Ref);
- Error readStreamRef(StreamRef &Ref, uint32_t Length);
+ Error readStreamRef(ReadableStreamRef &Ref);
+ Error readStreamRef(ReadableStreamRef &Ref, uint32_t Length);
template <typename T> Error readEnum(T &Dest) {
typename std::underlying_type<T>::type N;
@@ -61,8 +66,8 @@ public:
return Error::success();
}
- if (NumElements > UINT32_MAX/sizeof(T))
- return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
+ if (NumElements > UINT32_MAX / sizeof(T))
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
if (auto EC = readBytes(Bytes, NumElements * sizeof(T)))
return EC;
@@ -72,7 +77,7 @@ public:
template <typename T, typename U>
Error readArray(VarStreamArray<T, U> &Array, uint32_t Size) {
- StreamRef S;
+ ReadableStreamRef S;
if (auto EC = readStreamRef(S, Size))
return EC;
Array = VarStreamArray<T, U>(S, Array.getExtractor());
@@ -87,25 +92,30 @@ public:
}
uint32_t Length = NumItems * sizeof(T);
if (Length / sizeof(T) != NumItems)
- return make_error<CodeViewError>(cv_error_code::corrupt_record);
+ return make_error<MSFError>(msf_error_code::invalid_format);
if (Offset + Length > Stream.getLength())
- return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
- StreamRef View = Stream.slice(Offset, Length);
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ ReadableStreamRef View = Stream.slice(Offset, Length);
Array = FixedStreamArray<T>(View);
Offset += Length;
return Error::success();
}
+ bool empty() const { return bytesRemaining() == 0; }
void setOffset(uint32_t Off) { Offset = Off; }
uint32_t getOffset() const { return Offset; }
uint32_t getLength() const { return Stream.getLength(); }
uint32_t bytesRemaining() const { return getLength() - getOffset(); }
+ Error skip(uint32_t Amount);
+
+ uint8_t peek() const;
+
private:
- StreamRef Stream;
+ ReadableStreamRef Stream;
uint32_t Offset;
};
-} // namespace codeview
+} // namespace msf
} // namespace llvm
-#endif // LLVM_DEBUGINFO_CODEVIEW_STREAMREADER_H
+#endif // LLVM_DEBUGINFO_MSF_STREAMREADER_H
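StreamReader keeps a cursor into a ReadableStreamRef and advances it as typed values are pulled out. A sketch of decoding a small header, again over the MutableByteStream from ByteStream.h; decodeHeader and the field layout are illustrative.

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/MSF/ByteStream.h"
#include "llvm/DebugInfo/MSF/StreamReader.h"

using namespace llvm;
using namespace llvm::msf;

static Error decodeHeader(MutableArrayRef<uint8_t> Bytes) {
  MutableByteStream Stream{Bytes};
  StreamReader Reader(Stream);           // ReadableStreamRef is built implicitly

  uint32_t Signature;
  uint16_t Version;
  StringRef Name;
  if (auto EC = Reader.readInteger(Signature))
    return EC;
  if (auto EC = Reader.readInteger(Version))
    return EC;
  if (auto EC = Reader.readZeroString(Name))    // NUL-terminated string field
    return EC;
  // Whatever is left can be handed off as its own sub-stream.
  ReadableStreamRef Rest;
  return Reader.readStreamRef(Rest, Reader.bytesRemaining());
}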
diff --git a/include/llvm/DebugInfo/MSF/StreamRef.h b/include/llvm/DebugInfo/MSF/StreamRef.h
new file mode 100644
index 000000000000..eee71e53a39b
--- /dev/null
+++ b/include/llvm/DebugInfo/MSF/StreamRef.h
@@ -0,0 +1,135 @@
+//===- StreamRef.h - A copyable reference to a stream -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_MSF_STREAMREF_H
+#define LLVM_DEBUGINFO_MSF_STREAMREF_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/DebugInfo/MSF/MSFError.h"
+#include "llvm/DebugInfo/MSF/StreamInterface.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cstdint>
+
+namespace llvm {
+namespace msf {
+
+template <class StreamType, class RefType> class StreamRefBase {
+public:
+ StreamRefBase() : Stream(nullptr), ViewOffset(0), Length(0) {}
+ StreamRefBase(const StreamType &Stream, uint32_t Offset, uint32_t Length)
+ : Stream(&Stream), ViewOffset(Offset), Length(Length) {}
+
+ uint32_t getLength() const { return Length; }
+ const StreamType *getStream() const { return Stream; }
+
+ RefType drop_front(uint32_t N) const {
+ if (!Stream)
+ return RefType();
+
+ N = std::min(N, Length);
+ return RefType(*Stream, ViewOffset + N, Length - N);
+ }
+
+ RefType keep_front(uint32_t N) const {
+ if (!Stream)
+ return RefType();
+ N = std::min(N, Length);
+ return RefType(*Stream, ViewOffset, N);
+ }
+
+ RefType slice(uint32_t Offset, uint32_t Len) const {
+ return drop_front(Offset).keep_front(Len);
+ }
+
+ bool operator==(const RefType &Other) const {
+ if (Stream != Other.Stream)
+ return false;
+ if (ViewOffset != Other.ViewOffset)
+ return false;
+ if (Length != Other.Length)
+ return false;
+ return true;
+ }
+
+protected:
+ const StreamType *Stream;
+ uint32_t ViewOffset;
+ uint32_t Length;
+};
+
+class ReadableStreamRef
+ : public StreamRefBase<ReadableStream, ReadableStreamRef> {
+public:
+ ReadableStreamRef() = default;
+ ReadableStreamRef(const ReadableStream &Stream)
+ : StreamRefBase(Stream, 0, Stream.getLength()) {}
+ ReadableStreamRef(const ReadableStream &Stream, uint32_t Offset,
+ uint32_t Length)
+ : StreamRefBase(Stream, Offset, Length) {}
+
+ // Use StreamRef.slice() instead.
+ ReadableStreamRef(const ReadableStreamRef &S, uint32_t Offset,
+ uint32_t Length) = delete;
+
+ Error readBytes(uint32_t Offset, uint32_t Size,
+ ArrayRef<uint8_t> &Buffer) const {
+ if (ViewOffset + Offset < Offset)
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ if (Size + Offset > Length)
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ return Stream->readBytes(ViewOffset + Offset, Size, Buffer);
+ }
+
+ // Given an offset into the stream, read as much as possible without copying
+ // any data.
+ Error readLongestContiguousChunk(uint32_t Offset,
+ ArrayRef<uint8_t> &Buffer) const {
+ if (Offset >= Length)
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+
+ if (auto EC = Stream->readLongestContiguousChunk(ViewOffset + Offset, Buffer))
+ return EC;
+ // This StreamRef might refer to a smaller window over a larger stream. In
+ // that case we will have read out more bytes than we should return, because
+ // we should not read past the end of the current view.
+ uint32_t MaxLength = Length - Offset;
+ if (Buffer.size() > MaxLength)
+ Buffer = Buffer.slice(0, MaxLength);
+ return Error::success();
+ }
+};
+
+class WritableStreamRef
+ : public StreamRefBase<WritableStream, WritableStreamRef> {
+public:
+ WritableStreamRef() = default;
+ WritableStreamRef(const WritableStream &Stream)
+ : StreamRefBase(Stream, 0, Stream.getLength()) {}
+ WritableStreamRef(const WritableStream &Stream, uint32_t Offset,
+ uint32_t Length)
+ : StreamRefBase(Stream, Offset, Length) {}
+
+ // Use StreamRef.slice() instead.
+ WritableStreamRef(const WritableStreamRef &S, uint32_t Offset,
+ uint32_t Length) = delete;
+
+ Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const {
+ if (Data.size() + Offset > Length)
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
+ return Stream->writeBytes(ViewOffset + Offset, Data);
+ }
+
+ Error commit() const { return Stream->commit(); }
+};
+
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_STREAMREF_H
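StreamRefBase gives both ref types cheap windowing: drop_front, keep_front, and slice only adjust the (offset, length) pair and never touch the underlying stream. A short sketch over an in-memory stream; readFromWindow and the offsets are illustrative.

#include "llvm/DebugInfo/MSF/ByteStream.h"
#include "llvm/DebugInfo/MSF/StreamRef.h"
#include <vector>

using namespace llvm;
using namespace llvm::msf;

static Error readFromWindow() {
  std::vector<uint8_t> Storage(256, 0);
  MutableByteStream Stream{MutableArrayRef<uint8_t>(Storage)};

  ReadableStreamRef Whole(Stream);                  // covers all 256 bytes
  ReadableStreamRef Window = Whole.slice(16, 32);   // bytes [16, 48) of the stream

  // Offsets passed to the ref are relative to the window, not the stream.
  ArrayRef<uint8_t> Bytes;
  return Window.readBytes(0, 8, Bytes);             // reads stream bytes [16, 24)
}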
diff --git a/include/llvm/DebugInfo/CodeView/StreamWriter.h b/include/llvm/DebugInfo/MSF/StreamWriter.h
index 4d393d2ef790..2bb14434dd83 100644
--- a/include/llvm/DebugInfo/CodeView/StreamWriter.h
+++ b/include/llvm/DebugInfo/MSF/StreamWriter.h
@@ -7,34 +7,39 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_CODEVIEW_STREAMWRITER_H
-#define LLVM_DEBUGINFO_CODEVIEW_STREAMWRITER_H
+#ifndef LLVM_DEBUGINFO_MSF_STREAMWRITER_H
+#define LLVM_DEBUGINFO_MSF_STREAMWRITER_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/DebugInfo/CodeView/CodeViewError.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
-#include "llvm/Support/Endian.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/MSF/MSFError.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/Support/Error.h"
-
-#include <string>
+#include <cstdint>
+#include <type_traits>
namespace llvm {
-namespace codeview {
-
-class StreamRef;
+namespace msf {
class StreamWriter {
public:
- StreamWriter(StreamRef Stream);
+ StreamWriter() = default;
+ explicit StreamWriter(WritableStreamRef Stream);
Error writeBytes(ArrayRef<uint8_t> Buffer);
+ Error writeInteger(uint8_t Int);
Error writeInteger(uint16_t Dest);
Error writeInteger(uint32_t Dest);
+ Error writeInteger(uint64_t Dest);
+ Error writeInteger(int8_t Int);
+ Error writeInteger(int16_t Dest);
+ Error writeInteger(int32_t Dest);
+ Error writeInteger(int64_t Dest);
Error writeZeroString(StringRef Str);
Error writeFixedString(StringRef Str);
- Error writeStreamRef(StreamRef Ref);
- Error writeStreamRef(StreamRef Ref, uint32_t Size);
+ Error writeStreamRef(ReadableStreamRef Ref);
+ Error writeStreamRef(ReadableStreamRef Ref, uint32_t Size);
template <typename T> Error writeEnum(T Num) {
return writeInteger(
@@ -51,11 +56,11 @@ public:
}
template <typename T> Error writeArray(ArrayRef<T> Array) {
- if (Array.size() == 0)
+ if (Array.empty())
return Error::success();
if (Array.size() > UINT32_MAX / sizeof(T))
- return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
+ return make_error<MSFError>(msf_error_code::insufficient_buffer);
return writeBytes(
ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(Array.data()),
@@ -77,10 +82,11 @@ public:
uint32_t bytesRemaining() const { return getLength() - getOffset(); }
private:
- StreamRef Stream;
- uint32_t Offset;
+ WritableStreamRef Stream;
+ uint32_t Offset = 0;
};
-} // namespace codeview
-} // namespace llvm
-#endif // LLVM_DEBUGINFO_CODEVIEW_STREAMREADER_H
+} // end namespace msf
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_MSF_STREAMWRITER_H
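StreamWriter is the mirror image of StreamReader: it advances a cursor while serializing into a WritableStreamRef. A short sketch writing a few fields into an in-memory buffer; writeHeader, the magic value, and the field layout are illustrative.

#include "llvm/DebugInfo/MSF/ByteStream.h"
#include "llvm/DebugInfo/MSF/StreamWriter.h"
#include <vector>

using namespace llvm;
using namespace llvm::msf;

static Error writeHeader() {
  std::vector<uint8_t> Storage(64, 0);
  MutableByteStream Stream{MutableArrayRef<uint8_t>(Storage)};

  WritableStreamRef Ref(Stream);
  StreamWriter Writer(Ref);

  if (auto EC = Writer.writeInteger(uint32_t(0x53465342)))  // arbitrary magic
    return EC;
  if (auto EC = Writer.writeInteger(uint16_t(1)))           // version field
    return EC;
  if (auto EC = Writer.writeZeroString("example"))
    return EC;
  return Ref.commit();   // a no-op in memory, but required for file-backed streams
}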
diff --git a/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h b/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
index 50f5c40bcac9..9bf073831565 100644
--- a/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
+++ b/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
@@ -10,8 +10,11 @@
#ifndef LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
#define LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
-#include "IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cstdint>
#include <memory>
namespace llvm {
@@ -23,7 +26,7 @@ public:
ConcreteSymbolEnumerator(std::unique_ptr<IPDBEnumSymbols> SymbolEnumerator)
: Enumerator(std::move(SymbolEnumerator)) {}
- ~ConcreteSymbolEnumerator() override {}
+ ~ConcreteSymbolEnumerator() override = default;
uint32_t getChildCount() const override {
return Enumerator->getChildCount();
@@ -55,7 +58,8 @@ private:
std::unique_ptr<IPDBEnumSymbols> Enumerator;
};
-}
-}
-#endif
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
diff --git a/include/llvm/DebugInfo/PDB/DIA/DIAError.h b/include/llvm/DebugInfo/PDB/DIA/DIAError.h
index f198d07e99d4..35a39a0df5ca 100644
--- a/include/llvm/DebugInfo/PDB/DIA/DIAError.h
+++ b/include/llvm/DebugInfo/PDB/DIA/DIAError.h
@@ -10,10 +10,9 @@
#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
-#include <string>
-
namespace llvm {
namespace pdb {
enum class dia_error_code {
@@ -30,11 +29,11 @@ class DIAError : public ErrorInfo<DIAError> {
public:
static char ID;
DIAError(dia_error_code C);
- DIAError(const std::string &Context);
- DIAError(dia_error_code C, const std::string &Context);
+ DIAError(StringRef Context);
+ DIAError(dia_error_code C, StringRef Context);
void log(raw_ostream &OS) const override;
- const std::string &getErrorMessage() const;
+ StringRef getErrorMessage() const;
std::error_code convertToErrorCode() const override;
private:
diff --git a/include/llvm/DebugInfo/PDB/GenericError.h b/include/llvm/DebugInfo/PDB/GenericError.h
index 959c26161044..466cb455651b 100644
--- a/include/llvm/DebugInfo/PDB/GenericError.h
+++ b/include/llvm/DebugInfo/PDB/GenericError.h
@@ -10,6 +10,7 @@
#ifndef LLVM_DEBUGINFO_PDB_ERROR_H
#define LLVM_DEBUGINFO_PDB_ERROR_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
namespace llvm {
@@ -26,11 +27,11 @@ class GenericError : public ErrorInfo<GenericError> {
public:
static char ID;
GenericError(generic_error_code C);
- GenericError(const std::string &Context);
- GenericError(generic_error_code C, const std::string &Context);
+ GenericError(StringRef Context);
+ GenericError(generic_error_code C, StringRef Context);
void log(raw_ostream &OS) const override;
- const std::string &getErrorMessage() const;
+ StringRef getErrorMessage() const;
std::error_code convertToErrorCode() const override;
private:
diff --git a/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h b/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
index 8e9f6f883679..e48dc250822e 100644
--- a/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
+++ b/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
@@ -10,7 +10,7 @@
#ifndef LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
#define LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
-#include "PDBTypes.h"
+#include <cstdint>
#include <memory>
namespace llvm {
@@ -21,7 +21,7 @@ public:
typedef std::unique_ptr<ChildType> ChildTypePtr;
typedef IPDBEnumChildren<ChildType> MyType;
- virtual ~IPDBEnumChildren() {}
+ virtual ~IPDBEnumChildren() = default;
virtual uint32_t getChildCount() const = 0;
virtual ChildTypePtr getChildAtIndex(uint32_t Index) const = 0;
@@ -29,7 +29,8 @@ public:
virtual void reset() = 0;
virtual MyType *clone() const = 0;
};
-}
-}
-#endif
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
diff --git a/include/llvm/DebugInfo/PDB/IPDBSession.h b/include/llvm/DebugInfo/PDB/IPDBSession.h
index 3d2c37eff2e3..15e97ac198e5 100644
--- a/include/llvm/DebugInfo/PDB/IPDBSession.h
+++ b/include/llvm/DebugInfo/PDB/IPDBSession.h
@@ -40,7 +40,7 @@ public:
T *ConcreteSymbol = dyn_cast<T>(Symbol.get());
if (!ConcreteSymbol)
return nullptr;
- Symbol.release();
+ (void)Symbol.release();
return std::unique_ptr<T>(ConcreteSymbol);
}
diff --git a/include/llvm/DebugInfo/PDB/PDBContext.h b/include/llvm/DebugInfo/PDB/PDBContext.h
index 836e39248438..84ab8ed173cb 100644
--- a/include/llvm/DebugInfo/PDB/PDBContext.h
+++ b/include/llvm/DebugInfo/PDB/PDBContext.h
@@ -12,14 +12,18 @@
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
+#include <cstdint>
+#include <memory>
+#include <string>
namespace llvm {
namespace object {
class COFFObjectFile;
- }
+} // end namespace object
+
+namespace pdb {
- namespace pdb {
/// PDBContext
/// This data structure is the top level entity that deals with PDB debug
/// information parsing. This data structure exists only when there is a
@@ -27,20 +31,18 @@ class COFFObjectFile;
/// (e.g. PDB and DWARF). More control and power over the debug information
/// access can be had by using the PDB interfaces directly.
class PDBContext : public DIContext {
-
- PDBContext(PDBContext &) = delete;
- PDBContext &operator=(PDBContext &) = delete;
-
public:
PDBContext(const object::COFFObjectFile &Object,
std::unique_ptr<IPDBSession> PDBSession);
+ PDBContext(PDBContext &) = delete;
+ PDBContext &operator=(PDBContext &) = delete;
static bool classof(const DIContext *DICtx) {
return DICtx->getKind() == CK_PDB;
}
void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All,
- bool DumpEH = false) override;
+ bool DumpEH = false, bool SummarizeTypes = false) override;
DILineInfo getLineInfoForAddress(
uint64_t Address,
@@ -56,7 +58,9 @@ class COFFObjectFile;
std::string getFunctionName(uint64_t Address, DINameKind NameKind) const;
std::unique_ptr<IPDBSession> Session;
};
- }
-}
-#endif
+} // end namespace pdb
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBCONTEXT_H
diff --git a/include/llvm/DebugInfo/PDB/PDBTypes.h b/include/llvm/DebugInfo/PDB/PDBTypes.h
index a9325a434366..0d232f15d745 100644
--- a/include/llvm/DebugInfo/PDB/PDBTypes.h
+++ b/include/llvm/DebugInfo/PDB/PDBTypes.h
@@ -12,9 +12,10 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
-#include <functional>
+#include "llvm/DebugInfo/PDB/Raw/RawTypes.h"
#include <cstdint>
#include <cstring>
+#include <functional>
namespace llvm {
namespace pdb {
@@ -73,13 +74,6 @@ enum class PDB_ReaderType {
Raw = 1,
};
-/// Defines a 128-bit unique identifier. This maps to a GUID on Windows, but
-/// is abstracted here for the purposes of non-Windows platforms that don't have
-/// the GUID structure defined.
-struct PDB_UniqueId {
- char Guid[16];
-};
-
/// An enumeration indicating the type of data contained in this table.
enum class PDB_TableType {
Symbols,
diff --git a/include/llvm/DebugInfo/PDB/Raw/DbiStream.h b/include/llvm/DebugInfo/PDB/Raw/DbiStream.h
index 6ab3c8067558..c97ca32ab43d 100644
--- a/include/llvm/DebugInfo/PDB/Raw/DbiStream.h
+++ b/include/llvm/DebugInfo/PDB/Raw/DbiStream.h
@@ -11,10 +11,10 @@
#define LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAM_H
#include "llvm/DebugInfo/CodeView/ModuleSubstream.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
-#include "llvm/DebugInfo/PDB/Raw/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Raw/ModInfo.h"
#include "llvm/DebugInfo/PDB/Raw/NameHashTable.h"
#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
@@ -36,32 +36,8 @@ class ISectionContribVisitor;
class DbiStream {
friend class DbiStreamBuilder;
- struct HeaderInfo {
- support::little32_t VersionSignature;
- support::ulittle32_t VersionHeader;
- support::ulittle32_t Age; // Should match InfoStream.
- support::ulittle16_t GlobalSymbolStreamIndex; // Global symbol stream #
- support::ulittle16_t BuildNumber; // See DbiBuildNo structure.
- support::ulittle16_t PublicSymbolStreamIndex; // Public symbols stream #
- support::ulittle16_t PdbDllVersion; // version of mspdbNNN.dll
- support::ulittle16_t SymRecordStreamIndex; // Symbol records stream #
- support::ulittle16_t PdbDllRbld; // rbld number of mspdbNNN.dll
- support::little32_t ModiSubstreamSize; // Size of module info stream
- support::little32_t SecContrSubstreamSize; // Size of sec. contrib stream
- support::little32_t SectionMapSize; // Size of sec. map substream
- support::little32_t FileInfoSize; // Size of file info substream
- support::little32_t TypeServerSize; // Size of type server map
- support::ulittle32_t MFCTypeServerIndex; // Index of MFC Type Server
- support::little32_t OptionalDbgHdrSize; // Size of DbgHeader info
- support::little32_t ECSubstreamSize; // Size of EC stream (what is EC?)
- support::ulittle16_t Flags; // See DbiFlags enum.
- support::ulittle16_t MachineType; // See PDB_MachineType enum.
-
- support::ulittle32_t Reserved; // Pad to 64 bytes
- };
-
public:
- DbiStream(PDBFile &File, std::unique_ptr<MappedBlockStream> Stream);
+ DbiStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
~DbiStream();
Error reload();
@@ -86,8 +62,6 @@ public:
PDB_Machine getMachineType() const;
- enum { InvalidStreamIndex = 0xffff };
-
/// If the given stream type is present, returns its stream index. If it is
/// not present, returns InvalidStreamIndex.
uint32_t getDebugStreamIndex(DbgHeaderType Type) const;
@@ -96,16 +70,15 @@ public:
Expected<StringRef> getFileNameForIndex(uint32_t Index) const;
- codeview::FixedStreamArray<object::coff_section> getSectionHeaders();
+ msf::FixedStreamArray<object::coff_section> getSectionHeaders();
- codeview::FixedStreamArray<object::FpoData> getFpoRecords();
+ msf::FixedStreamArray<object::FpoData> getFpoRecords();
- codeview::FixedStreamArray<SecMapEntry> getSectionMap() const;
+ msf::FixedStreamArray<SecMapEntry> getSectionMap() const;
void visitSectionContributions(ISectionContribVisitor &Visitor) const;
- Error commit();
-
private:
+ Error initializeModInfoArray();
Error initializeSectionContributionData();
Error initializeSectionHeadersData();
Error initializeSectionMapData();
@@ -113,35 +86,35 @@ private:
Error initializeFpoRecords();
PDBFile &Pdb;
- std::unique_ptr<MappedBlockStream> Stream;
+ std::unique_ptr<msf::MappedBlockStream> Stream;
std::vector<ModuleInfoEx> ModuleInfos;
NameHashTable ECNames;
- codeview::StreamRef ModInfoSubstream;
- codeview::StreamRef SecContrSubstream;
- codeview::StreamRef SecMapSubstream;
- codeview::StreamRef FileInfoSubstream;
- codeview::StreamRef TypeServerMapSubstream;
- codeview::StreamRef ECSubstream;
+ msf::ReadableStreamRef ModInfoSubstream;
+ msf::ReadableStreamRef SecContrSubstream;
+ msf::ReadableStreamRef SecMapSubstream;
+ msf::ReadableStreamRef FileInfoSubstream;
+ msf::ReadableStreamRef TypeServerMapSubstream;
+ msf::ReadableStreamRef ECSubstream;
- codeview::StreamRef NamesBuffer;
+ msf::ReadableStreamRef NamesBuffer;
- codeview::FixedStreamArray<support::ulittle16_t> DbgStreams;
+ msf::FixedStreamArray<support::ulittle16_t> DbgStreams;
PdbRaw_DbiSecContribVer SectionContribVersion;
- codeview::FixedStreamArray<SectionContrib> SectionContribs;
- codeview::FixedStreamArray<SectionContrib2> SectionContribs2;
- codeview::FixedStreamArray<SecMapEntry> SectionMap;
- codeview::FixedStreamArray<support::little32_t> FileNameOffsets;
+ msf::FixedStreamArray<SectionContrib> SectionContribs;
+ msf::FixedStreamArray<SectionContrib2> SectionContribs2;
+ msf::FixedStreamArray<SecMapEntry> SectionMap;
+ msf::FixedStreamArray<support::little32_t> FileNameOffsets;
- std::unique_ptr<MappedBlockStream> SectionHeaderStream;
- codeview::FixedStreamArray<object::coff_section> SectionHeaders;
+ std::unique_ptr<msf::MappedBlockStream> SectionHeaderStream;
+ msf::FixedStreamArray<object::coff_section> SectionHeaders;
- std::unique_ptr<MappedBlockStream> FpoStream;
- codeview::FixedStreamArray<object::FpoData> FpoRecords;
+ std::unique_ptr<msf::MappedBlockStream> FpoStream;
+ msf::FixedStreamArray<object::FpoData> FpoRecords;
- const HeaderInfo *Header;
+ const DbiStreamHeader *Header;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Raw/DbiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Raw/DbiStreamBuilder.h
index 2c7350f3c3e7..99a3ac7fb1da 100644
--- a/include/llvm/DebugInfo/PDB/Raw/DbiStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Raw/DbiStreamBuilder.h
@@ -11,20 +11,31 @@
#define LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAMBUILDER_H
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Error.h"
+#include "llvm/DebugInfo/MSF/ByteStream.h"
+#include "llvm/DebugInfo/MSF/StreamReader.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/DebugInfo/PDB/Raw/PDBFile.h"
#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
+#include "llvm/Support/Endian.h"
namespace llvm {
+namespace msf {
+class MSFBuilder;
+}
+namespace object {
+struct coff_section;
+}
namespace pdb {
class DbiStream;
+struct DbiStreamHeader;
class PDBFile;
class DbiStreamBuilder {
public:
- DbiStreamBuilder();
+ DbiStreamBuilder(msf::MSFBuilder &Msf);
DbiStreamBuilder(const DbiStreamBuilder &) = delete;
DbiStreamBuilder &operator=(const DbiStreamBuilder &) = delete;
@@ -36,12 +47,57 @@ public:
void setPdbDllRbld(uint16_t R);
void setFlags(uint16_t F);
void setMachineType(PDB_Machine M);
+ void setSectionContribs(ArrayRef<SectionContrib> SecMap);
+ void setSectionMap(ArrayRef<SecMapEntry> SecMap);
+
+ // Add given bytes as a new stream.
+ Error addDbgStream(pdb::DbgHeaderType Type, ArrayRef<uint8_t> Data);
uint32_t calculateSerializedLength() const;
- Expected<std::unique_ptr<DbiStream>> build(PDBFile &File);
+ Error addModuleInfo(StringRef ObjFile, StringRef Module);
+ Error addModuleSourceFile(StringRef Module, StringRef File);
+
+ Error finalizeMsfLayout();
+
+ Error commit(const msf::MSFLayout &Layout,
+ const msf::WritableStream &Buffer);
+
+ // A helper function to create Section Contributions from COFF input
+ // section headers.
+ static std::vector<SectionContrib>
+ createSectionContribs(ArrayRef<llvm::object::coff_section> SecHdrs);
+
+ // A helper function to create a Section Map from a COFF section header.
+ static std::vector<SecMapEntry>
+ createSectionMap(ArrayRef<llvm::object::coff_section> SecHdrs);
private:
+ struct DebugStream {
+ ArrayRef<uint8_t> Data;
+ uint16_t StreamNumber = 0;
+ };
+
+ Error finalize();
+ uint32_t calculateModiSubstreamSize() const;
+ uint32_t calculateSectionContribsStreamSize() const;
+ uint32_t calculateSectionMapStreamSize() const;
+ uint32_t calculateFileInfoSubstreamSize() const;
+ uint32_t calculateNamesBufferSize() const;
+ uint32_t calculateDbgStreamsSize() const;
+
+ Error generateModiSubstream();
+ Error generateFileInfoSubstream();
+
+ struct ModuleInfo {
+ std::vector<StringRef> SourceFiles;
+ StringRef Obj;
+ StringRef Mod;
+ };
+
+ msf::MSFBuilder &Msf;
+ BumpPtrAllocator &Allocator;
+
Optional<PdbRaw_DbiVer> VerHeader;
uint32_t Age;
uint16_t BuildNumber;
@@ -49,6 +105,20 @@ private:
uint16_t PdbDllRbld;
uint16_t Flags;
PDB_Machine MachineType;
+
+ const DbiStreamHeader *Header;
+
+ StringMap<std::unique_ptr<ModuleInfo>> ModuleInfos;
+ std::vector<ModuleInfo *> ModuleInfoList;
+
+ StringMap<uint32_t> SourceFileNames;
+
+ msf::WritableStreamRef NamesBuffer;
+ msf::MutableByteStream ModInfoBuffer;
+ msf::MutableByteStream FileInfoBuffer;
+ ArrayRef<SectionContrib> SectionContribs;
+ ArrayRef<SecMapEntry> SectionMap;
+ llvm::SmallVector<DebugStream, (int)DbgHeaderType::Max> DbgStreams;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Raw/DirectoryStreamData.h b/include/llvm/DebugInfo/PDB/Raw/DirectoryStreamData.h
deleted file mode 100644
index 0f354315122c..000000000000
--- a/include/llvm/DebugInfo/PDB/Raw/DirectoryStreamData.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//===- DirectoryStreamData.h ---------------------------------- *- C++ --*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_PDB_RAW_DIRECTORYSTREAMDATA_H
-#define LLVM_DEBUGINFO_PDB_RAW_DIRECTORYSTREAMDATA_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/DebugInfo/PDB/Raw/IPDBStreamData.h"
-#include "llvm/DebugInfo/PDB/Raw/PDBFile.h"
-#include "llvm/Support/Endian.h"
-
-namespace llvm {
-namespace pdb {
-class IPDBFile;
-
-class DirectoryStreamData : public IPDBStreamData {
-public:
- DirectoryStreamData(const PDBFile &File) : File(File) {}
-
- virtual uint32_t getLength() { return File.getNumDirectoryBytes(); }
- virtual llvm::ArrayRef<llvm::support::ulittle32_t> getStreamBlocks() {
- return File.getDirectoryBlockArray();
- }
-
-private:
- const PDBFile &File;
-};
-}
-}
-
-#endif
diff --git a/include/llvm/DebugInfo/PDB/Raw/GlobalsStream.h b/include/llvm/DebugInfo/PDB/Raw/GlobalsStream.h
new file mode 100644
index 000000000000..175f093cf53c
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Raw/GlobalsStream.h
@@ -0,0 +1,45 @@
+//===- GlobalsStream.h - PDB Index of Symbols by Name ------ ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_GLOBALS_STREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_GLOBALS_STREAM_H
+
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Raw/RawTypes.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace pdb {
+class DbiStream;
+class PDBFile;
+
+class GlobalsStream {
+public:
+ explicit GlobalsStream(std::unique_ptr<msf::MappedBlockStream> Stream);
+ ~GlobalsStream();
+ Error commit();
+ msf::FixedStreamArray<support::ulittle32_t> getHashBuckets() const {
+ return HashBuckets;
+ }
+ uint32_t getNumBuckets() const { return NumBuckets; }
+ Error reload();
+
+private:
+ msf::FixedStreamArray<support::ulittle32_t> HashBuckets;
+ msf::FixedStreamArray<PSHashRecord> HashRecords;
+ uint32_t NumBuckets;
+ std::unique_ptr<msf::MappedBlockStream> Stream;
+};
+}
+}
+
+#endif
diff --git a/include/llvm/DebugInfo/PDB/Raw/IPDBStreamData.h b/include/llvm/DebugInfo/PDB/Raw/IPDBStreamData.h
deleted file mode 100644
index ab3c9f770755..000000000000
--- a/include/llvm/DebugInfo/PDB/Raw/IPDBStreamData.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===- IPDBStreamData.h - Base interface for PDB Stream Data ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_PDB_RAW_IPDBSTREAMDATA_H
-#define LLVM_DEBUGINFO_PDB_RAW_IPDBSTREAMDATA_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/Support/Endian.h"
-
-namespace llvm {
-namespace pdb {
-/// IPDBStream abstracts the notion of PDB stream data. Although we already
-/// have another stream abstraction (namely in the form of StreamInterface
-/// and MappedBlockStream), they assume that the stream data is referenced
-/// the same way. Namely, by looking in the directory to get the list of
-/// stream blocks, and by looking in the array of stream lengths to get the
-/// length. This breaks down for the directory itself, however, since its
-/// length and list of blocks are stored elsewhere. By abstracting the
-/// notion of stream data further, we can use a MappedBlockStream to read
-/// from the directory itself, or from an indexed stream which references
-/// the directory.
-class IPDBStreamData {
-public:
- virtual ~IPDBStreamData() {}
-
- virtual uint32_t getLength() = 0;
- virtual ArrayRef<support::ulittle32_t> getStreamBlocks() = 0;
-};
-}
-}
-
-#endif
diff --git a/include/llvm/DebugInfo/PDB/Raw/ISectionContribVisitor.h b/include/llvm/DebugInfo/PDB/Raw/ISectionContribVisitor.h
index 355a25a38ef8..fb00d6ad4bc7 100644
--- a/include/llvm/DebugInfo/PDB/Raw/ISectionContribVisitor.h
+++ b/include/llvm/DebugInfo/PDB/Raw/ISectionContribVisitor.h
@@ -12,17 +12,19 @@
namespace llvm {
namespace pdb {
+
struct SectionContrib;
struct SectionContrib2;
class ISectionContribVisitor {
public:
- virtual ~ISectionContribVisitor() {}
+ virtual ~ISectionContribVisitor() = default;
virtual void visit(const SectionContrib &C) = 0;
virtual void visit(const SectionContrib2 &C) = 0;
};
-} // namespace pdb
-} // namespace llvm
+
+} // end namespace pdb
+} // end namespace llvm
#endif // LLVM_DEBUGINFO_PDB_RAW_ISECTIONCONTRIBVISITOR_H
diff --git a/include/llvm/DebugInfo/PDB/Raw/IndexedStreamData.h b/include/llvm/DebugInfo/PDB/Raw/IndexedStreamData.h
deleted file mode 100644
index 30563bc5b898..000000000000
--- a/include/llvm/DebugInfo/PDB/Raw/IndexedStreamData.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//===- IndexedStreamData.h - Standard PDB Stream Data -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_PDB_RAW_INDEXEDSTREAMDATA_H
-#define LLVM_DEBUGINFO_PDB_RAW_INDEXEDSTREAMDATA_H
-
-#include "llvm/DebugInfo/PDB/Raw/IPDBStreamData.h"
-
-namespace llvm {
-namespace pdb {
-class IPDBFile;
-
-class IndexedStreamData : public IPDBStreamData {
-public:
- IndexedStreamData(uint32_t StreamIdx, const IPDBFile &File);
- virtual ~IndexedStreamData() {}
-
- uint32_t getLength() override;
- ArrayRef<support::ulittle32_t> getStreamBlocks() override;
-
-private:
- uint32_t StreamIdx;
- const IPDBFile &File;
-};
-}
-}
-
-#endif
diff --git a/include/llvm/DebugInfo/PDB/Raw/InfoStream.h b/include/llvm/DebugInfo/PDB/Raw/InfoStream.h
index 1980bec7153e..6b8b94ff1a36 100644
--- a/include/llvm/DebugInfo/PDB/Raw/InfoStream.h
+++ b/include/llvm/DebugInfo/PDB/Raw/InfoStream.h
@@ -11,8 +11,8 @@
#define LLVM_DEBUGINFO_PDB_RAW_PDBINFOSTREAM_H
#include "llvm/ADT/StringMap.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
-#include "llvm/DebugInfo/PDB/Raw/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Raw/NameMap.h"
#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
@@ -27,18 +27,10 @@ class PDBFile;
class InfoStream {
friend class InfoStreamBuilder;
- struct HeaderInfo {
- support::ulittle32_t Version;
- support::ulittle32_t Signature;
- support::ulittle32_t Age;
- PDB_UniqueId Guid;
- };
-
public:
- InfoStream(std::unique_ptr<MappedBlockStream> Stream);
+ InfoStream(std::unique_ptr<msf::MappedBlockStream> Stream);
Error reload();
- Error commit();
PdbRaw_ImplVer getVersion() const;
uint32_t getSignature() const;
@@ -49,7 +41,7 @@ public:
iterator_range<StringMapConstIterator<uint32_t>> named_streams() const;
private:
- std::unique_ptr<MappedBlockStream> Stream;
+ std::unique_ptr<msf::MappedBlockStream> Stream;
// PDB file format version. We only support VC70. See the enumeration
// `PdbRaw_ImplVer` for the other possible values.
diff --git a/include/llvm/DebugInfo/PDB/Raw/InfoStreamBuilder.h b/include/llvm/DebugInfo/PDB/Raw/InfoStreamBuilder.h
index e9869bb27863..cb60b1eb69bd 100644
--- a/include/llvm/DebugInfo/PDB/Raw/InfoStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Raw/InfoStreamBuilder.h
@@ -19,12 +19,16 @@
#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
namespace llvm {
+namespace msf {
+class MSFBuilder;
+class StreamWriter;
+}
namespace pdb {
class PDBFile;
class InfoStreamBuilder {
public:
- InfoStreamBuilder();
+ InfoStreamBuilder(msf::MSFBuilder &Msf);
InfoStreamBuilder(const InfoStreamBuilder &) = delete;
InfoStreamBuilder &operator=(const InfoStreamBuilder &) = delete;
@@ -37,13 +41,18 @@ public:
uint32_t calculateSerializedLength() const;
- Expected<std::unique_ptr<InfoStream>> build(PDBFile &File);
+ Error finalizeMsfLayout();
+
+ Error commit(const msf::MSFLayout &Layout,
+ const msf::WritableStream &Buffer) const;
private:
- Optional<PdbRaw_ImplVer> Ver;
- Optional<uint32_t> Sig;
- Optional<uint32_t> Age;
- Optional<PDB_UniqueId> Guid;
+ msf::MSFBuilder &Msf;
+
+ PdbRaw_ImplVer Ver;
+ uint32_t Sig;
+ uint32_t Age;
+ PDB_UniqueId Guid;
NameMapBuilder NamedStreams;
};
diff --git a/include/llvm/DebugInfo/PDB/Raw/MappedBlockStream.h b/include/llvm/DebugInfo/PDB/Raw/MappedBlockStream.h
deleted file mode 100644
index 36424c0d16ab..000000000000
--- a/include/llvm/DebugInfo/PDB/Raw/MappedBlockStream.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//===- MappedBlockStream.h - Reads stream data from a PDBFile ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_PDB_RAW_MAPPEDBLOCKSTREAM_H
-#define LLVM_DEBUGINFO_PDB_RAW_MAPPEDBLOCKSTREAM_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
-#include "llvm/DebugInfo/PDB/Raw/IPDBStreamData.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/Error.h"
-#include <cstdint>
-#include <vector>
-
-namespace llvm {
-namespace pdb {
-
-class IPDBFile;
-class PDBFile;
-
-class MappedBlockStream : public codeview::StreamInterface {
-public:
- Error readBytes(uint32_t Offset, uint32_t Size,
- ArrayRef<uint8_t> &Buffer) const override;
- Error readLongestContiguousChunk(uint32_t Offset,
- ArrayRef<uint8_t> &Buffer) const override;
- Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Buffer) const override;
-
- uint32_t getLength() const override;
- Error commit() const override;
-
- uint32_t getNumBytesCopied() const;
-
- static Expected<std::unique_ptr<MappedBlockStream>>
- createIndexedStream(uint32_t StreamIdx, const IPDBFile &File);
- static Expected<std::unique_ptr<MappedBlockStream>>
- createDirectoryStream(const PDBFile &File);
-
- llvm::BumpPtrAllocator &getAllocator() { return Pool; }
-
-protected:
- MappedBlockStream(std::unique_ptr<IPDBStreamData> Data, const IPDBFile &File);
-
- Error readBytes(uint32_t Offset, MutableArrayRef<uint8_t> Buffer) const;
- bool tryReadContiguously(uint32_t Offset, uint32_t Size,
- ArrayRef<uint8_t> &Buffer) const;
-
- const IPDBFile &Pdb;
- std::unique_ptr<IPDBStreamData> Data;
-
- typedef MutableArrayRef<uint8_t> CacheEntry;
- mutable llvm::BumpPtrAllocator Pool;
- mutable DenseMap<uint32_t, std::vector<CacheEntry>> CacheMap;
-};
-
-} // end namespace pdb
-} // end namespace llvm
-
-#endif // LLVM_DEBUGINFO_PDB_RAW_MAPPEDBLOCKSTREAM_H
diff --git a/include/llvm/DebugInfo/PDB/Raw/ModInfo.h b/include/llvm/DebugInfo/PDB/Raw/ModInfo.h
index b8da0bfabf38..bf5cf53b3313 100644
--- a/include/llvm/DebugInfo/PDB/Raw/ModInfo.h
+++ b/include/llvm/DebugInfo/PDB/Raw/ModInfo.h
@@ -11,24 +11,26 @@
#define LLVM_DEBUGINFO_PDB_RAW_MODINFO_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
+#include "llvm/DebugInfo/PDB/Raw/RawTypes.h"
+#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>
namespace llvm {
+
namespace pdb {
class ModInfo {
-private:
- struct FileLayout;
+ friend class DbiStreamBuilder;
public:
ModInfo();
ModInfo(const ModInfo &Info);
~ModInfo();
- static Error initialize(codeview::StreamRef Stream, ModInfo &Info);
+ static Error initialize(msf::ReadableStreamRef Stream, ModInfo &Info);
bool hasECInfo() const;
uint16_t getTypeServerIndex() const;
@@ -48,13 +50,12 @@ public:
private:
StringRef ModuleName;
StringRef ObjFileName;
- const FileLayout *Layout;
+ const ModuleInfoHeader *Layout = nullptr;
};
struct ModuleInfoEx {
ModuleInfoEx(const ModInfo &Info) : Info(Info) {}
- ModuleInfoEx(const ModuleInfoEx &Ex)
- : Info(Ex.Info), SourceFiles(Ex.SourceFiles) {}
+ ModuleInfoEx(const ModuleInfoEx &Ex) = default;
ModInfo Info;
std::vector<StringRef> SourceFiles;
@@ -62,9 +63,10 @@ struct ModuleInfoEx {
} // end namespace pdb
-namespace codeview {
+namespace msf {
+
template <> struct VarStreamArrayExtractor<pdb::ModInfo> {
- Error operator()(StreamRef Stream, uint32_t &Length,
+ Error operator()(ReadableStreamRef Stream, uint32_t &Length,
pdb::ModInfo &Info) const {
if (auto EC = pdb::ModInfo::initialize(Stream, Info))
return EC;
@@ -72,7 +74,8 @@ template <> struct VarStreamArrayExtractor<pdb::ModInfo> {
return Error::success();
}
};
-}
+
+} // end namespace msf
} // end namespace llvm
diff --git a/include/llvm/DebugInfo/PDB/Raw/ModStream.h b/include/llvm/DebugInfo/PDB/Raw/ModStream.h
index d22962cc1e28..d5e7a6830d8d 100644
--- a/include/llvm/DebugInfo/PDB/Raw/ModStream.h
+++ b/include/llvm/DebugInfo/PDB/Raw/ModStream.h
@@ -13,10 +13,10 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/ModuleSubstream.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
-#include "llvm/DebugInfo/PDB/Raw/MappedBlockStream.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/Support/Error.h"
namespace llvm {
@@ -26,11 +26,14 @@ class ModInfo;
class ModStream {
public:
- ModStream(const ModInfo &Module, std::unique_ptr<MappedBlockStream> Stream);
+ ModStream(const ModInfo &Module,
+ std::unique_ptr<msf::MappedBlockStream> Stream);
~ModStream();
Error reload();
+ uint32_t signature() const { return Signature; }
+
iterator_range<codeview::CVSymbolArray::Iterator>
symbols(bool *HadError) const;
@@ -42,12 +45,14 @@ public:
private:
const ModInfo &Mod;
- std::unique_ptr<MappedBlockStream> Stream;
+ uint32_t Signature;
+
+ std::unique_ptr<msf::MappedBlockStream> Stream;
codeview::CVSymbolArray SymbolsSubstream;
- codeview::StreamRef LinesSubstream;
- codeview::StreamRef C13LinesSubstream;
- codeview::StreamRef GlobalRefsSubstream;
+ msf::ReadableStreamRef LinesSubstream;
+ msf::ReadableStreamRef C13LinesSubstream;
+ msf::ReadableStreamRef GlobalRefsSubstream;
codeview::ModuleSubstreamArray LineInfo;
};
diff --git a/include/llvm/DebugInfo/PDB/Raw/NameHashTable.h b/include/llvm/DebugInfo/PDB/Raw/NameHashTable.h
index c9e060a3a70f..00d022d4d8e2 100644
--- a/include/llvm/DebugInfo/PDB/Raw/NameHashTable.h
+++ b/include/llvm/DebugInfo/PDB/Raw/NameHashTable.h
@@ -12,15 +12,15 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>
namespace llvm {
-namespace codeview {
+namespace msf {
class StreamReader;
}
namespace pdb {
@@ -29,7 +29,7 @@ class NameHashTable {
public:
NameHashTable();
- Error load(codeview::StreamReader &Stream);
+ Error load(msf::StreamReader &Stream);
uint32_t getNameCount() const { return NameCount; }
uint32_t getHashVersion() const { return HashVersion; }
@@ -38,11 +38,11 @@ public:
StringRef getStringForID(uint32_t ID) const;
uint32_t getIDForString(StringRef Str) const;
- codeview::FixedStreamArray<support::ulittle32_t> name_ids() const;
+ msf::FixedStreamArray<support::ulittle32_t> name_ids() const;
private:
- codeview::StreamRef NamesBuffer;
- codeview::FixedStreamArray<support::ulittle32_t> IDs;
+ msf::ReadableStreamRef NamesBuffer;
+ msf::FixedStreamArray<support::ulittle32_t> IDs;
uint32_t Signature;
uint32_t HashVersion;
uint32_t NameCount;
diff --git a/include/llvm/DebugInfo/PDB/Raw/NameMap.h b/include/llvm/DebugInfo/PDB/Raw/NameMap.h
index 8a9b0d187ace..de1163bc3079 100644
--- a/include/llvm/DebugInfo/PDB/Raw/NameMap.h
+++ b/include/llvm/DebugInfo/PDB/Raw/NameMap.h
@@ -16,7 +16,7 @@
#include <cstdint>
namespace llvm {
-namespace codeview {
+namespace msf {
class StreamReader;
class StreamWriter;
}
@@ -28,8 +28,7 @@ class NameMap {
public:
NameMap();
- Error load(codeview::StreamReader &Stream);
- Error commit(codeview::StreamWriter &Writer);
+ Error load(msf::StreamReader &Stream);
bool tryGetValue(StringRef Name, uint32_t &Value) const;
diff --git a/include/llvm/DebugInfo/PDB/Raw/NameMapBuilder.h b/include/llvm/DebugInfo/PDB/Raw/NameMapBuilder.h
index bf49bfd9bf2e..f5244ac21808 100644
--- a/include/llvm/DebugInfo/PDB/Raw/NameMapBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Raw/NameMapBuilder.h
@@ -17,6 +17,9 @@
#include <memory>
namespace llvm {
+namespace msf {
+class StreamWriter;
+}
namespace pdb {
class NameMap;
@@ -27,6 +30,7 @@ public:
void addMapping(StringRef Name, uint32_t Mapping);
Expected<std::unique_ptr<NameMap>> build();
+ Error commit(msf::StreamWriter &Writer) const;
uint32_t calculateSerializedLength() const;
diff --git a/include/llvm/DebugInfo/PDB/Raw/PDBFile.h b/include/llvm/DebugInfo/PDB/Raw/PDBFile.h
index f4d7eb47d3b9..29f5b2163d83 100644
--- a/include/llvm/DebugInfo/PDB/Raw/PDBFile.h
+++ b/include/llvm/DebugInfo/PDB/Raw/PDBFile.h
@@ -11,10 +11,10 @@
#define LLVM_DEBUGINFO_PDB_RAW_PDBFILE_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamInterface.h"
-#include "llvm/DebugInfo/PDB/Raw/IPDBFile.h"
-#include "llvm/DebugInfo/PDB/Raw/MsfCommon.h"
+#include "llvm/DebugInfo/MSF/IMSFFile.h"
+#include "llvm/DebugInfo/MSF/MSFCommon.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/MSF/StreamInterface.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
@@ -24,25 +24,26 @@
namespace llvm {
-namespace codeview {
-class StreamInterface;
+namespace msf {
+class MappedBlockStream;
}
namespace pdb {
class DbiStream;
+class GlobalsStream;
class InfoStream;
-class MappedBlockStream;
class NameHashTable;
class PDBFileBuilder;
class PublicsStream;
class SymbolStream;
class TpiStream;
-class PDBFile : public IPDBFile {
+class PDBFile : public msf::IMSFFile {
friend PDBFileBuilder;
public:
- explicit PDBFile(std::unique_ptr<codeview::StreamInterface> PdbFileBuffer);
+ PDBFile(std::unique_ptr<msf::ReadableStream> PdbFileBuffer,
+ BumpPtrAllocator &Allocator);
~PDBFile() override;
uint32_t getFreeBlockMapBlock() const;
@@ -66,11 +67,18 @@ public:
Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
ArrayRef<uint8_t> Data) const override;
- ArrayRef<support::ulittle32_t> getStreamSizes() const { return StreamSizes; }
+ ArrayRef<uint32_t> getFpmPages() const { return FpmPages; }
+
+ ArrayRef<support::ulittle32_t> getStreamSizes() const {
+ return ContainerLayout.StreamSizes;
+ }
ArrayRef<ArrayRef<support::ulittle32_t>> getStreamMap() const {
- return StreamMap;
+ return ContainerLayout.StreamMap;
}
+ const msf::MSFLayout &getMsfLayout() const { return ContainerLayout; }
+ const msf::ReadableStream &getMsfBuffer() const { return *Buffer; }
+
ArrayRef<support::ulittle32_t> getDirectoryBlockArray() const;
Error parseFileHeaders();
@@ -78,33 +86,45 @@ public:
Expected<InfoStream &> getPDBInfoStream();
Expected<DbiStream &> getPDBDbiStream();
+ Expected<GlobalsStream &> getPDBGlobalsStream();
Expected<TpiStream &> getPDBTpiStream();
Expected<TpiStream &> getPDBIpiStream();
Expected<PublicsStream &> getPDBPublicsStream();
Expected<SymbolStream &> getPDBSymbolStream();
Expected<NameHashTable &> getStringTable();
- Error commit();
+ BumpPtrAllocator &getAllocator() { return Allocator; }
+
+ bool hasPDBDbiStream() const;
+ bool hasPDBGlobalsStream();
+ bool hasPDBInfoStream();
+ bool hasPDBIpiStream() const;
+ bool hasPDBPublicsStream();
+ bool hasPDBSymbolStream();
+ bool hasPDBTpiStream() const;
+ bool hasStringTable();
+
+ private:
+ Expected<std::unique_ptr<msf::MappedBlockStream>> safelyCreateIndexedStream(
+ const msf::MSFLayout &Layout, const msf::ReadableStream &MsfData,
+ uint32_t StreamIndex) const;
-private:
- Error setSuperBlock(const msf::SuperBlock *Block);
+ BumpPtrAllocator &Allocator;
- BumpPtrAllocator Allocator;
+ std::unique_ptr<msf::ReadableStream> Buffer;
- std::unique_ptr<codeview::StreamInterface> Buffer;
- const msf::SuperBlock *SB;
- ArrayRef<support::ulittle32_t> StreamSizes;
- ArrayRef<support::ulittle32_t> DirectoryBlocks;
- std::vector<ArrayRef<support::ulittle32_t>> StreamMap;
+ std::vector<uint32_t> FpmPages;
+ msf::MSFLayout ContainerLayout;
+ std::unique_ptr<GlobalsStream> Globals;
std::unique_ptr<InfoStream> Info;
std::unique_ptr<DbiStream> Dbi;
std::unique_ptr<TpiStream> Tpi;
std::unique_ptr<TpiStream> Ipi;
std::unique_ptr<PublicsStream> Publics;
std::unique_ptr<SymbolStream> Symbols;
- std::unique_ptr<MappedBlockStream> DirectoryStream;
- std::unique_ptr<MappedBlockStream> StringTableStream;
+ std::unique_ptr<msf::MappedBlockStream> DirectoryStream;
+ std::unique_ptr<msf::MappedBlockStream> StringTableStream;
std::unique_ptr<NameHashTable> StringTable;
};
}
diff --git a/include/llvm/DebugInfo/PDB/Raw/PDBFileBuilder.h b/include/llvm/DebugInfo/PDB/Raw/PDBFileBuilder.h
index 47c755b43269..27fc4b53b649 100644
--- a/include/llvm/DebugInfo/PDB/Raw/PDBFileBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Raw/PDBFileBuilder.h
@@ -13,45 +13,50 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/PDB/Raw/PDBFile.h"
+#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
-#include "llvm/DebugInfo/PDB/Raw/MsfBuilder.h"
-#include "llvm/DebugInfo/PDB/Raw/PDBFile.h"
-
#include <memory>
#include <vector>
namespace llvm {
-namespace codeview {
-class StreamInterface;
+namespace msf {
+class MSFBuilder;
}
namespace pdb {
class DbiStreamBuilder;
class InfoStreamBuilder;
-class PDBFile;
+class TpiStreamBuilder;
class PDBFileBuilder {
public:
- explicit PDBFileBuilder(
- std::unique_ptr<codeview::StreamInterface> FileBuffer);
+ explicit PDBFileBuilder(BumpPtrAllocator &Allocator);
PDBFileBuilder(const PDBFileBuilder &) = delete;
PDBFileBuilder &operator=(const PDBFileBuilder &) = delete;
- Error initialize(const msf::SuperBlock &Super);
+ Error initialize(uint32_t BlockSize);
- MsfBuilder &getMsfBuilder();
+ msf::MSFBuilder &getMsfBuilder();
InfoStreamBuilder &getInfoBuilder();
DbiStreamBuilder &getDbiBuilder();
+ TpiStreamBuilder &getTpiBuilder();
+ TpiStreamBuilder &getIpiBuilder();
- Expected<std::unique_ptr<PDBFile>> build();
+ Error commit(StringRef Filename);
private:
+ Expected<msf::MSFLayout> finalizeMsfLayout() const;
+
+ BumpPtrAllocator &Allocator;
+
+ std::unique_ptr<msf::MSFBuilder> Msf;
std::unique_ptr<InfoStreamBuilder> Info;
std::unique_ptr<DbiStreamBuilder> Dbi;
-
- std::unique_ptr<PDBFile> File;
- std::unique_ptr<MsfBuilder> Msf;
+ std::unique_ptr<TpiStreamBuilder> Tpi;
+ std::unique_ptr<TpiStreamBuilder> Ipi;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Raw/PublicsStream.h b/include/llvm/DebugInfo/PDB/Raw/PublicsStream.h
index f5bfb0ed60a9..577f2986ff24 100644
--- a/include/llvm/DebugInfo/PDB/Raw/PublicsStream.h
+++ b/include/llvm/DebugInfo/PDB/Raw/PublicsStream.h
@@ -10,10 +10,10 @@
#ifndef LLVM_DEBUGINFO_PDB_RAW_PUBLICSSTREAM_H
#define LLVM_DEBUGINFO_PDB_RAW_PUBLICSSTREAM_H
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
-#include "llvm/DebugInfo/PDB/Raw/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
#include "llvm/DebugInfo/PDB/Raw/RawTypes.h"
@@ -22,14 +22,14 @@
namespace llvm {
namespace pdb {
class DbiStream;
+struct GSIHashHeader;
class PDBFile;
class PublicsStream {
- struct GSIHashHeader;
struct HeaderInfo;
public:
- PublicsStream(PDBFile &File, std::unique_ptr<MappedBlockStream> Stream);
+ PublicsStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
~PublicsStream();
Error reload();
@@ -38,16 +38,16 @@ public:
uint32_t getNumBuckets() const { return NumBuckets; }
iterator_range<codeview::CVSymbolArray::Iterator>
getSymbols(bool *HadError) const;
- codeview::FixedStreamArray<support::ulittle32_t> getHashBuckets() const {
+ msf::FixedStreamArray<support::ulittle32_t> getHashBuckets() const {
return HashBuckets;
}
- codeview::FixedStreamArray<support::ulittle32_t> getAddressMap() const {
+ msf::FixedStreamArray<support::ulittle32_t> getAddressMap() const {
return AddressMap;
}
- codeview::FixedStreamArray<support::ulittle32_t> getThunkMap() const {
+ msf::FixedStreamArray<support::ulittle32_t> getThunkMap() const {
return ThunkMap;
}
- codeview::FixedStreamArray<SectionOffset> getSectionOffsets() const {
+ msf::FixedStreamArray<SectionOffset> getSectionOffsets() const {
return SectionOffsets;
}
@@ -56,14 +56,14 @@ public:
private:
PDBFile &Pdb;
- std::unique_ptr<MappedBlockStream> Stream;
+ std::unique_ptr<msf::MappedBlockStream> Stream;
uint32_t NumBuckets = 0;
ArrayRef<uint8_t> Bitmap;
- codeview::FixedStreamArray<PSHashRecord> HashRecords;
- codeview::FixedStreamArray<support::ulittle32_t> HashBuckets;
- codeview::FixedStreamArray<support::ulittle32_t> AddressMap;
- codeview::FixedStreamArray<support::ulittle32_t> ThunkMap;
- codeview::FixedStreamArray<SectionOffset> SectionOffsets;
+ msf::FixedStreamArray<PSHashRecord> HashRecords;
+ msf::FixedStreamArray<support::ulittle32_t> HashBuckets;
+ msf::FixedStreamArray<support::ulittle32_t> AddressMap;
+ msf::FixedStreamArray<support::ulittle32_t> ThunkMap;
+ msf::FixedStreamArray<SectionOffset> SectionOffsets;
const HeaderInfo *Header;
const GSIHashHeader *HashHdr;
diff --git a/include/llvm/DebugInfo/PDB/Raw/RawConstants.h b/include/llvm/DebugInfo/PDB/Raw/RawConstants.h
index 8daaf47882d8..af114ff52491 100644
--- a/include/llvm/DebugInfo/PDB/Raw/RawConstants.h
+++ b/include/llvm/DebugInfo/PDB/Raw/RawConstants.h
@@ -17,6 +17,8 @@
namespace llvm {
namespace pdb {
+const uint16_t kInvalidStreamIndex = 0xFFFF;
+
enum PdbRaw_ImplVer : uint32_t {
PdbImplVC2 = 19941610,
PdbImplVC4 = 19950623,
@@ -61,6 +63,8 @@ enum SpecialStream : uint32_t {
StreamTPI = 2,
StreamDBI = 3,
StreamIPI = 4,
+
+ kSpecialStreamCount
};
enum class DbgHeaderType : uint16_t {
diff --git a/include/llvm/DebugInfo/PDB/Raw/RawError.h b/include/llvm/DebugInfo/PDB/Raw/RawError.h
index b0687cddbf48..f96b8066bbe5 100644
--- a/include/llvm/DebugInfo/PDB/Raw/RawError.h
+++ b/include/llvm/DebugInfo/PDB/Raw/RawError.h
@@ -19,11 +19,14 @@ namespace pdb {
enum class raw_error_code {
unspecified = 1,
feature_unsupported,
+ invalid_format,
corrupt_file,
insufficient_buffer,
no_stream,
index_out_of_bounds,
invalid_block_address,
+ duplicate_entry,
+ no_entry,
not_writable,
invalid_tpi_hash,
};
diff --git a/include/llvm/DebugInfo/PDB/Raw/RawSession.h b/include/llvm/DebugInfo/PDB/Raw/RawSession.h
index 73d281eab1a7..5a6c469fcc8e 100644
--- a/include/llvm/DebugInfo/PDB/Raw/RawSession.h
+++ b/include/llvm/DebugInfo/PDB/Raw/RawSession.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
namespace llvm {
@@ -20,7 +21,8 @@ class PDBFile;
class RawSession : public IPDBSession {
public:
- explicit RawSession(std::unique_ptr<PDBFile> PdbFile);
+ RawSession(std::unique_ptr<PDBFile> PdbFile,
+ std::unique_ptr<BumpPtrAllocator> Allocator);
~RawSession() override;
static Error createFromPdb(StringRef Path,
@@ -68,6 +70,7 @@ public:
private:
std::unique_ptr<PDBFile> Pdb;
+ std::unique_ptr<BumpPtrAllocator> Allocator;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Raw/RawTypes.h b/include/llvm/DebugInfo/PDB/Raw/RawTypes.h
index afcfe9405c0f..d404b3994dbc 100644
--- a/include/llvm/DebugInfo/PDB/Raw/RawTypes.h
+++ b/include/llvm/DebugInfo/PDB/Raw/RawTypes.h
@@ -80,6 +80,228 @@ struct TypeIndexOffset {
support::ulittle32_t Offset;
};
+/// Some of the values are stored in bitfields. Since this needs to be portable
+/// across compilers and architectures (big / little endian in particular) we
+/// can't use the actual structures below, but must instead do the shifting
+/// and masking ourselves. The struct definitions are provided for reference.
+struct DbiFlags {
+ /// uint16_t IncrementalLinking : 1; // True if linked incrementally
+ /// uint16_t IsStripped : 1; // True if private symbols were
+ /// stripped.
+ /// uint16_t HasCTypes : 1; // True if linked with /debug:ctypes.
+ /// uint16_t Reserved : 13;
+ static const uint16_t FlagIncrementalMask = 0x0001;
+ static const uint16_t FlagStrippedMask = 0x0002;
+ static const uint16_t FlagHasCTypesMask = 0x0004;
+};
+
+struct DbiBuildNo {
+ /// uint16_t MinorVersion : 8;
+ /// uint16_t MajorVersion : 7;
+ /// uint16_t NewVersionFormat : 1;
+ static const uint16_t BuildMinorMask = 0x00FF;
+ static const uint16_t BuildMinorShift = 0;
+
+ static const uint16_t BuildMajorMask = 0x7F00;
+ static const uint16_t BuildMajorShift = 8;
+};
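Because these fields are packed into a single on-disk word, a reader decodes them with the masks and shifts above rather than a compiler bitfield. A minimal sketch, using plain uint16_t in place of the endian-aware support::ulittle16_t, and assuming the top bit (0x8000) is the NewVersionFormat bit implied by the bitfield comment:

#include <cstdint>
#include <cstdio>

int main() {
  uint16_t BuildNumber = 0x8E0E; // example only: new format, major 14, minor 14
  bool NewFormat = (BuildNumber & 0x8000) != 0;      // assumed NewVersionFormat bit
  unsigned Major = (BuildNumber & 0x7F00) >> 8;      // DbiBuildNo::BuildMajor{Mask,Shift}
  unsigned Minor = (BuildNumber & 0x00FF);           // DbiBuildNo::BuildMinor{Mask,Shift}
  std::printf("new-format=%d major=%u minor=%u\n", NewFormat, Major, Minor);
  return 0;
}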
+
+/// The fixed size header that appears at the beginning of the DBI Stream.
+struct DbiStreamHeader {
+ support::little32_t VersionSignature;
+ support::ulittle32_t VersionHeader;
+
+ /// How "old" is this DBI Stream. Should match the age of the PDB InfoStream.
+ support::ulittle32_t Age;
+
+ /// Global symbol stream #
+ support::ulittle16_t GlobalSymbolStreamIndex;
+
+ /// See DbiBuildNo structure.
+ support::ulittle16_t BuildNumber;
+
+ /// Public symbols stream #
+ support::ulittle16_t PublicSymbolStreamIndex;
+
+ /// version of mspdbNNN.dll
+ support::ulittle16_t PdbDllVersion;
+
+ /// Symbol records stream #
+ support::ulittle16_t SymRecordStreamIndex;
+
+ /// rbld number of mspdbNNN.dll
+ support::ulittle16_t PdbDllRbld;
+
+ /// Size of module info stream
+ support::little32_t ModiSubstreamSize;
+
+ /// Size of sec. contrib stream
+ support::little32_t SecContrSubstreamSize;
+
+ /// Size of sec. map substream
+ support::little32_t SectionMapSize;
+
+ /// Size of file info substream
+ support::little32_t FileInfoSize;
+
+ /// Size of type server map
+ support::little32_t TypeServerSize;
+
+ /// Index of MFC Type Server
+ support::ulittle32_t MFCTypeServerIndex;
+
+ /// Size of DbgHeader info
+ support::little32_t OptionalDbgHdrSize;
+
+ /// Size of EC stream (what is EC?)
+ support::little32_t ECSubstreamSize;
+
+ /// See DbiFlags enum.
+ support::ulittle16_t Flags;
+
+ /// See PDB_MachineType enum.
+ support::ulittle16_t MachineType;
+
+ /// Pad to 64 bytes
+ support::ulittle32_t Reserved;
+};
+static_assert(sizeof(DbiStreamHeader) == 64, "Invalid DbiStreamHeader size!");
+
+struct SectionContribEntry {
+ support::ulittle16_t Section;
+ char Padding1[2];
+ support::little32_t Offset;
+ support::little32_t Size;
+ support::ulittle32_t Characteristics;
+ support::ulittle16_t ModuleIndex;
+ char Padding2[2];
+ support::ulittle32_t DataCrc;
+ support::ulittle32_t RelocCrc;
+};
+
+/// The header preceding the File Info Substream of the DBI stream.
+struct FileInfoSubstreamHeader {
+ /// Total # of modules, should match number of records in the ModuleInfo
+ /// substream.
+ support::ulittle16_t NumModules;
+
+ /// Total # of source files. This value is not accurate because PDB actually
+ /// supports more than 64k source files, so we ignore it and compute the value
+ /// from other stream fields.
+ support::ulittle16_t NumSourceFiles;
+
+ /// Following this header the File Info Substream is laid out as follows:
+ /// ulittle16_t ModIndices[NumModules];
+ /// ulittle16_t ModFileCounts[NumModules];
+ /// ulittle32_t FileNameOffsets[NumSourceFiles];
+ /// char Names[][NumSourceFiles];
+ /// with the caveat that `NumSourceFiles` cannot be trusted, so
+ /// it is computed by summing the `ModFileCounts` array.
+};
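Because the 16-bit NumSourceFiles field can wrap, the real count has to be recomputed from the layout the comment describes. A hedged sketch over raw substream bytes, assuming a little-endian host for brevity (the actual reader goes through support::ulittle16_t):

#include <cstdint>
#include <cstring>

// Sum ModFileCounts[NumModules] to recover the true number of source files.
uint32_t countSourceFiles(const uint8_t *Data, size_t Size) {
  if (Size < 4)
    return 0;
  uint16_t NumModules;
  std::memcpy(&NumModules, Data, sizeof(NumModules));
  // Skip NumModules, NumSourceFiles, and the ModIndices array.
  size_t Offset = 4 + size_t(NumModules) * 2;
  uint32_t Total = 0;
  for (uint16_t I = 0; I < NumModules && Offset + 2 <= Size; ++I, Offset += 2) {
    uint16_t Count;
    std::memcpy(&Count, Data + Offset, sizeof(Count));
    Total += Count; // the header's NumSourceFiles may have wrapped at 64k
  }
  return Total;
}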
+
+struct ModInfoFlags {
+ /// uint16_t fWritten : 1; // True if ModInfo is dirty
+ /// uint16_t fECEnabled : 1; // Is EC symbolic info present? (What is EC?)
+ /// uint16_t unused : 6; // Reserved
+ /// uint16_t iTSM : 8; // Type Server Index for this module
+ static const uint16_t HasECFlagMask = 0x2;
+
+ static const uint16_t TypeServerIndexMask = 0xFF00;
+ static const uint16_t TypeServerIndexShift = 8;
+};
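The same mask-and-shift pattern backs ModInfo's accessors; a small sketch of decoding the Flags word, again with plain uint16_t standing in for the on-disk support::ulittle16_t:

#include <cstdint>

bool hasECInfo(uint16_t Flags) {
  return (Flags & 0x0002) != 0;          // ModInfoFlags::HasECFlagMask
}

uint8_t typeServerIndex(uint16_t Flags) {
  return uint8_t((Flags & 0xFF00) >> 8); // TypeServerIndexMask / TypeServerIndexShift
}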
+
+/// The header preceding each entry in the Module Info substream of the DBI
+/// stream.
+struct ModuleInfoHeader {
+ /// Currently opened module. This field is a pointer in the reference
+ /// implementation, but that won't work on 64-bit systems, and anyway it
+ /// doesn't make sense to read a pointer from a file. For now it is unused,
+ /// so just ignore it.
+ support::ulittle32_t Mod;
+
+ /// First section contribution of this module.
+ SectionContribEntry SC;
+
+ /// See ModInfoFlags definition.
+ support::ulittle16_t Flags;
+
+ /// Stream Number of module debug info
+ support::ulittle16_t ModDiStream;
+
+ /// Size of local symbol debug info in above stream
+ support::ulittle32_t SymBytes;
+
+ /// Size of line number debug info in above stream
+ support::ulittle32_t LineBytes;
+
+ /// Size of C13 line number info in above stream
+ support::ulittle32_t C13Bytes;
+
+ /// Number of files contributing to this module
+ support::ulittle16_t NumFiles;
+
+ /// Padding so the next field is 4-byte aligned.
+ char Padding1[2];
+
+ /// Array of [0..NumFiles) DBI name buffer offsets. This field is a pointer
+ /// in the reference implementation, but as with `Mod`, we ignore it for now
+ /// since it is unused.
+ support::ulittle32_t FileNameOffs;
+
+ /// Name Index for src file name
+ support::ulittle32_t SrcFileNameNI;
+
+ /// Name Index for path to compiler PDB
+ support::ulittle32_t PdbFilePathNI;
+
+ /// Following this header are two zero terminated strings.
+ /// char ModuleName[];
+ /// char ObjFileName[];
+};
+
+/// Defines a 128-bit unique identifier. This maps to a GUID on Windows, but
+/// is abstracted here for the purposes of non-Windows platforms that don't have
+/// the GUID structure defined.
+struct PDB_UniqueId {
+ uint8_t Guid[16];
+};
+
+// The header preceding the global TPI stream.
+// This corresponds to `HDR` in PDB/dbi/tpi.h.
+struct TpiStreamHeader {
+ struct EmbeddedBuf {
+ support::little32_t Off;
+ support::ulittle32_t Length;
+ };
+
+ support::ulittle32_t Version;
+ support::ulittle32_t HeaderSize;
+ support::ulittle32_t TypeIndexBegin;
+ support::ulittle32_t TypeIndexEnd;
+ support::ulittle32_t TypeRecordBytes;
+
+ // The following members correspond to `TpiHash` in PDB/dbi/tpi.h.
+ support::ulittle16_t HashStreamIndex;
+ support::ulittle16_t HashAuxStreamIndex;
+ support::ulittle32_t HashKeySize;
+ support::ulittle32_t NumHashBuckets;
+
+ EmbeddedBuf HashValueBuffer;
+ EmbeddedBuf IndexOffsetBuffer;
+ EmbeddedBuf HashAdjBuffer;
+};
+
+const uint32_t MinTpiHashBuckets = 0x1000;
+const uint32_t MaxTpiHashBuckets = 0x40000;
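A hedged sketch of the kind of sanity check a TPI reader can apply to a parsed header using these bounds; the exact checks LLVM performs may differ, and the field types are simplified to uint32_t here:

#include <cstdint>

struct SimpleTpiHeader {
  uint32_t HashKeySize;
  uint32_t NumHashBuckets;
};

// Hash keys are 4-byte truncated hashes, and the bucket count is expected to
// lie within [MinTpiHashBuckets, MaxTpiHashBuckets].
bool looksValid(const SimpleTpiHeader &H) {
  const uint32_t MinBuckets = 0x1000, MaxBuckets = 0x40000;
  return H.HashKeySize == sizeof(uint32_t) &&
         H.NumHashBuckets >= MinBuckets && H.NumHashBuckets <= MaxBuckets;
}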
+
+/// The header preceding the global PDB Stream (Stream 1)
+struct InfoStreamHeader {
+ support::ulittle32_t Version;
+ support::ulittle32_t Signature;
+ support::ulittle32_t Age;
+ PDB_UniqueId Guid;
+};
+
} // namespace pdb
} // namespace llvm
diff --git a/include/llvm/DebugInfo/PDB/Raw/SymbolStream.h b/include/llvm/DebugInfo/PDB/Raw/SymbolStream.h
index 685a23411a3b..41d5e6ad64a0 100644
--- a/include/llvm/DebugInfo/PDB/Raw/SymbolStream.h
+++ b/include/llvm/DebugInfo/PDB/Raw/SymbolStream.h
@@ -10,19 +10,20 @@
#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBSYMBOLSTREAM_H
#define LLVM_DEBUGINFO_PDB_RAW_PDBSYMBOLSTREAM_H
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
-#include "llvm/DebugInfo/PDB/Raw/MappedBlockStream.h"
#include "llvm/Support/Error.h"
namespace llvm {
+namespace msf {
+class MappedBlockStream;
+}
namespace pdb {
class PDBFile;
class SymbolStream {
public:
- SymbolStream(std::unique_ptr<MappedBlockStream> Stream);
+ SymbolStream(std::unique_ptr<msf::MappedBlockStream> Stream);
~SymbolStream();
Error reload();
@@ -33,7 +34,7 @@ public:
private:
codeview::CVSymbolArray SymbolRecords;
- std::unique_ptr<MappedBlockStream> Stream;
+ std::unique_ptr<msf::MappedBlockStream> Stream;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Raw/TpiHashing.h b/include/llvm/DebugInfo/PDB/Raw/TpiHashing.h
new file mode 100644
index 000000000000..67a4952fcdfe
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Raw/TpiHashing.h
@@ -0,0 +1,95 @@
+//===- TpiHashing.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_TPIHASHING_H
+#define LLVM_DEBUGINFO_PDB_TPIHASHING_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
+#include "llvm/DebugInfo/PDB/Raw/RawError.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+namespace pdb {
+
+class TpiHashUpdater : public codeview::TypeVisitorCallbacks {
+public:
+ TpiHashUpdater() = default;
+
+#define TYPE_RECORD(EnumName, EnumVal, Name) \
+ virtual Error visitKnownRecord(codeview::CVType &CVR, \
+ codeview::Name##Record &Record) override { \
+ visitKnownRecordImpl(CVR, Record); \
+ return Error::success(); \
+ }
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD(EnumName, EnumVal, Name)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/TypeRecords.def"
+
+private:
+ template <typename RecordKind>
+ void visitKnownRecordImpl(codeview::CVType &CVR, RecordKind &Record) {
+ CVR.Hash = 0;
+ }
+
+ void visitKnownRecordImpl(codeview::CVType &CVR,
+ codeview::UdtSourceLineRecord &Rec);
+ void visitKnownRecordImpl(codeview::CVType &CVR,
+ codeview::UdtModSourceLineRecord &Rec);
+ void visitKnownRecordImpl(codeview::CVType &CVR, codeview::ClassRecord &Rec);
+ void visitKnownRecordImpl(codeview::CVType &CVR, codeview::EnumRecord &Rec);
+ void visitKnownRecordImpl(codeview::CVType &CVR, codeview::UnionRecord &Rec);
+};
+
+class TpiHashVerifier : public codeview::TypeVisitorCallbacks {
+public:
+ TpiHashVerifier(msf::FixedStreamArray<support::ulittle32_t> &HashValues,
+ uint32_t NumHashBuckets)
+ : HashValues(HashValues), NumHashBuckets(NumHashBuckets) {}
+
+ Error visitKnownRecord(codeview::CVType &CVR,
+ codeview::UdtSourceLineRecord &Rec) override;
+ Error visitKnownRecord(codeview::CVType &CVR,
+ codeview::UdtModSourceLineRecord &Rec) override;
+ Error visitKnownRecord(codeview::CVType &CVR,
+ codeview::ClassRecord &Rec) override;
+ Error visitKnownRecord(codeview::CVType &CVR,
+ codeview::EnumRecord &Rec) override;
+ Error visitKnownRecord(codeview::CVType &CVR,
+ codeview::UnionRecord &Rec) override;
+ Error visitTypeBegin(codeview::CVType &CVR) override;
+
+private:
+ Error verifySourceLine(codeview::TypeIndex TI);
+
+ Error errorInvalidHash() {
+ return make_error<RawError>(
+ raw_error_code::invalid_tpi_hash,
+ "Type index is 0x" +
+ utohexstr(codeview::TypeIndex::FirstNonSimpleIndex + Index));
+ }
+
+ msf::FixedStreamArray<support::ulittle32_t> HashValues;
+ codeview::CVType RawRecord;
+ uint32_t NumHashBuckets;
+ uint32_t Index = -1;
+};
+
+} // end namespace pdb
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_TPIHASHING_H
diff --git a/include/llvm/DebugInfo/PDB/Raw/TpiStream.h b/include/llvm/DebugInfo/PDB/Raw/TpiStream.h
index 4f36d70aabed..de21abe4c817 100644
--- a/include/llvm/DebugInfo/PDB/Raw/TpiStream.h
+++ b/include/llvm/DebugInfo/PDB/Raw/TpiStream.h
@@ -10,11 +10,9 @@
#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAM_H
#define LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAM_H
-#include "llvm/DebugInfo/CodeView/StreamArray.h"
-#include "llvm/DebugInfo/CodeView/StreamRef.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/MSF/StreamArray.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
-#include "llvm/DebugInfo/PDB/Raw/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
#include "llvm/DebugInfo/PDB/Raw/RawTypes.h"
#include "llvm/Support/raw_ostream.h"
@@ -22,14 +20,18 @@
#include "llvm/Support/Error.h"
namespace llvm {
+namespace msf {
+class MappedBlockStream;
+}
namespace pdb {
class PDBFile;
class TpiStream {
- struct HeaderInfo;
+ friend class TpiStreamBuilder;
public:
- TpiStream(const PDBFile &File, std::unique_ptr<MappedBlockStream> Stream);
+ TpiStream(const PDBFile &File,
+ std::unique_ptr<msf::MappedBlockStream> Stream);
~TpiStream();
Error reload();
@@ -43,9 +45,9 @@ public:
uint32_t getHashKeySize() const;
uint32_t NumHashBuckets() const;
- codeview::FixedStreamArray<support::ulittle32_t> getHashValues() const;
- codeview::FixedStreamArray<TypeIndexOffset> getTypeIndexOffsets() const;
- codeview::FixedStreamArray<TypeIndexOffset> getHashAdjustments() const;
+ msf::FixedStreamArray<support::ulittle32_t> getHashValues() const;
+ msf::FixedStreamArray<TypeIndexOffset> getTypeIndexOffsets() const;
+ msf::FixedStreamArray<TypeIndexOffset> getHashAdjustments() const;
iterator_range<codeview::CVTypeArray::Iterator> types(bool *HadError) const;
@@ -55,16 +57,16 @@ private:
Error verifyHashValues();
const PDBFile &Pdb;
- std::unique_ptr<MappedBlockStream> Stream;
+ std::unique_ptr<msf::MappedBlockStream> Stream;
codeview::CVTypeArray TypeRecords;
- std::unique_ptr<MappedBlockStream> HashStream;
- codeview::FixedStreamArray<support::ulittle32_t> HashValues;
- codeview::FixedStreamArray<TypeIndexOffset> TypeIndexOffsets;
- codeview::FixedStreamArray<TypeIndexOffset> HashAdjustments;
+ std::unique_ptr<msf::ReadableStream> HashStream;
+ msf::FixedStreamArray<support::ulittle32_t> HashValues;
+ msf::FixedStreamArray<TypeIndexOffset> TypeIndexOffsets;
+ msf::FixedStreamArray<TypeIndexOffset> HashAdjustments;
- const HeaderInfo *Header;
+ const TpiStreamHeader *Header;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Raw/TpiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Raw/TpiStreamBuilder.h
new file mode 100644
index 000000000000..f9a642126f53
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Raw/TpiStreamBuilder.h
@@ -0,0 +1,82 @@
+//===- TpiStreamBuilder.h - PDB Tpi Stream Creation -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAMBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBTPISTREAMBUILDER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/DebugInfo/MSF/ByteStream.h"
+#include "llvm/DebugInfo/MSF/SequencedItemStream.h"
+#include "llvm/DebugInfo/PDB/Raw/RawConstants.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+class TypeRecord;
+}
+namespace msf {
+class ByteStream;
+class MSFBuilder;
+struct MSFLayout;
+class ReadableStreamRef;
+class WritableStream;
+
+template <> struct SequencedItemTraits<llvm::codeview::CVType> {
+ static size_t length(const codeview::CVType &Item) { return Item.length(); }
+ static ArrayRef<uint8_t> bytes(const codeview::CVType &Item) {
+ return Item.data();
+ }
+};
+}
+namespace pdb {
+class PDBFile;
+class TpiStream;
+struct TpiStreamHeader;
+
+class TpiStreamBuilder {
+public:
+ explicit TpiStreamBuilder(msf::MSFBuilder &Msf, uint32_t StreamIdx);
+ ~TpiStreamBuilder();
+
+ TpiStreamBuilder(const TpiStreamBuilder &) = delete;
+ TpiStreamBuilder &operator=(const TpiStreamBuilder &) = delete;
+
+ void setVersionHeader(PdbRaw_TpiVer Version);
+ void addTypeRecord(const codeview::CVType &Record);
+
+ Error finalizeMsfLayout();
+
+ Error commit(const msf::MSFLayout &Layout, const msf::WritableStream &Buffer);
+
+ uint32_t calculateSerializedLength() const;
+
+private:
+ uint32_t calculateHashBufferSize() const;
+ Error finalize();
+
+ msf::MSFBuilder &Msf;
+ BumpPtrAllocator &Allocator;
+
+ Optional<PdbRaw_TpiVer> VerHeader;
+ std::vector<codeview::CVType> TypeRecords;
+ msf::SequencedItemStream<codeview::CVType> TypeRecordStream;
+ uint32_t HashStreamIndex = kInvalidStreamIndex;
+ std::unique_ptr<msf::ByteStream> HashValueStream;
+
+ const TpiStreamHeader *Header;
+ uint32_t Idx;
+};
+}
+}
+
+#endif
diff --git a/include/llvm/Demangle/Demangle.h b/include/llvm/Demangle/Demangle.h
new file mode 100644
index 000000000000..d2eb56b39f9b
--- /dev/null
+++ b/include/llvm/Demangle/Demangle.h
@@ -0,0 +1,28 @@
+//===--- Demangle.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cstddef>
+
+namespace llvm {
+/// This is an llvm-local version of __cxa_demangle. Other than the name and
+/// being in the llvm namespace it is identical.
+///
+/// The mangled_name is demangled into buf and returned. If the buffer is not
+/// large enough, realloc is used to expand it.
+///
+/// The *status will be set to
+/// unknown_error: -4
+/// invalid_args: -3
+/// invalid_mangled_name: -2
+/// memory_alloc_failure: -1
+/// success: 0
+
+char *itaniumDemangle(const char *mangled_name, char *buf, size_t *n,
+ int *status);
+}
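Since the comment states the function is identical to __cxa_demangle apart from the name and namespace, a usage sketch following that calling convention (a null buffer asks the function to allocate one with malloc; the caller frees the result):

#include "llvm/Demangle/Demangle.h"
#include <cstdio>
#include <cstdlib>

int main() {
  int Status = 0;
  char *Demangled = llvm::itaniumDemangle("_Z3fooi", nullptr, nullptr, &Status);
  if (Status == 0 && Demangled)
    std::printf("%s\n", Demangled); // prints: foo(int)
  std::free(Demangled);
  return 0;
}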
diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h
index ab13028b3ae0..f68337c43271 100644
--- a/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -137,13 +137,13 @@ protected:
std::unique_ptr<Module> M,
std::string *ErrorStr,
std::shared_ptr<MCJITMemoryManager> MM,
- std::shared_ptr<RuntimeDyld::SymbolResolver> SR,
+ std::shared_ptr<JITSymbolResolver> SR,
std::unique_ptr<TargetMachine> TM);
static ExecutionEngine *(*OrcMCJITReplacementCtor)(
std::string *ErrorStr,
std::shared_ptr<MCJITMemoryManager> MM,
- std::shared_ptr<RuntimeDyld::SymbolResolver> SR,
+ std::shared_ptr<JITSymbolResolver> SR,
std::unique_ptr<TargetMachine> TM);
static ExecutionEngine *(*InterpCtor)(std::unique_ptr<Module> M,
@@ -198,22 +198,33 @@ public:
const DataLayout &getDataLayout() const { return DL; }
- /// removeModule - Remove a Module from the list of modules. Returns true if
- /// M is found.
+ /// removeModule - Removes a Module from the list of modules, but does not
+ /// free the module's memory. Returns true if M is found, in which case the
+ /// caller assumes responsibility for deleting the module.
+ //
+ // FIXME: This stealth ownership transfer is horrible. This will probably be
+ // fixed by deleting ExecutionEngine.
virtual bool removeModule(Module *M);
/// FindFunctionNamed - Search all of the active modules to find the function that
/// defines FnName. This is a very slow operation and shouldn't be used for
/// general code.
- virtual Function *FindFunctionNamed(const char *FnName);
+ virtual Function *FindFunctionNamed(StringRef FnName);
/// FindGlobalVariableNamed - Search all of the active modules to find the global variable
/// that defines Name. This is a very slow operation and shouldn't be used for
/// general code.
- virtual GlobalVariable *FindGlobalVariableNamed(const char *Name, bool AllowInternal = false);
+ virtual GlobalVariable *FindGlobalVariableNamed(StringRef Name, bool AllowInternal = false);
/// runFunction - Execute the specified function with the specified arguments,
/// and return the result.
+ ///
+ /// For MCJIT execution engines, clients are encouraged to use the
+ /// "GetFunctionAddress" method (rather than runFunction) and cast the
+ /// returned uint64_t to the desired function pointer type. However, for
+ /// backwards compatibility MCJIT's implementation can execute 'main-like'
+ /// functions (i.e. those returning void or int, and taking either no
+ /// arguments or (int, char*[])).
virtual GenericValue runFunction(Function *F,
ArrayRef<GenericValue> ArgValues) = 0;
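A hedged sketch of the pattern the comment recommends in place of runFunction, assuming an MCJIT-backed ExecutionEngine whose module defines `int add(int, int)`:

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include <cstdint>

int callAdd(llvm::ExecutionEngine &EE) {
  uint64_t Addr = EE.getFunctionAddress("add"); // 0 if the symbol is unknown
  if (!Addr)
    return 0;
  auto *Add = reinterpret_cast<int (*)(int, int)>(static_cast<uintptr_t>(Addr));
  return Add(2, 3); // expected: 5
}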
@@ -516,7 +527,7 @@ private:
std::string *ErrorStr;
CodeGenOpt::Level OptLevel;
std::shared_ptr<MCJITMemoryManager> MemMgr;
- std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver;
+ std::shared_ptr<JITSymbolResolver> Resolver;
TargetOptions Options;
Optional<Reloc::Model> RelocModel;
CodeModel::Model CMModel;
@@ -555,7 +566,7 @@ public:
setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM);
EngineBuilder&
- setSymbolResolver(std::unique_ptr<RuntimeDyld::SymbolResolver> SR);
+ setSymbolResolver(std::unique_ptr<JITSymbolResolver> SR);
/// setErrorStr - Set the error string to write to on error. This option
/// defaults to NULL.
diff --git a/include/llvm/ExecutionEngine/JITEventListener.h b/include/llvm/ExecutionEngine/JITEventListener.h
index be7c0448bb78..94ec4e36a199 100644
--- a/include/llvm/ExecutionEngine/JITEventListener.h
+++ b/include/llvm/ExecutionEngine/JITEventListener.h
@@ -18,18 +18,18 @@
#include "RuntimeDyld.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DebugLoc.h"
-#include "llvm/Support/DataTypes.h"
+#include <cstdint>
#include <vector>
namespace llvm {
-class Function;
+
+class IntelJITEventsWrapper;
class MachineFunction;
class OProfileWrapper;
-class IntelJITEventsWrapper;
namespace object {
class ObjectFile;
-}
+} // end namespace object
/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
/// about a generated machine code function.
@@ -60,8 +60,8 @@ public:
typedef JITEvent_EmittedFunctionDetails EmittedFunctionDetails;
public:
- JITEventListener() {}
- virtual ~JITEventListener() {}
+ JITEventListener() = default;
+ virtual ~JITEventListener() = default;
/// NotifyObjectEmitted - Called after an object has been successfully
/// emitted to memory. NotifyFunctionEmitted will not be called for
@@ -81,7 +81,7 @@ public:
// Get a pointer to the GDB debugger registration listener.
static JITEventListener *createGDBRegistrationListener();
-#if defined(LLVM_USE_INTEL_JITEVENTS) && LLVM_USE_INTEL_JITEVENTS
+#if LLVM_USE_INTEL_JITEVENTS
// Construct an IntelJITEventListener
static JITEventListener *createIntelJITEventListener();
@@ -97,7 +97,7 @@ public:
}
#endif // USE_INTEL_JITEVENTS
-#if defined(LLVM_USE_OPROFILE) && LLVM_USE_OPROFILE
+#if LLVM_USE_OPROFILE
// Construct an OProfileJITEventListener
static JITEventListener *createOProfileJITEventListener();
@@ -105,7 +105,6 @@ public:
static JITEventListener *createOProfileJITEventListener(
OProfileWrapper* AlternativeImpl);
#else
-
static JITEventListener *createOProfileJITEventListener() { return nullptr; }
static JITEventListener *createOProfileJITEventListener(
@@ -113,10 +112,11 @@ public:
return nullptr;
}
#endif // USE_OPROFILE
+
private:
virtual void anchor();
};
-} // end namespace llvm.
+} // end namespace llvm
-#endif // defined LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
+#endif // LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
diff --git a/include/llvm/ExecutionEngine/JITSymbol.h b/include/llvm/ExecutionEngine/JITSymbol.h
new file mode 100644
index 000000000000..88929482ce76
--- /dev/null
+++ b/include/llvm/ExecutionEngine/JITSymbol.h
@@ -0,0 +1,197 @@
+//===----------- JITSymbol.h - JIT symbol abstraction -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstraction for target process addresses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITSYMBOL_H
+#define LLVM_EXECUTIONENGINE_JITSYMBOL_H
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <string>
+
+namespace llvm {
+
+class GlobalValue;
+
+namespace object {
+ class BasicSymbolRef;
+} // end namespace object
+
+/// @brief Represents an address in the target process's address space.
+typedef uint64_t JITTargetAddress;
+
+/// @brief Flags for symbols in the JIT.
+class JITSymbolFlags {
+public:
+ typedef uint8_t UnderlyingType;
+
+ enum FlagNames : UnderlyingType {
+ None = 0,
+ Weak = 1U << 0,
+ Common = 1U << 1,
+ Absolute = 1U << 2,
+ Exported = 1U << 3
+ };
+
+ /// @brief Default-construct a JITSymbolFlags instance.
+ JITSymbolFlags() : Flags(None) {}
+
+ /// @brief Construct a JITSymbolFlags instance from the given flags.
+ JITSymbolFlags(FlagNames Flags) : Flags(Flags) {}
+
+ /// @brief Returns true if the Weak flag is set.
+ bool isWeak() const {
+ return (Flags & Weak) == Weak;
+ }
+
+ /// @brief Returns true if the Common flag is set.
+ bool isCommon() const {
+ return (Flags & Common) == Common;
+ }
+
+ bool isStrongDefinition() const {
+ return !isWeak() && !isCommon();
+ }
+
+ /// @brief Returns true if the Exported flag is set.
+ bool isExported() const {
+ return (Flags & Exported) == Exported;
+ }
+
+ operator UnderlyingType&() { return Flags; }
+
+ /// Construct a JITSymbolFlags value based on the flags of the given global
+ /// value.
+ static JITSymbolFlags fromGlobalValue(const GlobalValue &GV);
+
+ /// Construct a JITSymbolFlags value based on the flags of the given libobject
+ /// symbol.
+ static JITSymbolFlags fromObjectSymbol(const object::BasicSymbolRef &Symbol);
+
+private:
+ UnderlyingType Flags;
+};
+
+/// @brief Represents a symbol that has been evaluated to an address already.
+class JITEvaluatedSymbol {
+public:
+ /// @brief Create a 'null' symbol.
+ JITEvaluatedSymbol(std::nullptr_t)
+ : Address(0) {}
+
+ /// @brief Create a symbol for the given address and flags.
+ JITEvaluatedSymbol(JITTargetAddress Address, JITSymbolFlags Flags)
+ : Address(Address), Flags(Flags) {}
+
+ /// @brief An evaluated symbol converts to 'true' if its address is non-zero.
+ explicit operator bool() const { return Address != 0; }
+
+ /// @brief Return the address of this symbol.
+ JITTargetAddress getAddress() const { return Address; }
+
+ /// @brief Return the flags for this symbol.
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ JITTargetAddress Address;
+ JITSymbolFlags Flags;
+};
+
+/// @brief Represents a symbol in the JIT.
+class JITSymbol {
+public:
+ typedef std::function<JITTargetAddress()> GetAddressFtor;
+
+ /// @brief Create a 'null' symbol that represents failure to find a symbol
+ /// definition.
+ JITSymbol(std::nullptr_t)
+ : CachedAddr(0) {}
+
+ /// @brief Create a symbol for a definition with a known address.
+ JITSymbol(JITTargetAddress Addr, JITSymbolFlags Flags)
+ : CachedAddr(Addr), Flags(Flags) {}
+
+ /// @brief Construct a JITSymbol from a JITEvaluatedSymbol.
+ JITSymbol(JITEvaluatedSymbol Sym)
+ : CachedAddr(Sym.getAddress()), Flags(Sym.getFlags()) {}
+
+ /// @brief Create a symbol for a definition that doesn't have a known address
+ /// yet.
+ /// @param GetAddress A functor to materialize a definition (fixing the
+ /// address) on demand.
+ ///
+ /// This constructor allows a JIT layer to provide a reference to a symbol
+ /// definition without actually materializing the definition up front. The
+ /// user can materialize the definition at any time by calling the getAddress
+ /// method.
+ JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
+ : GetAddress(std::move(GetAddress)), CachedAddr(0), Flags(Flags) {}
+
+ /// @brief Returns true if the symbol exists, false otherwise.
+ explicit operator bool() const { return CachedAddr || GetAddress; }
+
+ /// @brief Get the address of the symbol in the target address space. Returns
+ /// '0' if the symbol does not exist.
+ JITTargetAddress getAddress() {
+ if (GetAddress) {
+ CachedAddr = GetAddress();
+ assert(CachedAddr && "Symbol could not be materialized.");
+ GetAddress = nullptr;
+ }
+ return CachedAddr;
+ }
+
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ GetAddressFtor GetAddress;
+ JITTargetAddress CachedAddr;
+ JITSymbolFlags Flags;
+};
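A minimal sketch of the deferred-materialization path described above: the functor runs only on the first getAddress() call and the result is then cached. The address value is made up purely for illustration.

#include "llvm/ExecutionEngine/JITSymbol.h"
#include <cstdio>

int main() {
  llvm::JITSymbol Sym(
      []() -> llvm::JITTargetAddress {
        std::puts("materializing definition...");
        return 0x1000; // pretend address of the freshly emitted definition
      },
      llvm::JITSymbolFlags::Exported);
  if (Sym) // true: a materializer is present even though no address is cached yet
    std::printf("0x%llx\n", (unsigned long long)Sym.getAddress());
  return 0;
}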
+
+/// \brief Symbol resolution.
+class JITSymbolResolver {
+public:
+ virtual ~JITSymbolResolver() = default;
+
+ /// This method returns the address of the specified symbol if it exists
+ /// within the logical dynamic library represented by this JITSymbolResolver.
+ /// Unlike findSymbol, queries through this interface should return addresses
+ /// for hidden symbols.
+ ///
+ /// This is of particular importance for the Orc JIT APIs, which support lazy
+ /// compilation by breaking up modules: Each of those broken out modules
+ /// must be able to resolve hidden symbols provided by the others. Clients
+ /// writing memory managers for MCJIT can usually ignore this method.
+ ///
+ /// This method will be queried by RuntimeDyld when checking for previous
+ /// definitions of common symbols.
+ virtual JITSymbol findSymbolInLogicalDylib(const std::string &Name) = 0;
+
+ /// This method returns the address of the specified function or variable.
+ /// It is used to resolve symbols during module linking.
+ ///
+ /// If the returned symbol's address is equal to ~0ULL then RuntimeDyld will
+ /// skip all relocations for that symbol, and the client will be responsible
+ /// for handling them manually.
+ virtual JITSymbol findSymbol(const std::string &Name) = 0;
+
+private:
+ virtual void anchor();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITSYMBOL_H
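A hedged sketch of a minimal JITSymbolResolver backed by a fixed symbol table; real resolvers usually consult the host process or other JIT'd modules, and the class name here is illustrative only.

#include "llvm/ExecutionEngine/JITSymbol.h"
#include <map>
#include <string>
#include <utility>

class MapResolver : public llvm::JITSymbolResolver {
public:
  explicit MapResolver(std::map<std::string, llvm::JITTargetAddress> Symbols)
      : Symbols(std::move(Symbols)) {}

  llvm::JITSymbol findSymbolInLogicalDylib(const std::string &Name) override {
    return findSymbol(Name); // no hidden-symbol distinction in this sketch
  }

  llvm::JITSymbol findSymbol(const std::string &Name) override {
    auto I = Symbols.find(Name);
    if (I == Symbols.end())
      return nullptr; // 'null' JITSymbol: lookup failed
    return llvm::JITSymbol(I->second, llvm::JITSymbolFlags::Exported);
  }

private:
  std::map<std::string, llvm::JITTargetAddress> Symbols;
};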
diff --git a/include/llvm/ExecutionEngine/JITSymbolFlags.h b/include/llvm/ExecutionEngine/JITSymbolFlags.h
deleted file mode 100644
index 7e1d57dabc81..000000000000
--- a/include/llvm/ExecutionEngine/JITSymbolFlags.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//===------ JITSymbolFlags.h - Flags for symbols in the JIT -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Symbol flags for symbols in the JIT (e.g. weak, exported).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_EXECUTIONENGINE_JITSYMBOLFLAGS_H
-#define LLVM_EXECUTIONENGINE_JITSYMBOLFLAGS_H
-
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/Object/SymbolicFile.h"
-
-namespace llvm {
-
-/// @brief Flags for symbols in the JIT.
-enum class JITSymbolFlags : char {
- None = 0,
- Weak = 1U << 0,
- Exported = 1U << 1
-};
-
-inline JITSymbolFlags operator|(JITSymbolFlags LHS, JITSymbolFlags RHS) {
- typedef std::underlying_type<JITSymbolFlags>::type UT;
- return static_cast<JITSymbolFlags>(
- static_cast<UT>(LHS) | static_cast<UT>(RHS));
-}
-
-inline JITSymbolFlags& operator |=(JITSymbolFlags &LHS, JITSymbolFlags RHS) {
- LHS = LHS | RHS;
- return LHS;
-}
-
-inline JITSymbolFlags operator&(JITSymbolFlags LHS, JITSymbolFlags RHS) {
- typedef std::underlying_type<JITSymbolFlags>::type UT;
- return static_cast<JITSymbolFlags>(
- static_cast<UT>(LHS) & static_cast<UT>(RHS));
-}
-
-inline JITSymbolFlags& operator &=(JITSymbolFlags &LHS, JITSymbolFlags RHS) {
- LHS = LHS & RHS;
- return LHS;
-}
-
-/// @brief Base class for symbols in the JIT.
-class JITSymbolBase {
-public:
- JITSymbolBase(JITSymbolFlags Flags) : Flags(Flags) {}
-
- JITSymbolFlags getFlags() const { return Flags; }
-
- bool isWeak() const {
- return (Flags & JITSymbolFlags::Weak) == JITSymbolFlags::Weak;
- }
-
- bool isExported() const {
- return (Flags & JITSymbolFlags::Exported) == JITSymbolFlags::Exported;
- }
-
- static JITSymbolFlags flagsFromGlobalValue(const GlobalValue &GV) {
- JITSymbolFlags Flags = JITSymbolFlags::None;
- if (GV.hasWeakLinkage())
- Flags |= JITSymbolFlags::Weak;
- if (!GV.hasLocalLinkage() && !GV.hasHiddenVisibility())
- Flags |= JITSymbolFlags::Exported;
- return Flags;
- }
-
- static JITSymbolFlags
- flagsFromObjectSymbol(const object::BasicSymbolRef &Symbol) {
- JITSymbolFlags Flags = JITSymbolFlags::None;
- if (Symbol.getFlags() & object::BasicSymbolRef::SF_Weak)
- Flags |= JITSymbolFlags::Weak;
- if (Symbol.getFlags() & object::BasicSymbolRef::SF_Exported)
- Flags |= JITSymbolFlags::Exported;
- return Flags;
- }
-
-private:
- JITSymbolFlags Flags;
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/include/llvm/ExecutionEngine/ObjectCache.h b/include/llvm/ExecutionEngine/ObjectCache.h
index cc01a4e58999..077044408e09 100644
--- a/include/llvm/ExecutionEngine/ObjectCache.h
+++ b/include/llvm/ExecutionEngine/ObjectCache.h
@@ -1,4 +1,4 @@
-//===-- ObjectCache.h - Class definition for the ObjectCache -----C++ -*-===//
+//===-- ObjectCache.h - Class definition for the ObjectCache ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,6 +11,7 @@
#define LLVM_EXECUTIONENGINE_OBJECTCACHE_H
#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
namespace llvm {
@@ -21,10 +22,11 @@ class Module;
/// have already been compiled and an object file is available.
class ObjectCache {
virtual void anchor();
+
public:
- ObjectCache() { }
+ ObjectCache() = default;
- virtual ~ObjectCache() { }
+ virtual ~ObjectCache() = default;
/// notifyObjectCompiled - Provides a pointer to compiled code for Module M.
virtual void notifyObjectCompiled(const Module *M, MemoryBufferRef Obj) = 0;
@@ -35,6 +37,6 @@ public:
virtual std::unique_ptr<MemoryBuffer> getObject(const Module* M) = 0;
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_EXECUTIONENGINE_OBJECTCACHE_H
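
A minimal sketch of an ObjectCache implementation against the interface shown above (not from this change): an in-memory cache keyed by module identifier. The InMemoryCache name and the keying scheme are assumptions of the sketch.

#include "llvm/ADT/StringMap.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/IR/Module.h"

class InMemoryCache : public llvm::ObjectCache {
public:
  void notifyObjectCompiled(const llvm::Module *M,
                            llvm::MemoryBufferRef Obj) override {
    // Keep our own copy of the object image for later reuse.
    Cache[M->getModuleIdentifier()] = llvm::MemoryBuffer::getMemBufferCopy(
        Obj.getBuffer(), Obj.getBufferIdentifier());
  }

  std::unique_ptr<llvm::MemoryBuffer> getObject(const llvm::Module *M) override {
    auto I = Cache.find(M->getModuleIdentifier());
    if (I == Cache.end())
      return nullptr; // Cache miss: the JIT compiles M as usual.
    return llvm::MemoryBuffer::getMemBufferCopy(I->second->getBuffer());
  }

private:
  llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> Cache;
};
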
diff --git a/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h b/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
index b07561152ec0..0f00ad006a7d 100644
--- a/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
+++ b/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
@@ -49,7 +49,7 @@ public:
init(this->SV.begin(), this->SV.end(), false);
}
- const char* getBufferIdentifier() const override { return BufferName.c_str(); }
+ StringRef getBufferIdentifier() const override { return BufferName; }
BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
diff --git a/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
index ef88dd03ad4f..aa096478cd9e 100644
--- a/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -15,16 +15,35 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
-#include "IndirectionUtils.h"
-#include "LambdaResolver.h"
-#include "LogicalDylib.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iterator>
#include <list>
#include <memory>
#include <set>
+#include <string>
#include <utility>
+#include <vector>
namespace llvm {
namespace orc {
@@ -41,11 +60,11 @@ template <typename BaseLayerT,
typename IndirectStubsMgrT = IndirectStubsManager>
class CompileOnDemandLayer {
private:
-
template <typename MaterializerFtor>
class LambdaMaterializer final : public ValueMaterializer {
public:
LambdaMaterializer(MaterializerFtor M) : M(std::move(M)) {}
+
Value *materialize(Value *V) final { return M(V); }
private:
@@ -67,7 +86,8 @@ private:
ResourceOwner() = default;
ResourceOwner(const ResourceOwner&) = delete;
ResourceOwner& operator=(const ResourceOwner&) = delete;
- virtual ~ResourceOwner() { }
+ virtual ~ResourceOwner() = default;
+
virtual ResourceT& getResource() const = 0;
};
@@ -76,7 +96,9 @@ private:
public:
ResourceOwnerImpl(ResourcePtrT ResourcePtr)
: ResourcePtr(std::move(ResourcePtr)) {}
+
ResourceT& getResource() const override { return *ResourcePtr; }
+
private:
ResourcePtrT ResourcePtr;
};
@@ -88,76 +110,80 @@ private:
return llvm::make_unique<RO>(std::move(ResourcePtr));
}
- struct LogicalModuleResources {
- std::unique_ptr<ResourceOwner<Module>> SourceModule;
- std::set<const Function*> StubsToClone;
- std::unique_ptr<IndirectStubsMgrT> StubsMgr;
-
- LogicalModuleResources() = default;
-
- // Explicit move constructor to make MSVC happy.
- LogicalModuleResources(LogicalModuleResources &&Other)
- : SourceModule(std::move(Other.SourceModule)),
- StubsToClone(std::move(Other.StubsToClone)),
- StubsMgr(std::move(Other.StubsMgr)) {}
-
- // Explicit move assignment to make MSVC happy.
- LogicalModuleResources& operator=(LogicalModuleResources &&Other) {
- SourceModule = std::move(Other.SourceModule);
- StubsToClone = std::move(Other.StubsToClone);
- StubsMgr = std::move(Other.StubsMgr);
- return *this;
- }
-
- JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
- if (Name.endswith("$stub_ptr") && !ExportedSymbolsOnly) {
- assert(!ExportedSymbolsOnly && "Stubs are never exported");
- return StubsMgr->findPointer(Name.drop_back(9));
- }
- return StubsMgr->findStub(Name, ExportedSymbolsOnly);
+ class StaticGlobalRenamer {
+ public:
+ StaticGlobalRenamer() = default;
+ StaticGlobalRenamer(StaticGlobalRenamer &&) = default;
+ StaticGlobalRenamer &operator=(StaticGlobalRenamer &&) = default;
+
+ void rename(Module &M) {
+ for (auto &F : M)
+ if (F.hasLocalLinkage())
+ F.setName("$static." + Twine(NextId++));
+ for (auto &G : M.globals())
+ if (G.hasLocalLinkage())
+ G.setName("$static." + Twine(NextId++));
}
+ private:
+ unsigned NextId = 0;
};
- struct LogicalDylibResources {
- typedef std::function<RuntimeDyld::SymbolInfo(const std::string&)>
- SymbolResolverFtor;
+ struct LogicalDylib {
+ typedef std::function<JITSymbol(const std::string&)> SymbolResolverFtor;
typedef std::function<typename BaseLayerT::ModuleSetHandleT(
BaseLayerT&,
std::unique_ptr<Module>,
- std::unique_ptr<RuntimeDyld::SymbolResolver>)>
+ std::unique_ptr<JITSymbolResolver>)>
ModuleAdderFtor;
- LogicalDylibResources() = default;
+ struct SourceModuleEntry {
+ std::unique_ptr<ResourceOwner<Module>> SourceMod;
+ std::set<Function*> StubsToClone;
+ };
- // Explicit move constructor to make MSVC happy.
- LogicalDylibResources(LogicalDylibResources &&Other)
- : ExternalSymbolResolver(std::move(Other.ExternalSymbolResolver)),
- MemMgr(std::move(Other.MemMgr)),
- ModuleAdder(std::move(Other.ModuleAdder)) {}
+ typedef std::vector<SourceModuleEntry> SourceModulesList;
+ typedef typename SourceModulesList::size_type SourceModuleHandle;
- // Explicit move assignment operator to make MSVC happy.
- LogicalDylibResources& operator=(LogicalDylibResources &&Other) {
- ExternalSymbolResolver = std::move(Other.ExternalSymbolResolver);
- MemMgr = std::move(Other.MemMgr);
- ModuleAdder = std::move(Other.ModuleAdder);
- return *this;
+ SourceModuleHandle
+ addSourceModule(std::unique_ptr<ResourceOwner<Module>> M) {
+ SourceModuleHandle H = SourceModules.size();
+ SourceModules.push_back(SourceModuleEntry());
+ SourceModules.back().SourceMod = std::move(M);
+ return H;
}
- std::unique_ptr<RuntimeDyld::SymbolResolver> ExternalSymbolResolver;
+ Module& getSourceModule(SourceModuleHandle H) {
+ return SourceModules[H].SourceMod->getResource();
+ }
+
+ std::set<Function*>& getStubsToClone(SourceModuleHandle H) {
+ return SourceModules[H].StubsToClone;
+ }
+
+ JITSymbol findSymbol(BaseLayerT &BaseLayer, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = StubsMgr->findStub(Name, ExportedSymbolsOnly))
+ return Sym;
+ for (auto BLH : BaseLayerHandles)
+ if (auto Sym = BaseLayer.findSymbolIn(BLH, Name, ExportedSymbolsOnly))
+ return Sym;
+ return nullptr;
+ }
+
+ std::unique_ptr<JITSymbolResolver> ExternalSymbolResolver;
std::unique_ptr<ResourceOwner<RuntimeDyld::MemoryManager>> MemMgr;
+ std::unique_ptr<IndirectStubsMgrT> StubsMgr;
+ StaticGlobalRenamer StaticRenamer;
ModuleAdderFtor ModuleAdder;
+ SourceModulesList SourceModules;
+ std::vector<BaseLayerModuleSetHandleT> BaseLayerHandles;
};
- typedef LogicalDylib<BaseLayerT, LogicalModuleResources,
- LogicalDylibResources> CODLogicalDylib;
-
- typedef typename CODLogicalDylib::LogicalModuleHandle LogicalModuleHandle;
- typedef std::list<CODLogicalDylib> LogicalDylibList;
+ typedef std::list<LogicalDylib> LogicalDylibList;
public:
-
/// @brief Handle to a set of loaded modules.
typedef typename LogicalDylibList::iterator ModuleSetHandleT;
@@ -185,18 +211,17 @@ public:
MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver) {
- LogicalDylibs.push_back(CODLogicalDylib(BaseLayer));
- auto &LDResources = LogicalDylibs.back().getDylibResources();
-
- LDResources.ExternalSymbolResolver = std::move(Resolver);
+ LogicalDylibs.push_back(LogicalDylib());
+ auto &LD = LogicalDylibs.back();
+ LD.ExternalSymbolResolver = std::move(Resolver);
+ LD.StubsMgr = CreateIndirectStubsManager();
auto &MemMgrRef = *MemMgr;
- LDResources.MemMgr =
- wrapOwnership<RuntimeDyld::MemoryManager>(std::move(MemMgr));
+ LD.MemMgr = wrapOwnership<RuntimeDyld::MemoryManager>(std::move(MemMgr));
- LDResources.ModuleAdder =
+ LD.ModuleAdder =
[&MemMgrRef](BaseLayerT &B, std::unique_ptr<Module> M,
- std::unique_ptr<RuntimeDyld::SymbolResolver> R) {
+ std::unique_ptr<JITSymbolResolver> R) {
std::vector<std::unique_ptr<Module>> Ms;
Ms.push_back(std::move(M));
return B.addModuleSet(std::move(Ms), &MemMgrRef, std::move(R));
@@ -223,9 +248,12 @@ public:
/// @return A handle for the given named symbol, if it exists.
JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
for (auto LDI = LogicalDylibs.begin(), LDE = LogicalDylibs.end();
- LDI != LDE; ++LDI)
- if (auto Symbol = findSymbolIn(LDI, Name, ExportedSymbolsOnly))
- return Symbol;
+ LDI != LDE; ++LDI) {
+ if (auto Sym = LDI->StubsMgr->findStub(Name, ExportedSymbolsOnly))
+ return Sym;
+ if (auto Sym = findSymbolIn(LDI, Name, ExportedSymbolsOnly))
+ return Sym;
+ }
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
@@ -233,25 +261,50 @@ public:
/// below this one.
JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
- return H->findSymbol(Name, ExportedSymbolsOnly);
+ return H->findSymbol(BaseLayer, Name, ExportedSymbolsOnly);
}
-private:
+ /// @brief Update the stub for the given function to point at FnBodyAddr.
+ /// This can be used to support re-optimization.
+ /// @return true if the function exists and the stub is updated, false
+ /// otherwise.
+ //
+ // FIXME: We should track and free associated resources (unused compile
+ // callbacks, uncompiled IR, and no-longer-needed/reachable function
+ // implementations).
+ // FIXME: Return Error once the JIT APIs are Errorized.
+ bool updatePointer(std::string FuncName, JITTargetAddress FnBodyAddr) {
+ // Find out which logical dylib contains our symbol.
+ auto LDI = LogicalDylibs.begin();
+ for (auto LDE = LogicalDylibs.end(); LDI != LDE; ++LDI) {
+ if (auto LMResources = LDI->getLogicalModuleResourcesForSymbol(FuncName, false)) {
+ Module &SrcM = LMResources->SourceModule->getResource();
+ std::string CalledFnName = mangle(FuncName, SrcM.getDataLayout());
+ if (auto EC = LMResources->StubsMgr->updatePointer(CalledFnName, FnBodyAddr))
+ return false;
+ else
+ return true;
+ }
+ }
+ return false;
+ }
+private:
template <typename ModulePtrT>
- void addLogicalModule(CODLogicalDylib &LD, ModulePtrT SrcMPtr) {
+ void addLogicalModule(LogicalDylib &LD, ModulePtrT SrcMPtr) {
+
+ // Rename all static functions / globals to $static.X:
+ // This uniques the names across all modules in the logical dylib,
+ // simplifying symbol lookup.
+ LD.StaticRenamer.rename(*SrcMPtr);
// Bump the linkage and rename any anonymous/private members in SrcM to
// ensure that everything will resolve properly after we partition SrcM.
makeAllSymbolsExternallyAccessible(*SrcMPtr);
// Create a logical module handle for SrcM within the logical dylib.
- auto LMH = LD.createLogicalModule();
- auto &LMResources = LD.getLogicalModuleResources(LMH);
-
- LMResources.SourceModule = wrapOwnership<Module>(std::move(SrcMPtr));
-
- Module &SrcM = LMResources.SourceModule->getResource();
+ Module &SrcM = *SrcMPtr;
+ auto LMId = LD.addSourceModule(wrapOwnership<Module>(std::move(SrcMPtr)));
// Create stub functions.
const DataLayout &DL = SrcM.getDataLayout();
@@ -262,34 +315,40 @@ private:
if (F.isDeclaration())
continue;
+ // Skip weak functions for which we already have definitions.
+ auto MangledName = mangle(F.getName(), DL);
+ if (F.hasWeakLinkage() || F.hasLinkOnceLinkage())
+ if (auto Sym = LD.findSymbol(BaseLayer, MangledName, false))
+ continue;
+
// Record all functions defined by this module.
if (CloneStubsIntoPartitions)
- LMResources.StubsToClone.insert(&F);
+ LD.getStubsToClone(LMId).insert(&F);
// Create a callback, associate it with the stub for the function,
// and set the compile action to compile the partition containing the
// function.
auto CCInfo = CompileCallbackMgr.getCompileCallback();
- StubInits[mangle(F.getName(), DL)] =
+ StubInits[MangledName] =
std::make_pair(CCInfo.getAddress(),
- JITSymbolBase::flagsFromGlobalValue(F));
- CCInfo.setCompileAction([this, &LD, LMH, &F]() {
- return this->extractAndCompile(LD, LMH, F);
+ JITSymbolFlags::fromGlobalValue(F));
+ CCInfo.setCompileAction([this, &LD, LMId, &F]() {
+ return this->extractAndCompile(LD, LMId, F);
});
}
- LMResources.StubsMgr = CreateIndirectStubsManager();
- auto EC = LMResources.StubsMgr->createStubs(StubInits);
+ auto EC = LD.StubsMgr->createStubs(StubInits);
(void)EC;
// FIXME: This should be propagated back to the user. Stub creation may
// fail for remote JITs.
assert(!EC && "Error generating stubs");
}
- // If this module doesn't contain any globals or aliases we can bail out
- // early and avoid the overhead of creating and managing an empty globals
- // module.
- if (SrcM.global_empty() && SrcM.alias_empty())
+ // If this module doesn't contain any globals, aliases, or module flags then
+ // we can bail out early and avoid the overhead of creating and managing an
+ // empty globals module.
+ if (SrcM.global_empty() && SrcM.alias_empty() &&
+ !SrcM.getModuleFlagsMetadata())
return;
// Create the GlobalValues module.
@@ -309,12 +368,15 @@ private:
if (!VMap.count(&A))
cloneGlobalAliasDecl(*GVsM, A, VMap);
+ // Clone the module flags.
+ cloneModuleFlagsMetadata(*GVsM, SrcM, VMap);
+
// Now we need to clone the GV and alias initializers.
// Initializers may refer to functions declared (but not defined) in this
// module. Build a materializer to clone decls on demand.
auto Materializer = createLambdaMaterializer(
- [this, &GVsM, &LMResources](Value *V) -> Value* {
+ [this, &LD, &GVsM](Value *V) -> Value* {
if (auto *F = dyn_cast<Function>(V)) {
// Decls in the original module just get cloned.
if (F->isDeclaration())
@@ -325,7 +387,7 @@ private:
// instead.
const DataLayout &DL = GVsM->getDataLayout();
std::string FName = mangle(F->getName(), DL);
- auto StubSym = LMResources.StubsMgr->findStub(FName, false);
+ auto StubSym = LD.StubsMgr->findStub(FName, false);
unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(F->getType());
ConstantInt *StubAddr =
ConstantInt::get(GVsM->getContext(),
@@ -357,21 +419,20 @@ private:
// Build a resolver for the globals module and add it to the base layer.
auto GVsResolver = createLambdaResolver(
- [&LD, LMH](const std::string &Name) {
- auto &LMResources = LD.getLogicalModuleResources(LMH);
- if (auto Sym = LMResources.StubsMgr->findStub(Name, false))
- return Sym.toRuntimeDyldSymbol();
- auto &LDResolver = LD.getDylibResources().ExternalSymbolResolver;
- return LDResolver->findSymbolInLogicalDylib(Name);
+ [this, &LD, LMId](const std::string &Name) {
+ if (auto Sym = LD.StubsMgr->findStub(Name, false))
+ return Sym;
+ if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
+ return Sym;
+ return LD.ExternalSymbolResolver->findSymbolInLogicalDylib(Name);
},
[&LD](const std::string &Name) {
- auto &LDResolver = LD.getDylibResources().ExternalSymbolResolver;
- return LDResolver->findSymbol(Name);
+ return LD.ExternalSymbolResolver->findSymbol(Name);
});
- auto GVsH = LD.getDylibResources().ModuleAdder(BaseLayer, std::move(GVsM),
- std::move(GVsResolver));
- LD.addToLogicalModule(LMH, GVsH);
+ auto GVsH = LD.ModuleAdder(BaseLayer, std::move(GVsM),
+ std::move(GVsResolver));
+ LD.BaseLayerHandles.push_back(GVsH);
}
static std::string mangle(StringRef Name, const DataLayout &DL) {
@@ -383,11 +444,11 @@ private:
return MangledName;
}
- TargetAddress extractAndCompile(CODLogicalDylib &LD,
- LogicalModuleHandle LMH,
- Function &F) {
- auto &LMResources = LD.getLogicalModuleResources(LMH);
- Module &SrcM = LMResources.SourceModule->getResource();
+ JITTargetAddress
+ extractAndCompile(LogicalDylib &LD,
+ typename LogicalDylib::SourceModuleHandle LMId,
+ Function &F) {
+ Module &SrcM = LD.getSourceModule(LMId);
// If F is a declaration we must already have compiled it.
if (F.isDeclaration())
@@ -397,15 +458,15 @@ private:
std::string CalledFnName = mangle(F.getName(), SrcM.getDataLayout());
auto Part = Partition(F);
- auto PartH = emitPartition(LD, LMH, Part);
+ auto PartH = emitPartition(LD, LMId, Part);
- TargetAddress CalledAddr = 0;
+ JITTargetAddress CalledAddr = 0;
for (auto *SubF : Part) {
std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
auto FnBodySym = BaseLayer.findSymbolIn(PartH, FnName, false);
assert(FnBodySym && "Couldn't find function body.");
- TargetAddress FnBodyAddr = FnBodySym.getAddress();
+ JITTargetAddress FnBodyAddr = FnBodySym.getAddress();
// If this is the function we're calling record the address so we can
// return it from this function.
@@ -413,7 +474,7 @@ private:
CalledAddr = FnBodyAddr;
// Update the function body pointer for the stub.
- if (auto EC = LMResources.StubsMgr->updatePointer(FnName, FnBodyAddr))
+ if (auto EC = LD.StubsMgr->updatePointer(FnName, FnBodyAddr))
return 0;
}
@@ -421,11 +482,11 @@ private:
}
template <typename PartitionT>
- BaseLayerModuleSetHandleT emitPartition(CODLogicalDylib &LD,
- LogicalModuleHandle LMH,
- const PartitionT &Part) {
- auto &LMResources = LD.getLogicalModuleResources(LMH);
- Module &SrcM = LMResources.SourceModule->getResource();
+ BaseLayerModuleSetHandleT
+ emitPartition(LogicalDylib &LD,
+ typename LogicalDylib::SourceModuleHandle LMId,
+ const PartitionT &Part) {
+ Module &SrcM = LD.getSourceModule(LMId);
// Create the module.
std::string NewName = SrcM.getName();
@@ -438,14 +499,14 @@ private:
M->setDataLayout(SrcM.getDataLayout());
ValueToValueMapTy VMap;
- auto Materializer = createLambdaMaterializer([this, &LMResources, &M,
+ auto Materializer = createLambdaMaterializer([this, &LD, &LMId, &M,
&VMap](Value *V) -> Value * {
if (auto *GV = dyn_cast<GlobalVariable>(V))
return cloneGlobalVariableDecl(*M, *GV);
if (auto *F = dyn_cast<Function>(V)) {
// Check whether we want to clone an available_externally definition.
- if (!LMResources.StubsToClone.count(F))
+ if (!LD.getStubsToClone(LMId).count(F))
return cloneFunctionDecl(*M, *F);
// Ok - we want an inlinable stub. For that to work we need a decl
@@ -485,19 +546,16 @@ private:
// Create memory manager and symbol resolver.
auto Resolver = createLambdaResolver(
- [this, &LD, LMH](const std::string &Name) {
- if (auto Sym = LD.findSymbolInternally(LMH, Name))
- return Sym.toRuntimeDyldSymbol();
- auto &LDResolver = LD.getDylibResources().ExternalSymbolResolver;
- return LDResolver->findSymbolInLogicalDylib(Name);
+ [this, &LD, LMId](const std::string &Name) {
+ if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
+ return Sym;
+ return LD.ExternalSymbolResolver->findSymbolInLogicalDylib(Name);
},
[this, &LD](const std::string &Name) {
- auto &LDResolver = LD.getDylibResources().ExternalSymbolResolver;
- return LDResolver->findSymbol(Name);
+ return LD.ExternalSymbolResolver->findSymbol(Name);
});
- return LD.getDylibResources().ModuleAdder(BaseLayer, std::move(M),
- std::move(Resolver));
+ return LD.ModuleAdder(BaseLayer, std::move(M), std::move(Resolver));
}
BaseLayerT &BaseLayer;
@@ -509,7 +567,7 @@ private:
bool CloneStubsIntoPartitions;
};
-} // End namespace orc.
-} // End namespace llvm.
+} // end namespace orc
+} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
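
Rough usage sketch for stacking CompileOnDemandLayer on an existing IR-compiling layer, along the lines of the LLVM 4.0-era Orc tutorials; nothing here is taken from this hunk. CompileLayer and TT stand for the client's existing compile layer and target triple, the two helper functions are the ones declared in the IndirectionUtils.h changes further down, and the constructor argument order is an assumption.

// Target-specific compile-callback and stub machinery (see IndirectionUtils.h below).
auto CCMgr = llvm::orc::createLocalCompileCallbackManager(TT, /*ErrorHandlerAddress=*/0);
auto ISMBuilder = llvm::orc::createLocalIndirectStubsManagerBuilder(TT);

// Partition per function: the first call through a stub compiles only the
// function behind that stub.
auto PerFunction = [](llvm::Function &F) { return std::set<llvm::Function *>({&F}); };

llvm::orc::CompileOnDemandLayer<decltype(CompileLayer)> CODLayer(
    CompileLayer, PerFunction, *CCMgr, ISMBuilder);
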
diff --git a/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
index c10508cc8a62..a32278b8a81e 100644
--- a/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
+++ b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -14,9 +14,9 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
-#include "JITSymbol.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include <vector>
@@ -43,12 +43,12 @@ public:
/// This class provides a read-only view of the element with any casts on
/// the function stripped away.
struct Element {
- Element(unsigned Priority, const Function *Func, const Value *Data)
+ Element(unsigned Priority, Function *Func, Value *Data)
: Priority(Priority), Func(Func), Data(Data) {}
unsigned Priority;
- const Function *Func;
- const Value *Data;
+ Function *Func;
+ Value *Data;
};
/// @brief Construct an iterator instance. If End is true then this iterator
@@ -144,10 +144,10 @@ public:
}
/// Search overridden symbols.
- RuntimeDyld::SymbolInfo searchOverrides(const std::string &Name) {
+ JITEvaluatedSymbol searchOverrides(const std::string &Name) {
auto I = CXXRuntimeOverrides.find(Name);
if (I != CXXRuntimeOverrides.end())
- return RuntimeDyld::SymbolInfo(I->second, JITSymbolFlags::Exported);
+ return JITEvaluatedSymbol(I->second, JITSymbolFlags::Exported);
return nullptr;
}
@@ -158,15 +158,15 @@ public:
private:
template <typename PtrTy>
- TargetAddress toTargetAddress(PtrTy* P) {
- return static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(P));
+ JITTargetAddress toTargetAddress(PtrTy* P) {
+ return static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(P));
}
- void addOverride(const std::string &Name, TargetAddress Addr) {
+ void addOverride(const std::string &Name, JITTargetAddress Addr) {
CXXRuntimeOverrides.insert(std::make_pair(Name, Addr));
}
- StringMap<TargetAddress> CXXRuntimeOverrides;
+ StringMap<JITTargetAddress> CXXRuntimeOverrides;
typedef void (*DestructorPtr)(void*);
typedef std::pair<DestructorPtr, void*> CXXDestructorDataPair;
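
Illustrative only: a resolver-side lookup that consults the overrides above before any other search. CXXRuntimeOverrides is assumed to be a LocalCXXRuntimeOverrides instance owned by the client.

// Prefer the JIT-local C++ runtime overrides before any other lookup.
auto LookupWithOverrides =
    [&](const std::string &Name) -> llvm::JITSymbol {
  if (auto Sym = CXXRuntimeOverrides.searchOverrides(Name))
    return llvm::JITSymbol(Sym);
  return nullptr; // fall through to whatever lookup the client does next
};
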
diff --git a/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h b/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
index 9fa222c340f8..634d1480ae4c 100644
--- a/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
@@ -15,7 +15,7 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
-#include "JITSymbol.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
#include <map>
namespace llvm {
@@ -52,7 +52,7 @@ public:
void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeModuleSet(H); }
/// @brief Manually set the address to return for the given symbol.
- void setGlobalMapping(const std::string &Name, TargetAddress Addr) {
+ void setGlobalMapping(const std::string &Name, JITTargetAddress Addr) {
SymbolTable[Name] = Addr;
}
@@ -99,7 +99,7 @@ public:
private:
BaseLayerT &BaseLayer;
- std::map<std::string, TargetAddress> SymbolTable;
+ std::map<std::string, JITTargetAddress> SymbolTable;
};
} // End namespace orc.
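
Illustrative only: pinning a symbol to a host-process address through the mapping layer. BaseLayer stands for whatever Orc layer the client already has; the printf mapping (and any platform name-mangling prefix it might need) is just an example.

#include <cstdint>
#include <cstdio>

llvm::orc::GlobalMappingLayer<decltype(BaseLayer)> MappingLayer(BaseLayer);
MappingLayer.setGlobalMapping(
    "printf", // adjust for the platform's mangling prefix if required
    static_cast<llvm::JITTargetAddress>(
        reinterpret_cast<uintptr_t>(&std::printf)));
// Lookups through MappingLayer now see "printf" without consulting BaseLayer.
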
diff --git a/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
index e6ce18a42b8b..f16dd021ea51 100644
--- a/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -14,9 +14,8 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
-#include "JITSymbol.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
-#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Object/ObjectFile.h"
#include <memory>
diff --git a/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
index 4dabb9a41494..c67297b111b9 100644
--- a/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -14,7 +14,7 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
-#include "JITSymbol.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
namespace llvm {
namespace orc {
diff --git a/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
index 51172c51e136..07bbd921dad6 100644
--- a/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
+++ b/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -14,14 +14,26 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
-#include "JITSymbol.h"
-#include "LambdaResolver.h"
-#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Memory.h"
#include "llvm/Support/Process.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <system_error>
+#include <utility>
+#include <vector>
namespace llvm {
namespace orc {
@@ -29,37 +41,37 @@ namespace orc {
/// @brief Target-independent base class for compile callback management.
class JITCompileCallbackManager {
public:
- typedef std::function<TargetAddress()> CompileFtor;
+ typedef std::function<JITTargetAddress()> CompileFtor;
/// @brief Handle to a newly created compile callback. Can be used to get an
/// IR constant representing the address of the trampoline, and to set
/// the compile action for the callback.
class CompileCallbackInfo {
public:
- CompileCallbackInfo(TargetAddress Addr, CompileFtor &Compile)
+ CompileCallbackInfo(JITTargetAddress Addr, CompileFtor &Compile)
: Addr(Addr), Compile(Compile) {}
- TargetAddress getAddress() const { return Addr; }
+ JITTargetAddress getAddress() const { return Addr; }
void setCompileAction(CompileFtor Compile) {
this->Compile = std::move(Compile);
}
private:
- TargetAddress Addr;
+ JITTargetAddress Addr;
CompileFtor &Compile;
};
/// @brief Construct a JITCompileCallbackManager.
/// @param ErrorHandlerAddress The address of an error handler in the target
/// process to be used if a compile callback fails.
- JITCompileCallbackManager(TargetAddress ErrorHandlerAddress)
+ JITCompileCallbackManager(JITTargetAddress ErrorHandlerAddress)
: ErrorHandlerAddress(ErrorHandlerAddress) {}
- virtual ~JITCompileCallbackManager() {}
+ virtual ~JITCompileCallbackManager() = default;
/// @brief Execute the callback for the given trampoline id. Called by the JIT
/// to compile functions on demand.
- TargetAddress executeCompileCallback(TargetAddress TrampolineAddr) {
+ JITTargetAddress executeCompileCallback(JITTargetAddress TrampolineAddr) {
auto I = ActiveTrampolines.find(TrampolineAddr);
// FIXME: Also raise an error in the Orc error-handler when we finally have
// one.
@@ -86,13 +98,13 @@ public:
/// @brief Reserve a compile callback.
CompileCallbackInfo getCompileCallback() {
- TargetAddress TrampolineAddr = getAvailableTrampolineAddr();
+ JITTargetAddress TrampolineAddr = getAvailableTrampolineAddr();
auto &Compile = this->ActiveTrampolines[TrampolineAddr];
return CompileCallbackInfo(TrampolineAddr, Compile);
}
/// @brief Get a CompileCallbackInfo for an existing callback.
- CompileCallbackInfo getCompileCallbackInfo(TargetAddress TrampolineAddr) {
+ CompileCallbackInfo getCompileCallbackInfo(JITTargetAddress TrampolineAddr) {
auto I = ActiveTrampolines.find(TrampolineAddr);
assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
return CompileCallbackInfo(I->first, I->second);
@@ -103,7 +115,7 @@ public:
/// Note: Callbacks are auto-released after they execute. This method should
/// only be called to manually release a callback that is not going to
/// execute.
- void releaseCompileCallback(TargetAddress TrampolineAddr) {
+ void releaseCompileCallback(JITTargetAddress TrampolineAddr) {
auto I = ActiveTrampolines.find(TrampolineAddr);
assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
ActiveTrampolines.erase(I);
@@ -111,19 +123,19 @@ public:
}
protected:
- TargetAddress ErrorHandlerAddress;
+ JITTargetAddress ErrorHandlerAddress;
- typedef std::map<TargetAddress, CompileFtor> TrampolineMapT;
+ typedef std::map<JITTargetAddress, CompileFtor> TrampolineMapT;
TrampolineMapT ActiveTrampolines;
- std::vector<TargetAddress> AvailableTrampolines;
+ std::vector<JITTargetAddress> AvailableTrampolines;
private:
- TargetAddress getAvailableTrampolineAddr() {
+ JITTargetAddress getAvailableTrampolineAddr() {
if (this->AvailableTrampolines.empty())
grow();
assert(!this->AvailableTrampolines.empty() &&
"Failed to grow available trampolines.");
- TargetAddress TrampolineAddr = this->AvailableTrampolines.back();
+ JITTargetAddress TrampolineAddr = this->AvailableTrampolines.back();
this->AvailableTrampolines.pop_back();
return TrampolineAddr;
}
@@ -141,7 +153,7 @@ public:
/// @brief Construct a InProcessJITCompileCallbackManager.
/// @param ErrorHandlerAddress The address of an error handler in the target
/// process to be used if a compile callback fails.
- LocalJITCompileCallbackManager(TargetAddress ErrorHandlerAddress)
+ LocalJITCompileCallbackManager(JITTargetAddress ErrorHandlerAddress)
: JITCompileCallbackManager(ErrorHandlerAddress) {
/// Set up the resolver block.
@@ -161,11 +173,12 @@ public:
}
private:
- static TargetAddress reenter(void *CCMgr, void *TrampolineId) {
+ static JITTargetAddress reenter(void *CCMgr, void *TrampolineId) {
JITCompileCallbackManager *Mgr =
static_cast<JITCompileCallbackManager *>(CCMgr);
return Mgr->executeCompileCallback(
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(TrampolineId)));
+ static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(TrampolineId)));
}
void grow() override {
@@ -188,7 +201,7 @@ private:
for (unsigned I = 0; I < NumTrampolines; ++I)
this->AvailableTrampolines.push_back(
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(
TrampolineMem + (I * TargetT::TrampolineSize))));
EC = sys::Memory::protectMappedMemory(TrampolineBlock.getMemoryBlock(),
@@ -207,12 +220,12 @@ private:
class IndirectStubsManager {
public:
/// @brief Map type for initializing the manager. See init.
- typedef StringMap<std::pair<TargetAddress, JITSymbolFlags>> StubInitsMap;
+ typedef StringMap<std::pair<JITTargetAddress, JITSymbolFlags>> StubInitsMap;
- virtual ~IndirectStubsManager() {}
+ virtual ~IndirectStubsManager() = default;
/// @brief Create a single stub with the given name, target address and flags.
- virtual Error createStub(StringRef StubName, TargetAddress StubAddr,
+ virtual Error createStub(StringRef StubName, JITTargetAddress StubAddr,
JITSymbolFlags StubFlags) = 0;
/// @brief Create StubInits.size() stubs with the given names, target
@@ -228,7 +241,7 @@ public:
virtual JITSymbol findPointer(StringRef Name) = 0;
/// @brief Change the value of the implementation pointer for the stub.
- virtual Error updatePointer(StringRef Name, TargetAddress NewAddr) = 0;
+ virtual Error updatePointer(StringRef Name, JITTargetAddress NewAddr) = 0;
private:
virtual void anchor();
@@ -239,7 +252,7 @@ private:
template <typename TargetT>
class LocalIndirectStubsManager : public IndirectStubsManager {
public:
- Error createStub(StringRef StubName, TargetAddress StubAddr,
+ Error createStub(StringRef StubName, JITTargetAddress StubAddr,
JITSymbolFlags StubFlags) override {
if (auto Err = reserveStubs(1))
return Err;
@@ -268,9 +281,9 @@ public:
void *StubAddr = IndirectStubsInfos[Key.first].getStub(Key.second);
assert(StubAddr && "Missing stub address");
auto StubTargetAddr =
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(StubAddr));
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(StubAddr));
auto StubSymbol = JITSymbol(StubTargetAddr, I->second.second);
- if (ExportedStubsOnly && !StubSymbol.isExported())
+ if (ExportedStubsOnly && !StubSymbol.getFlags().isExported())
return nullptr;
return StubSymbol;
}
@@ -283,11 +296,11 @@ public:
void *PtrAddr = IndirectStubsInfos[Key.first].getPtr(Key.second);
assert(PtrAddr && "Missing pointer address");
auto PtrTargetAddr =
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(PtrAddr));
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(PtrAddr));
return JITSymbol(PtrTargetAddr, I->second.second);
}
- Error updatePointer(StringRef Name, TargetAddress NewAddr) override {
+ Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override {
auto I = StubIndexes.find(Name);
assert(I != StubIndexes.end() && "No stub pointer for symbol");
auto Key = I->second.first;
@@ -313,7 +326,7 @@ private:
return Error::success();
}
- void createStubInternal(StringRef StubName, TargetAddress InitAddr,
+ void createStubInternal(StringRef StubName, JITTargetAddress InitAddr,
JITSymbolFlags StubFlags) {
auto Key = FreeStubs.back();
FreeStubs.pop_back();
@@ -335,7 +348,7 @@ private:
/// manager if a compile callback fails.
std::unique_ptr<JITCompileCallbackManager>
createLocalCompileCallbackManager(const Triple &T,
- TargetAddress ErrorHandlerAddress);
+ JITTargetAddress ErrorHandlerAddress);
/// @brief Create a local indirect stubs manager builder.
///
@@ -348,7 +361,7 @@ createLocalIndirectStubsManagerBuilder(const Triple &T);
///
/// Usage example: Turn a trampoline address into a function pointer constant
/// for use in a stub.
-Constant *createIRTypedAddress(FunctionType &FT, TargetAddress Addr);
+Constant *createIRTypedAddress(FunctionType &FT, JITTargetAddress Addr);
/// @brief Create a function pointer with the given type, name, and initializer
/// in the given Module.
@@ -410,11 +423,15 @@ void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
ValueMaterializer *Materializer = nullptr,
GlobalVariable *NewGV = nullptr);
-/// @brief Clone
+/// @brief Clone a global alias declaration into a new module.
GlobalAlias *cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
ValueToValueMapTy &VMap);
-} // End namespace orc.
-} // End namespace llvm.
+/// @brief Clone module flags metadata into the destination module.
+void cloneModuleFlagsMetadata(Module &Dst, const Module &Src,
+ ValueToValueMapTy &VMap);
+
+} // end namespace orc
+} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
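
A sketch of the callback/stub handshake the classes above implement, using only the methods visible in this hunk. CCMgr and StubsMgr are assumed instances of JITCompileCallbackManager and an IndirectStubsManager; compileBody() is a hypothetical helper that compiles the function and returns its address.

auto CCInfo = CCMgr.getCompileCallback();
CCInfo.setCompileAction([&]() -> llvm::JITTargetAddress {
  llvm::JITTargetAddress BodyAddr = compileBody(); // hypothetical
  // Re-point the stub so subsequent calls bypass the callback entirely.
  if (auto Err = StubsMgr.updatePointer("foo", BodyAddr)) {
    llvm::consumeError(std::move(Err));
    return 0; // mirror the layer code above: return 0 on failure
  }
  return BodyAddr;
});

// Until the first call, "foo" resolves to a stub that bounces through the
// trampoline and triggers the compile action set above.
if (auto Err = StubsMgr.createStub("foo", CCInfo.getAddress(),
                                   llvm::JITSymbolFlags::Exported))
  llvm::consumeError(std::move(Err));
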
diff --git a/include/llvm/ExecutionEngine/Orc/JITSymbol.h b/include/llvm/ExecutionEngine/Orc/JITSymbol.h
deleted file mode 100644
index 464417e4e6d5..000000000000
--- a/include/llvm/ExecutionEngine/Orc/JITSymbol.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//===----------- JITSymbol.h - JIT symbol abstraction -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Abstraction for target process addresses.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
-#define LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
-
-#include "llvm/ExecutionEngine/JITSymbolFlags.h"
-#include "llvm/ExecutionEngine/RuntimeDyld.h"
-#include "llvm/Support/DataTypes.h"
-#include <cassert>
-#include <functional>
-
-namespace llvm {
-namespace orc {
-
-/// @brief Represents an address in the target process's address space.
-typedef uint64_t TargetAddress;
-
-/// @brief Represents a symbol in the JIT.
-class JITSymbol : public JITSymbolBase {
-public:
-
- typedef std::function<TargetAddress()> GetAddressFtor;
-
- /// @brief Create a 'null' symbol that represents failure to find a symbol
- /// definition.
- JITSymbol(std::nullptr_t)
- : JITSymbolBase(JITSymbolFlags::None), CachedAddr(0) {}
-
- /// @brief Create a symbol for a definition with a known address.
- JITSymbol(TargetAddress Addr, JITSymbolFlags Flags)
- : JITSymbolBase(Flags), CachedAddr(Addr) {}
-
- /// @brief Create a symbol for a definition that doesn't have a known address
- /// yet.
- /// @param GetAddress A functor to materialize a definition (fixing the
- /// address) on demand.
- ///
- /// This constructor allows a JIT layer to provide a reference to a symbol
- /// definition without actually materializing the definition up front. The
- /// user can materialize the definition at any time by calling the getAddress
- /// method.
- JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
- : JITSymbolBase(Flags), GetAddress(std::move(GetAddress)), CachedAddr(0) {}
-
- /// @brief Create a JITSymbol from a RuntimeDyld::SymbolInfo.
- JITSymbol(const RuntimeDyld::SymbolInfo &Sym)
- : JITSymbolBase(Sym.getFlags()), CachedAddr(Sym.getAddress()) {}
-
- /// @brief Returns true if the symbol exists, false otherwise.
- explicit operator bool() const { return CachedAddr || GetAddress; }
-
- /// @brief Get the address of the symbol in the target address space. Returns
- /// '0' if the symbol does not exist.
- TargetAddress getAddress() {
- if (GetAddress) {
- CachedAddr = GetAddress();
- assert(CachedAddr && "Symbol could not be materialized.");
- GetAddress = nullptr;
- }
- return CachedAddr;
- }
-
- /// @brief Convert this JITSymbol to a RuntimeDyld::SymbolInfo.
- RuntimeDyld::SymbolInfo toRuntimeDyldSymbol() {
- return RuntimeDyld::SymbolInfo(getAddress(), getFlags());
- }
-
-private:
- GetAddressFtor GetAddress;
- TargetAddress CachedAddr;
-};
-
-} // End namespace orc.
-} // End namespace llvm.
-
-#endif // LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
diff --git a/include/llvm/ExecutionEngine/Orc/LambdaResolver.h b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
index a42b9d5c29d1..cbe2a80edf1c 100644
--- a/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
+++ b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
@@ -23,7 +23,7 @@ namespace llvm {
namespace orc {
template <typename DylibLookupFtorT, typename ExternalLookupFtorT>
-class LambdaResolver : public RuntimeDyld::SymbolResolver {
+class LambdaResolver : public JITSymbolResolver {
public:
LambdaResolver(DylibLookupFtorT DylibLookupFtor,
@@ -31,12 +31,11 @@ public:
: DylibLookupFtor(DylibLookupFtor),
ExternalLookupFtor(ExternalLookupFtor) {}
- RuntimeDyld::SymbolInfo
- findSymbolInLogicalDylib(const std::string &Name) final {
+ JITSymbol findSymbolInLogicalDylib(const std::string &Name) final {
return DylibLookupFtor(Name);
}
- RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) final {
+ JITSymbol findSymbol(const std::string &Name) final {
return ExternalLookupFtor(Name);
}
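
Typical client-side construction of a resolver via createLambdaResolver from this header. Layer stands for whatever layer the client searches first; the external lookup shown here falls back to symbols already present in the host process.

#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"

auto Resolver = llvm::orc::createLambdaResolver(
    // Internal lookup: symbols expected to live in the logical dylib.
    [&](const std::string &Name) -> llvm::JITSymbol {
      if (auto Sym = Layer.findSymbol(Name, /*ExportedSymbolsOnly=*/false))
        return Sym;
      return nullptr;
    },
    // External lookup: fall back to symbols already in the host process.
    [](const std::string &Name) -> llvm::JITSymbol {
      if (auto Addr =
              llvm::RTDyldMemoryManager::getSymbolAddressInProcess(Name))
        return llvm::JITSymbol(Addr, llvm::JITSymbolFlags::Exported);
      return nullptr;
    });
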
diff --git a/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
index c5fb6b847b30..53d4c0cfe5d4 100644
--- a/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -14,14 +14,20 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
-#include "JITSymbol.h"
-#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
#include <list>
+#include <memory>
+#include <string>
namespace llvm {
namespace orc {
@@ -39,8 +45,8 @@ public:
private:
class EmissionDeferredSet {
public:
- EmissionDeferredSet() : EmitState(NotEmitted) {}
- virtual ~EmissionDeferredSet() {}
+ EmissionDeferredSet() = default;
+ virtual ~EmissionDeferredSet() = default;
JITSymbol find(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
switch (EmitState) {
@@ -50,9 +56,9 @@ private:
// (a StringRef) may go away before the lambda is executed.
// FIXME: Use capture-init when we move to C++14.
std::string PName = Name;
- JITSymbolFlags Flags = JITSymbolBase::flagsFromGlobalValue(*GV);
+ JITSymbolFlags Flags = JITSymbolFlags::fromGlobalValue(*GV);
auto GetAddress =
- [this, ExportedSymbolsOnly, PName, &B]() -> TargetAddress {
+ [this, ExportedSymbolsOnly, PName, &B]() -> JITTargetAddress {
if (this->EmitState == Emitting)
return 0;
else if (this->EmitState == NotEmitted) {
@@ -106,7 +112,7 @@ private:
virtual BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) = 0;
private:
- enum { NotEmitted, Emitting, Emitted } EmitState;
+ enum { NotEmitted, Emitting, Emitted } EmitState = NotEmitted;
BaseLayerHandleT Handle;
};
@@ -121,7 +127,6 @@ private:
Resolver(std::move(Resolver)) {}
protected:
-
const GlobalValue* searchGVs(StringRef Name,
bool ExportedSymbolsOnly) const override {
// FIXME: We could clean all this up if we had a way to reliably demangle
@@ -277,7 +282,6 @@ public:
void emitAndFinalize(ModuleSetHandleT H) {
(*H)->emitAndFinalize(BaseLayer);
}
-
};
template <typename BaseLayerT>
@@ -293,7 +297,7 @@ LazyEmittingLayer<BaseLayerT>::EmissionDeferredSet::create(
std::move(Resolver));
}
-} // End namespace orc.
-} // End namespace llvm.
+} // end namespace orc
+} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
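
The lazy-emission scheme above leans on JITSymbol's deferred-materialization constructor; a compressed sketch follows, where compileNow() is a hypothetical stand-in for emitting to the base layer.

auto Deferred = llvm::JITSymbol(
    [&]() -> llvm::JITTargetAddress {
      // Runs at most once, the first time getAddress() is called.
      return compileNow(); // hypothetical
    },
    llvm::JITSymbolFlags::Exported);

// Nothing has been compiled yet; this call materializes the definition.
llvm::JITTargetAddress Addr = Deferred.getAddress();
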
diff --git a/include/llvm/ExecutionEngine/Orc/LogicalDylib.h b/include/llvm/ExecutionEngine/Orc/LogicalDylib.h
deleted file mode 100644
index 883fa9eac560..000000000000
--- a/include/llvm/ExecutionEngine/Orc/LogicalDylib.h
+++ /dev/null
@@ -1,135 +0,0 @@
-//===--- LogicalDylib.h - Simulates dylib-style symbol lookup ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Simulates symbol resolution inside a dylib.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
-#define LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
-
-#include "llvm/ExecutionEngine/Orc/JITSymbol.h"
-#include <string>
-#include <vector>
-
-namespace llvm {
-namespace orc {
-
-template <typename BaseLayerT,
- typename LogicalModuleResources,
- typename LogicalDylibResources>
-class LogicalDylib {
-public:
- typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
-private:
-
- typedef std::vector<BaseLayerModuleSetHandleT> BaseLayerHandleList;
-
- struct LogicalModule {
- // Make this move-only to ensure they don't get duplicated across moves of
- // LogicalDylib or anything like that.
- LogicalModule(LogicalModule &&RHS)
- : Resources(std::move(RHS.Resources)),
- BaseLayerHandles(std::move(RHS.BaseLayerHandles)) {}
- LogicalModule() = default;
- LogicalModuleResources Resources;
- BaseLayerHandleList BaseLayerHandles;
- };
- typedef std::vector<LogicalModule> LogicalModuleList;
-
-public:
-
- typedef typename BaseLayerHandleList::iterator BaseLayerHandleIterator;
- typedef typename LogicalModuleList::iterator LogicalModuleHandle;
-
- LogicalDylib(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
-
- ~LogicalDylib() {
- for (auto &LM : LogicalModules)
- for (auto BLH : LM.BaseLayerHandles)
- BaseLayer.removeModuleSet(BLH);
- }
-
- // If possible, remove this and ~LogicalDylib once the work in the dtor is
- // moved to members (eg: self-unregistering base layer handles).
- LogicalDylib(LogicalDylib &&RHS)
- : BaseLayer(std::move(RHS.BaseLayer)),
- LogicalModules(std::move(RHS.LogicalModules)),
- DylibResources(std::move(RHS.DylibResources)) {}
-
- LogicalModuleHandle createLogicalModule() {
- LogicalModules.push_back(LogicalModule());
- return std::prev(LogicalModules.end());
- }
-
- void addToLogicalModule(LogicalModuleHandle LMH,
- BaseLayerModuleSetHandleT BaseLayerHandle) {
- LMH->BaseLayerHandles.push_back(BaseLayerHandle);
- }
-
- LogicalModuleResources& getLogicalModuleResources(LogicalModuleHandle LMH) {
- return LMH->Resources;
- }
-
- BaseLayerHandleIterator moduleHandlesBegin(LogicalModuleHandle LMH) {
- return LMH->BaseLayerHandles.begin();
- }
-
- BaseLayerHandleIterator moduleHandlesEnd(LogicalModuleHandle LMH) {
- return LMH->BaseLayerHandles.end();
- }
-
- JITSymbol findSymbolInLogicalModule(LogicalModuleHandle LMH,
- const std::string &Name,
- bool ExportedSymbolsOnly) {
-
- if (auto StubSym = LMH->Resources.findSymbol(Name, ExportedSymbolsOnly))
- return StubSym;
-
- for (auto BLH : LMH->BaseLayerHandles)
- if (auto Symbol = BaseLayer.findSymbolIn(BLH, Name, ExportedSymbolsOnly))
- return Symbol;
- return nullptr;
- }
-
- JITSymbol findSymbolInternally(LogicalModuleHandle LMH,
- const std::string &Name) {
- if (auto Symbol = findSymbolInLogicalModule(LMH, Name, false))
- return Symbol;
-
- for (auto LMI = LogicalModules.begin(), LME = LogicalModules.end();
- LMI != LME; ++LMI) {
- if (LMI != LMH)
- if (auto Symbol = findSymbolInLogicalModule(LMI, Name, false))
- return Symbol;
- }
-
- return nullptr;
- }
-
- JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
- for (auto LMI = LogicalModules.begin(), LME = LogicalModules.end();
- LMI != LME; ++LMI)
- if (auto Sym = findSymbolInLogicalModule(LMI, Name, ExportedSymbolsOnly))
- return Sym;
- return nullptr;
- }
-
- LogicalDylibResources& getDylibResources() { return DylibResources; }
-
-protected:
- BaseLayerT BaseLayer;
- LogicalModuleList LogicalModules;
- LogicalDylibResources DylibResources;
-};
-
-} // End namespace orc.
-} // End namespace llvm.
-
-#endif // LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
diff --git a/include/llvm/ExecutionEngine/Orc/NullResolver.h b/include/llvm/ExecutionEngine/Orc/NullResolver.h
index 1560c6d86e0f..957b94912b3f 100644
--- a/include/llvm/ExecutionEngine/Orc/NullResolver.h
+++ b/include/llvm/ExecutionEngine/Orc/NullResolver.h
@@ -22,12 +22,11 @@ namespace orc {
/// SymbolResolver implementation that rejects all resolution requests.
/// Useful for clients that have no cross-object fixups.
-class NullResolver : public RuntimeDyld::SymbolResolver {
+class NullResolver : public JITSymbolResolver {
public:
- RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) final;
+ JITSymbol findSymbol(const std::string &Name) final;
- RuntimeDyld::SymbolInfo
- findSymbolInLogicalDylib(const std::string &Name) final;
+ JITSymbol findSymbolInLogicalDylib(const std::string &Name) final;
};
} // End namespace orc.
diff --git a/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h b/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
index a7798d8beb8d..0588d2228598 100644
--- a/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
@@ -14,12 +14,23 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
-#include "JITSymbol.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include <cassert>
+#include <algorithm>
+#include <functional>
#include <list>
#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
namespace llvm {
namespace orc {
@@ -34,11 +45,11 @@ protected:
/// had been provided by this instance. Higher level layers are responsible
/// for taking any action required to handle the missing symbols.
class LinkedObjectSet {
- LinkedObjectSet(const LinkedObjectSet&) = delete;
- void operator=(const LinkedObjectSet&) = delete;
public:
LinkedObjectSet() = default;
- virtual ~LinkedObjectSet() {}
+ LinkedObjectSet(const LinkedObjectSet&) = delete;
+ void operator=(const LinkedObjectSet&) = delete;
+ virtual ~LinkedObjectSet() = default;
virtual void finalize() = 0;
@@ -46,21 +57,22 @@ protected:
getSymbolMaterializer(std::string Name) = 0;
virtual void mapSectionAddress(const void *LocalAddress,
- TargetAddress TargetAddr) const = 0;
+ JITTargetAddress TargetAddr) const = 0;
JITSymbol getSymbol(StringRef Name, bool ExportedSymbolsOnly) {
auto SymEntry = SymbolTable.find(Name);
if (SymEntry == SymbolTable.end())
return nullptr;
- if (!SymEntry->second.isExported() && ExportedSymbolsOnly)
+ if (!SymEntry->second.getFlags().isExported() && ExportedSymbolsOnly)
return nullptr;
if (!Finalized)
return JITSymbol(getSymbolMaterializer(Name),
SymEntry->second.getFlags());
return JITSymbol(SymEntry->second);
}
+
protected:
- StringMap<RuntimeDyld::SymbolInfo> SymbolTable;
+ StringMap<JITEvaluatedSymbol> SymbolTable;
bool Finalized = false;
};
@@ -71,7 +83,6 @@ public:
typedef LinkedObjectSetListT::iterator ObjSetHandleT;
};
-
/// @brief Default (no-op) action to perform when loading objects.
class DoNothingOnNotifyLoaded {
public:
@@ -89,12 +100,10 @@ public:
template <typename NotifyLoadedFtor = DoNothingOnNotifyLoaded>
class ObjectLinkingLayer : public ObjectLinkingLayerBase {
public:
-
/// @brief Functor for receiving finalization notifications.
typedef std::function<void(ObjSetHandleT)> NotifyFinalizedFtor;
private:
-
template <typename ObjSetT, typename MemoryManagerPtrT,
typename SymbolResolverPtrT, typename FinalizerFtor>
class ConcreteLinkedObjectSet : public LinkedObjectSet {
@@ -122,10 +131,10 @@ private:
RTDyld.setProcessAllSections(PFC->ProcessAllSections);
PFC->RTDyld = &RTDyld;
+ this->Finalized = true;
PFC->Finalizer(PFC->Handle, RTDyld, std::move(PFC->Objects),
[&]() {
this->updateSymbolTable(RTDyld);
- this->Finalized = true;
});
// Release resources.
@@ -144,14 +153,13 @@ private:
}
void mapSectionAddress(const void *LocalAddress,
- TargetAddress TargetAddr) const override {
+ JITTargetAddress TargetAddr) const override {
assert(PFC && "mapSectionAddress called on finalized LinkedObjectSet");
assert(PFC->RTDyld && "mapSectionAddress called on raw LinkedObjectSet");
PFC->RTDyld->mapSectionAddress(LocalAddress, TargetAddr);
}
private:
-
void buildInitialSymbolTable(const ObjSetT &Objects) {
for (const auto &Obj : Objects)
for (auto &Symbol : getObject(*Obj).symbols()) {
@@ -163,9 +171,9 @@ private:
consumeError(SymbolName.takeError());
continue;
}
- auto Flags = JITSymbol::flagsFromObjectSymbol(Symbol);
+ auto Flags = JITSymbolFlags::fromObjectSymbol(Symbol);
SymbolTable.insert(
- std::make_pair(*SymbolName, RuntimeDyld::SymbolInfo(0, Flags)));
+ std::make_pair(*SymbolName, JITEvaluatedSymbol(0, Flags)));
}
}
@@ -212,7 +220,6 @@ private:
}
public:
-
/// @brief LoadedObjectInfo list. Contains a list of owning pointers to
/// RuntimeDyld::LoadedObjectInfo instances.
typedef std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>
@@ -248,7 +255,6 @@ public:
ObjSetHandleT addObjectSet(ObjSetT Objects,
MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver) {
-
auto Finalizer = [&](ObjSetHandleT H, RuntimeDyld &RTDyld,
const ObjSetT &Objs,
std::function<void()> LOSHandleLoad) {
@@ -322,7 +328,7 @@ public:
/// @brief Map section addresses for the objects associated with the handle H.
void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
- TargetAddress TargetAddr) {
+ JITTargetAddress TargetAddr) {
(*H)->mapSectionAddress(LocalAddress, TargetAddr);
}
@@ -334,7 +340,6 @@ public:
}
private:
-
static const object::ObjectFile& getObject(const object::ObjectFile &Obj) {
return Obj;
}
@@ -351,7 +356,7 @@ private:
bool ProcessAllSections;
};
-} // End namespace orc.
-} // End namespace llvm
+} // end namespace orc
+} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
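
Client-side sketch of pushing one object file through the layer, built only from the addObjectSet/findSymbolIn methods visible above. Obj and Resolver stand for an already-loaded object::ObjectFile and a JITSymbolResolver (for instance a LambdaResolver as sketched earlier); the shared SectionMemoryManager and the "main" lookup are assumptions.

llvm::orc::ObjectLinkingLayer<> ObjLayer;

std::vector<std::unique_ptr<llvm::object::ObjectFile>> Objs;
Objs.push_back(std::move(Obj)); // an already-loaded object file

auto Handle = ObjLayer.addObjectSet(
    std::move(Objs), std::make_shared<llvm::SectionMemoryManager>(),
    std::move(Resolver));

if (auto Sym = ObjLayer.findSymbolIn(Handle, "main",
                                     /*ExportedSymbolsOnly=*/true)) {
  // getAddress() on an unfinalized set goes through the symbol materializer,
  // so linking and finalization happen lazily here.
  llvm::JITTargetAddress MainAddr = Sym.getAddress();
  (void)MainAddr;
}
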
diff --git a/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h b/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
index 2ffe71c94356..173c106cd3ec 100644
--- a/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
@@ -14,7 +14,7 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
-#include "JITSymbol.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
namespace llvm {
namespace orc {
@@ -83,7 +83,7 @@ public:
/// @brief Map section addresses for the objects associated with the handle H.
void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
- TargetAddress TargetAddr) {
+ JITTargetAddress TargetAddr) {
BaseLayer.mapSectionAddress(H, LocalAddress, TargetAddr);
}
diff --git a/include/llvm/ExecutionEngine/Orc/OrcABISupport.h b/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
index 4a8d0b0b801c..fa236b0de88a 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
@@ -37,7 +37,8 @@ public:
static const unsigned TrampolineSize = 1;
static const unsigned ResolverCodeSize = 1;
- typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
+ typedef JITTargetAddress (*JITReentryFn)(void *CallbackMgr,
+ void *TrampolineId);
static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
void *CallbackMgr) {
@@ -115,7 +116,8 @@ public:
typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
- typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
+ typedef JITTargetAddress (*JITReentryFn)(void *CallbackMgr,
+ void *TrampolineId);
/// @brief Write the resolver code into the given memory. The user is
/// responsible for allocating the memory and setting permissions.
@@ -170,7 +172,8 @@ public:
class OrcX86_64_SysV : public OrcX86_64_Base {
public:
static const unsigned ResolverCodeSize = 0x6C;
- typedef TargetAddress(*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
+ typedef JITTargetAddress (*JITReentryFn)(void *CallbackMgr,
+ void *TrampolineId);
/// @brief Write the resolver code into the given memory. The user is
/// responsible for allocating the memory and setting permissions.
@@ -184,7 +187,8 @@ public:
class OrcX86_64_Win32 : public OrcX86_64_Base {
public:
static const unsigned ResolverCodeSize = 0x74;
- typedef TargetAddress(*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
+ typedef JITTargetAddress (*JITReentryFn)(void *CallbackMgr,
+ void *TrampolineId);
/// @brief Write the resolver code into the given memory. The user is
/// responsible for allocating the memory and setting permissions.
@@ -203,7 +207,8 @@ public:
typedef GenericIndirectStubsInfo<8> IndirectStubsInfo;
- typedef TargetAddress (*JITReentryFn)(void *CallbackMgr, void *TrampolineId);
+ typedef JITTargetAddress (*JITReentryFn)(void *CallbackMgr,
+ void *TrampolineId);
/// @brief Write the resolver code into the given memory. The user is
/// responsible for allocating the memory and setting permissions.
diff --git a/include/llvm/ExecutionEngine/Orc/OrcError.h b/include/llvm/ExecutionEngine/Orc/OrcError.h
index 1b3f25fae162..b74988cce2fb 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcError.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcError.h
@@ -27,8 +27,10 @@ enum class OrcErrorCode : int {
RemoteMProtectAddrUnrecognized,
RemoteIndirectStubsOwnerDoesNotExist,
RemoteIndirectStubsOwnerIdAlreadyInUse,
+ RPCResponseAbandoned,
UnexpectedRPCCall,
UnexpectedRPCResponse,
+ UnknownRPCFunction
};
Error orcError(OrcErrorCode ErrCode);
diff --git a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
index 5c867e7e7fd4..8647db56cd2f 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
@@ -8,8 +8,8 @@
//===----------------------------------------------------------------------===//
//
// This file defines the OrcRemoteTargetClient class and helpers. This class
-// can be used to communicate over an RPCChannel with an OrcRemoteTargetServer
-// instance to support remote-JITing.
+// can be used to communicate over a RawByteChannel with an
+// OrcRemoteTargetServer instance to support remote-JITing.
//
//===----------------------------------------------------------------------===//
@@ -36,23 +36,6 @@ namespace remote {
template <typename ChannelT>
class OrcRemoteTargetClient : public OrcRemoteTargetRPCAPI {
public:
- // FIXME: Remove move/copy ops once MSVC supports synthesizing move ops.
-
- OrcRemoteTargetClient(const OrcRemoteTargetClient &) = delete;
- OrcRemoteTargetClient &operator=(const OrcRemoteTargetClient &) = delete;
-
- OrcRemoteTargetClient(OrcRemoteTargetClient &&Other)
- : Channel(Other.Channel), ExistingError(std::move(Other.ExistingError)),
- RemoteTargetTriple(std::move(Other.RemoteTargetTriple)),
- RemotePointerSize(std::move(Other.RemotePointerSize)),
- RemotePageSize(std::move(Other.RemotePageSize)),
- RemoteTrampolineSize(std::move(Other.RemoteTrampolineSize)),
- RemoteIndirectStubSize(std::move(Other.RemoteIndirectStubSize)),
- AllocatorIds(std::move(Other.AllocatorIds)),
- IndirectStubOwnerIds(std::move(Other.IndirectStubOwnerIds)) {}
-
- OrcRemoteTargetClient &operator=(OrcRemoteTargetClient &&) = delete;
-
/// Remote memory manager.
class RCMemoryManager : public RuntimeDyld::MemoryManager {
public:
@@ -61,18 +44,10 @@ public:
DEBUG(dbgs() << "Created remote allocator " << Id << "\n");
}
- RCMemoryManager(RCMemoryManager &&Other)
- : Client(std::move(Other.Client)), Id(std::move(Other.Id)),
- Unmapped(std::move(Other.Unmapped)),
- Unfinalized(std::move(Other.Unfinalized)) {}
-
- RCMemoryManager operator=(RCMemoryManager &&Other) {
- Client = std::move(Other.Client);
- Id = std::move(Other.Id);
- Unmapped = std::move(Other.Unmapped);
- Unfinalized = std::move(Other.Unfinalized);
- return *this;
- }
+ RCMemoryManager(const RCMemoryManager &) = delete;
+ RCMemoryManager &operator=(const RCMemoryManager &) = delete;
+ RCMemoryManager(RCMemoryManager &&) = default;
+ RCMemoryManager &operator=(RCMemoryManager &&) = default;
~RCMemoryManager() override {
Client.destroyRemoteAllocator(Id);
@@ -185,7 +160,7 @@ public:
DEBUG(dbgs() << "Allocator " << Id << " applied mappings:\n");
for (auto &ObjAllocs : Unmapped) {
{
- TargetAddress NextCodeAddr = ObjAllocs.RemoteCodeAddr;
+ JITTargetAddress NextCodeAddr = ObjAllocs.RemoteCodeAddr;
for (auto &Alloc : ObjAllocs.CodeAllocs) {
NextCodeAddr = alignTo(NextCodeAddr, Alloc.getAlign());
Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextCodeAddr);
@@ -197,7 +172,7 @@ public:
}
}
{
- TargetAddress NextRODataAddr = ObjAllocs.RemoteRODataAddr;
+ JITTargetAddress NextRODataAddr = ObjAllocs.RemoteRODataAddr;
for (auto &Alloc : ObjAllocs.RODataAllocs) {
NextRODataAddr = alignTo(NextRODataAddr, Alloc.getAlign());
Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextRODataAddr);
@@ -210,7 +185,7 @@ public:
}
}
{
- TargetAddress NextRWDataAddr = ObjAllocs.RemoteRWDataAddr;
+ JITTargetAddress NextRWDataAddr = ObjAllocs.RemoteRWDataAddr;
for (auto &Alloc : ObjAllocs.RWDataAllocs) {
NextRWDataAddr = alignTo(NextRWDataAddr, Alloc.getAlign());
Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextRWDataAddr);
@@ -366,18 +341,10 @@ public:
Alloc(uint64_t Size, unsigned Align)
: Size(Size), Align(Align), Contents(new char[Size + Align - 1]) {}
- Alloc(Alloc &&Other)
- : Size(std::move(Other.Size)), Align(std::move(Other.Align)),
- Contents(std::move(Other.Contents)),
- RemoteAddr(std::move(Other.RemoteAddr)) {}
-
- Alloc &operator=(Alloc &&Other) {
- Size = std::move(Other.Size);
- Align = std::move(Other.Align);
- Contents = std::move(Other.Contents);
- RemoteAddr = std::move(Other.RemoteAddr);
- return *this;
- }
+ Alloc(const Alloc &) = delete;
+ Alloc &operator=(const Alloc &) = delete;
+ Alloc(Alloc &&) = default;
+ Alloc &operator=(Alloc &&) = default;
uint64_t getSize() const { return Size; }
@@ -389,43 +356,29 @@ public:
return reinterpret_cast<char *>(LocalAddr);
}
- void setRemoteAddress(TargetAddress RemoteAddr) {
+ void setRemoteAddress(JITTargetAddress RemoteAddr) {
this->RemoteAddr = RemoteAddr;
}
- TargetAddress getRemoteAddress() const { return RemoteAddr; }
+ JITTargetAddress getRemoteAddress() const { return RemoteAddr; }
private:
uint64_t Size;
unsigned Align;
std::unique_ptr<char[]> Contents;
- TargetAddress RemoteAddr = 0;
+ JITTargetAddress RemoteAddr = 0;
};
struct ObjectAllocs {
ObjectAllocs() = default;
-
- ObjectAllocs(ObjectAllocs &&Other)
- : RemoteCodeAddr(std::move(Other.RemoteCodeAddr)),
- RemoteRODataAddr(std::move(Other.RemoteRODataAddr)),
- RemoteRWDataAddr(std::move(Other.RemoteRWDataAddr)),
- CodeAllocs(std::move(Other.CodeAllocs)),
- RODataAllocs(std::move(Other.RODataAllocs)),
- RWDataAllocs(std::move(Other.RWDataAllocs)) {}
-
- ObjectAllocs &operator=(ObjectAllocs &&Other) {
- RemoteCodeAddr = std::move(Other.RemoteCodeAddr);
- RemoteRODataAddr = std::move(Other.RemoteRODataAddr);
- RemoteRWDataAddr = std::move(Other.RemoteRWDataAddr);
- CodeAllocs = std::move(Other.CodeAllocs);
- RODataAllocs = std::move(Other.RODataAllocs);
- RWDataAllocs = std::move(Other.RWDataAllocs);
- return *this;
- }
-
- TargetAddress RemoteCodeAddr = 0;
- TargetAddress RemoteRODataAddr = 0;
- TargetAddress RemoteRWDataAddr = 0;
+ ObjectAllocs(const ObjectAllocs &) = delete;
+ ObjectAllocs &operator=(const ObjectAllocs &) = delete;
+ ObjectAllocs(ObjectAllocs &&) = default;
+ ObjectAllocs &operator=(ObjectAllocs &&) = default;
+
+ JITTargetAddress RemoteCodeAddr = 0;
+ JITTargetAddress RemoteRODataAddr = 0;
+ JITTargetAddress RemoteRWDataAddr = 0;
std::vector<Alloc> CodeAllocs, RODataAllocs, RWDataAllocs;
};
@@ -450,7 +403,7 @@ public:
}
}
- Error createStub(StringRef StubName, TargetAddress StubAddr,
+ Error createStub(StringRef StubName, JITTargetAddress StubAddr,
JITSymbolFlags StubFlags) override {
if (auto Err = reserveStubs(1))
return Err;
@@ -477,7 +430,7 @@ public:
auto Key = I->second.first;
auto Flags = I->second.second;
auto StubSymbol = JITSymbol(getStubAddr(Key), Flags);
- if (ExportedStubsOnly && !StubSymbol.isExported())
+ if (ExportedStubsOnly && !StubSymbol.getFlags().isExported())
return nullptr;
return StubSymbol;
}
@@ -491,7 +444,7 @@ public:
return JITSymbol(getPtrAddr(Key), Flags);
}
- Error updatePointer(StringRef Name, TargetAddress NewAddr) override {
+ Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override {
auto I = StubIndexes.find(Name);
assert(I != StubIndexes.end() && "No stub pointer for symbol");
auto Key = I->second.first;
@@ -500,8 +453,8 @@ public:
private:
struct RemoteIndirectStubsInfo {
- TargetAddress StubBase;
- TargetAddress PtrBase;
+ JITTargetAddress StubBase;
+ JITTargetAddress PtrBase;
unsigned NumStubs;
};
@@ -517,8 +470,8 @@ public:
return Error::success();
unsigned NewStubsRequired = NumStubs - FreeStubs.size();
- TargetAddress StubBase;
- TargetAddress PtrBase;
+ JITTargetAddress StubBase;
+ JITTargetAddress PtrBase;
unsigned NumStubsEmitted;
if (auto StubInfoOrErr = Remote.emitIndirectStubs(Id, NewStubsRequired))
@@ -535,7 +488,7 @@ public:
return Error::success();
}
- Error createStubInternal(StringRef StubName, TargetAddress InitAddr,
+ Error createStubInternal(StringRef StubName, JITTargetAddress InitAddr,
JITSymbolFlags StubFlags) {
auto Key = FreeStubs.back();
FreeStubs.pop_back();
@@ -543,14 +496,14 @@ public:
return Remote.writePointer(getPtrAddr(Key), InitAddr);
}
- TargetAddress getStubAddr(StubKey K) {
+ JITTargetAddress getStubAddr(StubKey K) {
assert(RemoteIndirectStubsInfos[K.first].StubBase != 0 &&
"Missing stub address");
return RemoteIndirectStubsInfos[K.first].StubBase +
K.second * Remote.getIndirectStubSize();
}
- TargetAddress getPtrAddr(StubKey K) {
+ JITTargetAddress getPtrAddr(StubKey K) {
assert(RemoteIndirectStubsInfos[K.first].PtrBase != 0 &&
"Missing pointer address");
return RemoteIndirectStubsInfos[K.first].PtrBase +
@@ -561,13 +514,13 @@ public:
/// Remote compile callback manager.
class RCCompileCallbackManager : public JITCompileCallbackManager {
public:
- RCCompileCallbackManager(TargetAddress ErrorHandlerAddress,
+ RCCompileCallbackManager(JITTargetAddress ErrorHandlerAddress,
OrcRemoteTargetClient &Remote)
: JITCompileCallbackManager(ErrorHandlerAddress), Remote(Remote) {}
private:
void grow() override {
- TargetAddress BlockAddr = 0;
+ JITTargetAddress BlockAddr = 0;
uint32_t NumTrampolines = 0;
if (auto TrampolineInfoOrErr = Remote.emitTrampolineBlock())
std::tie(BlockAddr, NumTrampolines) = *TrampolineInfoOrErr;
@@ -587,48 +540,38 @@ public:
/// Create an OrcRemoteTargetClient.
/// Channel is the ChannelT instance to communicate on. It is assumed that
/// the channel is ready to be read from and written to.
- static Expected<OrcRemoteTargetClient> Create(ChannelT &Channel) {
- Error Err;
- OrcRemoteTargetClient H(Channel, Err);
+ static Expected<std::unique_ptr<OrcRemoteTargetClient>>
+ Create(ChannelT &Channel) {
+ Error Err = Error::success();
+ std::unique_ptr<OrcRemoteTargetClient> Client(
+ new OrcRemoteTargetClient(Channel, Err));
if (Err)
return std::move(Err);
- return Expected<OrcRemoteTargetClient>(std::move(H));
+ return std::move(Client);
}
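// The factory above now returns Expected<std::unique_ptr<OrcRemoteTargetClient>>
// rather than an Expected client by value. Call-site sketch (ClientChannelT is
// a placeholder for whatever concrete channel type the embedder uses):
auto ClientOrErr = OrcRemoteTargetClient<ClientChannelT>::Create(Channel);
if (!ClientOrErr)
  return ClientOrErr.takeError();
std::unique_ptr<OrcRemoteTargetClient<ClientChannelT>> Client =
    std::move(*ClientOrErr);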
/// Call the int(void) function at the given address in the target and return
/// its result.
- Expected<int> callIntVoid(TargetAddress Addr) {
+ Expected<int> callIntVoid(JITTargetAddress Addr) {
DEBUG(dbgs() << "Calling int(*)(void) " << format("0x%016x", Addr) << "\n");
-
- auto Listen = [&](RPCChannel &C, uint32_t Id) {
- return listenForCompileRequests(C, Id);
- };
- return callSTHandling<CallIntVoid>(Channel, Listen, Addr);
+ return callB<CallIntVoid>(Addr);
}
/// Call the int(int, char*[]) function at the given address in the target and
/// return its result.
- Expected<int> callMain(TargetAddress Addr,
+ Expected<int> callMain(JITTargetAddress Addr,
const std::vector<std::string> &Args) {
DEBUG(dbgs() << "Calling int(*)(int, char*[]) " << format("0x%016x", Addr)
<< "\n");
-
- auto Listen = [&](RPCChannel &C, uint32_t Id) {
- return listenForCompileRequests(C, Id);
- };
- return callSTHandling<CallMain>(Channel, Listen, Addr, Args);
+ return callB<CallMain>(Addr, Args);
}
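// With callB the per-call compile-request listeners are gone; the
// RequestCompile handler installed in the constructor below covers that
// instead. Invoking a remote main then reduces to the sketch here, where
// MainAddr is assumed to come from getSymbolAddress("main"):
std::vector<std::string> Args = {"prog", "--flag"}; // placeholder argv
Expected<int> ResOrErr = Client->callMain(MainAddr, Args);
if (!ResOrErr)
  return ResOrErr.takeError();
int ExitCode = *ResOrErr; // use the remote program's exit code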
/// Call the void() function at the given address in the target and wait for
/// it to finish.
- Error callVoidVoid(TargetAddress Addr) {
+ Error callVoidVoid(JITTargetAddress Addr) {
DEBUG(dbgs() << "Calling void(*)(void) " << format("0x%016x", Addr)
<< "\n");
-
- auto Listen = [&](RPCChannel &C, uint32_t Id) {
- return listenForCompileRequests(C, Id);
- };
- return callSTHandling<CallVoidVoid>(Channel, Listen, Addr);
+ return callB<CallVoidVoid>(Addr);
}
/// Create an RCMemoryManager which will allocate its memory on the remote
@@ -637,7 +580,7 @@ public:
assert(!MM && "MemoryManager should be null before creation.");
auto Id = AllocatorIds.getNext();
- if (auto Err = callST<CreateRemoteAllocator>(Channel, Id))
+ if (auto Err = callB<CreateRemoteAllocator>(Id))
return Err;
MM = llvm::make_unique<RCMemoryManager>(*this, Id);
return Error::success();
@@ -648,20 +591,20 @@ public:
Error createIndirectStubsManager(std::unique_ptr<RCIndirectStubsManager> &I) {
assert(!I && "Indirect stubs manager should be null before creation.");
auto Id = IndirectStubOwnerIds.getNext();
- if (auto Err = callST<CreateIndirectStubsOwner>(Channel, Id))
+ if (auto Err = callB<CreateIndirectStubsOwner>(Id))
return Err;
I = llvm::make_unique<RCIndirectStubsManager>(*this, Id);
return Error::success();
}
Expected<RCCompileCallbackManager &>
- enableCompileCallbacks(TargetAddress ErrorHandlerAddress) {
+ enableCompileCallbacks(JITTargetAddress ErrorHandlerAddress) {
// Check for an 'out-of-band' error, e.g. from an MM destructor.
if (ExistingError)
return std::move(ExistingError);
// Emit the resolver block on the JIT server.
- if (auto Err = callST<EmitResolverBlock>(Channel))
+ if (auto Err = callB<EmitResolverBlock>())
return std::move(Err);
// Create the callback manager.
@@ -673,23 +616,32 @@ public:
/// Search for symbols in the remote process. Note: This should be used by
/// symbol resolvers *after* they've searched the local symbol table in the
/// JIT stack.
- Expected<TargetAddress> getSymbolAddress(StringRef Name) {
+ Expected<JITTargetAddress> getSymbolAddress(StringRef Name) {
// Check for an 'out-of-band' error, e.g. from an MM destructor.
if (ExistingError)
return std::move(ExistingError);
- return callST<GetSymbolAddress>(Channel, Name);
+ return callB<GetSymbolAddress>(Name);
}
/// Get the triple for the remote target.
const std::string &getTargetTriple() const { return RemoteTargetTriple; }
- Error terminateSession() { return callST<TerminateSession>(Channel); }
+ Error terminateSession() { return callB<TerminateSession>(); }
private:
- OrcRemoteTargetClient(ChannelT &Channel, Error &Err) : Channel(Channel) {
- ErrorAsOutParameter EAO(Err);
- if (auto RIOrErr = callST<GetRemoteInfo>(Channel)) {
+ OrcRemoteTargetClient(ChannelT &Channel, Error &Err)
+ : OrcRemoteTargetRPCAPI(Channel) {
+ ErrorAsOutParameter EAO(&Err);
+
+ addHandler<RequestCompile>(
+ [this](JITTargetAddress Addr) -> JITTargetAddress {
+ if (CallbackManager)
+ return CallbackManager->executeCompileCallback(Addr);
+ return 0;
+ });
+
+ if (auto RIOrErr = callB<GetRemoteInfo>()) {
std::tie(RemoteTargetTriple, RemotePointerSize, RemotePageSize,
RemoteTrampolineSize, RemoteIndirectStubSize) = *RIOrErr;
Err = Error::success();
@@ -698,12 +650,12 @@ private:
}
}
- Error deregisterEHFrames(TargetAddress Addr, uint32_t Size) {
- return callST<RegisterEHFrames>(Channel, Addr, Size);
+ Error deregisterEHFrames(JITTargetAddress Addr, uint32_t Size) {
+ return callB<DeregisterEHFrames>(Addr, Size);
}
void destroyRemoteAllocator(ResourceIdMgr::ResourceId Id) {
- if (auto Err = callST<DestroyRemoteAllocator>(Channel, Id)) {
+ if (auto Err = callB<DestroyRemoteAllocator>(Id)) {
// FIXME: This will be triggered by a removeModuleSet call: Propagate
// error return up through that.
llvm_unreachable("Failed to destroy remote allocator.");
@@ -713,20 +665,20 @@ private:
Error destroyIndirectStubsManager(ResourceIdMgr::ResourceId Id) {
IndirectStubOwnerIds.release(Id);
- return callST<DestroyIndirectStubsOwner>(Channel, Id);
+ return callB<DestroyIndirectStubsOwner>(Id);
}
- Expected<std::tuple<TargetAddress, TargetAddress, uint32_t>>
+ Expected<std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>>
emitIndirectStubs(ResourceIdMgr::ResourceId Id, uint32_t NumStubsRequired) {
- return callST<EmitIndirectStubs>(Channel, Id, NumStubsRequired);
+ return callB<EmitIndirectStubs>(Id, NumStubsRequired);
}
- Expected<std::tuple<TargetAddress, uint32_t>> emitTrampolineBlock() {
+ Expected<std::tuple<JITTargetAddress, uint32_t>> emitTrampolineBlock() {
// Check for an 'out-of-band' error, e.g. from an MM destructor.
if (ExistingError)
return std::move(ExistingError);
- return callST<EmitTrampolineBlock>(Channel);
+ return callB<EmitTrampolineBlock>();
}
uint32_t getIndirectStubSize() const { return RemoteIndirectStubSize; }
@@ -735,79 +687,53 @@ private:
uint32_t getTrampolineSize() const { return RemoteTrampolineSize; }
- Error listenForCompileRequests(RPCChannel &C, uint32_t &Id) {
- assert(CallbackManager &&
- "No calback manager. enableCompileCallbacks must be called first");
-
- // Check for an 'out-of-band' error, e.g. from an MM destructor.
- if (ExistingError)
- return std::move(ExistingError);
-
- // FIXME: CompileCallback could be an anonymous lambda defined at the use
- // site below, but that triggers a GCC 4.7 ICE. When we move off
- // GCC 4.7, tidy this up.
- auto CompileCallback =
- [this](TargetAddress Addr) -> Expected<TargetAddress> {
- return this->CallbackManager->executeCompileCallback(Addr);
- };
-
- if (Id == RequestCompileId) {
- if (auto Err = handle<RequestCompile>(C, CompileCallback))
- return Err;
- return Error::success();
- }
- // else
- return orcError(OrcErrorCode::UnexpectedRPCCall);
- }
-
- Expected<std::vector<char>> readMem(char *Dst, TargetAddress Src,
+ Expected<std::vector<char>> readMem(char *Dst, JITTargetAddress Src,
uint64_t Size) {
// Check for an 'out-of-band' error, e.g. from an MM destructor.
if (ExistingError)
return std::move(ExistingError);
- return callST<ReadMem>(Channel, Src, Size);
+ return callB<ReadMem>(Src, Size);
}
- Error registerEHFrames(TargetAddress &RAddr, uint32_t Size) {
- return callST<RegisterEHFrames>(Channel, RAddr, Size);
+ Error registerEHFrames(JITTargetAddress &RAddr, uint32_t Size) {
+ return callB<RegisterEHFrames>(RAddr, Size);
}
- Expected<TargetAddress> reserveMem(ResourceIdMgr::ResourceId Id,
- uint64_t Size, uint32_t Align) {
+ Expected<JITTargetAddress> reserveMem(ResourceIdMgr::ResourceId Id,
+ uint64_t Size, uint32_t Align) {
// Check for an 'out-of-band' error, e.g. from an MM destructor.
if (ExistingError)
return std::move(ExistingError);
- return callST<ReserveMem>(Channel, Id, Size, Align);
+ return callB<ReserveMem>(Id, Size, Align);
}
Error setProtections(ResourceIdMgr::ResourceId Id,
- TargetAddress RemoteSegAddr, unsigned ProtFlags) {
- return callST<SetProtections>(Channel, Id, RemoteSegAddr, ProtFlags);
+ JITTargetAddress RemoteSegAddr, unsigned ProtFlags) {
+ return callB<SetProtections>(Id, RemoteSegAddr, ProtFlags);
}
- Error writeMem(TargetAddress Addr, const char *Src, uint64_t Size) {
+ Error writeMem(JITTargetAddress Addr, const char *Src, uint64_t Size) {
// Check for an 'out-of-band' error, e.g. from an MM destructor.
if (ExistingError)
return std::move(ExistingError);
- return callST<WriteMem>(Channel, DirectBufferWriter(Src, Addr, Size));
+ return callB<WriteMem>(DirectBufferWriter(Src, Addr, Size));
}
- Error writePointer(TargetAddress Addr, TargetAddress PtrVal) {
+ Error writePointer(JITTargetAddress Addr, JITTargetAddress PtrVal) {
// Check for an 'out-of-band' error, e.g. from an MM destructor.
if (ExistingError)
return std::move(ExistingError);
- return callST<WritePtr>(Channel, Addr, PtrVal);
+ return callB<WritePtr>(Addr, PtrVal);
}
static Error doNothing() { return Error::success(); }
- ChannelT &Channel;
- Error ExistingError;
+ Error ExistingError = Error::success();
std::string RemoteTargetTriple;
uint32_t RemotePointerSize = 0;
uint32_t RemotePageSize = 0;
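Because the constructor now registers the RequestCompile handler itself, lazy compilation on the client side only needs the callback manager switched on. A minimal sketch, with 0 standing in for a real error-handler address:

  auto CCMgrOrErr = Client->enableCompileCallbacks(0);
  if (!CCMgrOrErr)
    return CCMgrOrErr.takeError();
  auto &CCMgr = *CCMgrOrErr;
  // CCMgr can then back e.g. a CompileOnDemandLayer; RequestCompile calls
  // arriving from the server are routed to CCMgr.executeCompileCallback.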
diff --git a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
index 74d851522f79..ab2b0fad89fd 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
@@ -16,9 +16,9 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
-#include "JITSymbol.h"
-#include "RPCChannel.h"
#include "RPCUtils.h"
+#include "RawByteChannel.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
namespace llvm {
namespace orc {
@@ -27,42 +27,63 @@ namespace remote {
class DirectBufferWriter {
public:
DirectBufferWriter() = default;
- DirectBufferWriter(const char *Src, TargetAddress Dst, uint64_t Size)
+ DirectBufferWriter(const char *Src, JITTargetAddress Dst, uint64_t Size)
: Src(Src), Dst(Dst), Size(Size) {}
const char *getSrc() const { return Src; }
- TargetAddress getDst() const { return Dst; }
+ JITTargetAddress getDst() const { return Dst; }
uint64_t getSize() const { return Size; }
private:
const char *Src;
- TargetAddress Dst;
+ JITTargetAddress Dst;
uint64_t Size;
};
-inline Error serialize(RPCChannel &C, const DirectBufferWriter &DBW) {
- if (auto EC = serialize(C, DBW.getDst()))
- return EC;
- if (auto EC = serialize(C, DBW.getSize()))
- return EC;
- return C.appendBytes(DBW.getSrc(), DBW.getSize());
-}
-
-inline Error deserialize(RPCChannel &C, DirectBufferWriter &DBW) {
- TargetAddress Dst;
- if (auto EC = deserialize(C, Dst))
- return EC;
- uint64_t Size;
- if (auto EC = deserialize(C, Size))
- return EC;
- char *Addr = reinterpret_cast<char *>(static_cast<uintptr_t>(Dst));
+} // end namespace remote
+
+namespace rpc {
+
+template <> class RPCTypeName<remote::DirectBufferWriter> {
+public:
+ static const char *getName() { return "DirectBufferWriter"; }
+};
- DBW = DirectBufferWriter(0, Dst, Size);
+template <typename ChannelT>
+class SerializationTraits<
+ ChannelT, remote::DirectBufferWriter, remote::DirectBufferWriter,
+ typename std::enable_if<
+ std::is_base_of<RawByteChannel, ChannelT>::value>::type> {
+public:
+ static Error serialize(ChannelT &C, const remote::DirectBufferWriter &DBW) {
+ if (auto EC = serializeSeq(C, DBW.getDst()))
+ return EC;
+ if (auto EC = serializeSeq(C, DBW.getSize()))
+ return EC;
+ return C.appendBytes(DBW.getSrc(), DBW.getSize());
+ }
+
+ static Error deserialize(ChannelT &C, remote::DirectBufferWriter &DBW) {
+ JITTargetAddress Dst;
+ if (auto EC = deserializeSeq(C, Dst))
+ return EC;
+ uint64_t Size;
+ if (auto EC = deserializeSeq(C, Size))
+ return EC;
+ char *Addr = reinterpret_cast<char *>(static_cast<uintptr_t>(Dst));
+
+ DBW = remote::DirectBufferWriter(0, Dst, Size);
+
+ return C.readBytes(Addr, Size);
+ }
+};
- return C.readBytes(Addr, Size);
-}
+} // end namespace rpc
-class OrcRemoteTargetRPCAPI : public RPC<RPCChannel> {
+namespace remote {
+
+class OrcRemoteTargetRPCAPI
+ : public rpc::SingleThreadedRPC<rpc::RawByteChannel> {
protected:
class ResourceIdMgr {
public:
@@ -86,117 +107,157 @@ protected:
public:
// FIXME: Remove constructors once MSVC supports synthesizing move-ops.
- OrcRemoteTargetRPCAPI() = default;
- OrcRemoteTargetRPCAPI(const OrcRemoteTargetRPCAPI &) = delete;
- OrcRemoteTargetRPCAPI &operator=(const OrcRemoteTargetRPCAPI &) = delete;
-
- OrcRemoteTargetRPCAPI(OrcRemoteTargetRPCAPI &&) {}
- OrcRemoteTargetRPCAPI &operator=(OrcRemoteTargetRPCAPI &&) { return *this; }
-
- enum JITFuncId : uint32_t {
- InvalidId = RPCFunctionIdTraits<JITFuncId>::InvalidId,
- CallIntVoidId = RPCFunctionIdTraits<JITFuncId>::FirstValidId,
- CallMainId,
- CallVoidVoidId,
- CreateRemoteAllocatorId,
- CreateIndirectStubsOwnerId,
- DeregisterEHFramesId,
- DestroyRemoteAllocatorId,
- DestroyIndirectStubsOwnerId,
- EmitIndirectStubsId,
- EmitResolverBlockId,
- EmitTrampolineBlockId,
- GetSymbolAddressId,
- GetRemoteInfoId,
- ReadMemId,
- RegisterEHFramesId,
- ReserveMemId,
- RequestCompileId,
- SetProtectionsId,
- TerminateSessionId,
- WriteMemId,
- WritePtrId
- };
-
- static const char *getJITFuncIdName(JITFuncId Id);
-
- typedef Function<CallIntVoidId, int32_t(TargetAddress Addr)> CallIntVoid;
-
- typedef Function<CallMainId,
- int32_t(TargetAddress Addr, std::vector<std::string> Args)>
- CallMain;
-
- typedef Function<CallVoidVoidId, void(TargetAddress FnAddr)> CallVoidVoid;
-
- typedef Function<CreateRemoteAllocatorId,
- void(ResourceIdMgr::ResourceId AllocatorID)>
- CreateRemoteAllocator;
-
- typedef Function<CreateIndirectStubsOwnerId,
- void(ResourceIdMgr::ResourceId StubOwnerID)>
- CreateIndirectStubsOwner;
-
- typedef Function<DeregisterEHFramesId,
- void(TargetAddress Addr, uint32_t Size)>
- DeregisterEHFrames;
-
- typedef Function<DestroyRemoteAllocatorId,
- void(ResourceIdMgr::ResourceId AllocatorID)>
- DestroyRemoteAllocator;
-
- typedef Function<DestroyIndirectStubsOwnerId,
- void(ResourceIdMgr::ResourceId StubsOwnerID)>
- DestroyIndirectStubsOwner;
+ OrcRemoteTargetRPCAPI(rpc::RawByteChannel &C)
+ : rpc::SingleThreadedRPC<rpc::RawByteChannel>(C, true) {}
+
+ class CallIntVoid
+ : public rpc::Function<CallIntVoid, int32_t(JITTargetAddress Addr)> {
+ public:
+ static const char *getName() { return "CallIntVoid"; }
+ };
+
+ class CallMain
+ : public rpc::Function<CallMain, int32_t(JITTargetAddress Addr,
+ std::vector<std::string> Args)> {
+ public:
+ static const char *getName() { return "CallMain"; }
+ };
+
+ class CallVoidVoid
+ : public rpc::Function<CallVoidVoid, void(JITTargetAddress FnAddr)> {
+ public:
+ static const char *getName() { return "CallVoidVoid"; }
+ };
+
+ class CreateRemoteAllocator
+ : public rpc::Function<CreateRemoteAllocator,
+ void(ResourceIdMgr::ResourceId AllocatorID)> {
+ public:
+ static const char *getName() { return "CreateRemoteAllocator"; }
+ };
+
+ class CreateIndirectStubsOwner
+ : public rpc::Function<CreateIndirectStubsOwner,
+ void(ResourceIdMgr::ResourceId StubOwnerID)> {
+ public:
+ static const char *getName() { return "CreateIndirectStubsOwner"; }
+ };
+
+ class DeregisterEHFrames
+ : public rpc::Function<DeregisterEHFrames,
+ void(JITTargetAddress Addr, uint32_t Size)> {
+ public:
+ static const char *getName() { return "DeregisterEHFrames"; }
+ };
+
+ class DestroyRemoteAllocator
+ : public rpc::Function<DestroyRemoteAllocator,
+ void(ResourceIdMgr::ResourceId AllocatorID)> {
+ public:
+ static const char *getName() { return "DestroyRemoteAllocator"; }
+ };
+
+ class DestroyIndirectStubsOwner
+ : public rpc::Function<DestroyIndirectStubsOwner,
+ void(ResourceIdMgr::ResourceId StubsOwnerID)> {
+ public:
+ static const char *getName() { return "DestroyIndirectStubsOwner"; }
+ };
/// EmitIndirectStubs result is (StubsBase, PtrsBase, NumStubsEmitted).
- typedef Function<EmitIndirectStubsId,
- std::tuple<TargetAddress, TargetAddress, uint32_t>(
- ResourceIdMgr::ResourceId StubsOwnerID,
- uint32_t NumStubsRequired)>
- EmitIndirectStubs;
+ class EmitIndirectStubs
+ : public rpc::Function<
+ EmitIndirectStubs,
+ std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>(
+ ResourceIdMgr::ResourceId StubsOwnerID,
+ uint32_t NumStubsRequired)> {
+ public:
+ static const char *getName() { return "EmitIndirectStubs"; }
+ };
- typedef Function<EmitResolverBlockId, void()> EmitResolverBlock;
+ class EmitResolverBlock : public rpc::Function<EmitResolverBlock, void()> {
+ public:
+ static const char *getName() { return "EmitResolverBlock"; }
+ };
/// EmitTrampolineBlock result is (BlockAddr, NumTrampolines).
- typedef Function<EmitTrampolineBlockId, std::tuple<TargetAddress, uint32_t>()>
- EmitTrampolineBlock;
+ class EmitTrampolineBlock
+ : public rpc::Function<EmitTrampolineBlock,
+ std::tuple<JITTargetAddress, uint32_t>()> {
+ public:
+ static const char *getName() { return "EmitTrampolineBlock"; }
+ };
- typedef Function<GetSymbolAddressId, TargetAddress(std::string SymbolName)>
- GetSymbolAddress;
+ class GetSymbolAddress
+ : public rpc::Function<GetSymbolAddress,
+ JITTargetAddress(std::string SymbolName)> {
+ public:
+ static const char *getName() { return "GetSymbolAddress"; }
+ };
/// GetRemoteInfo result is (Triple, PointerSize, PageSize, TrampolineSize,
/// IndirectStubsSize).
- typedef Function<GetRemoteInfoId, std::tuple<std::string, uint32_t, uint32_t,
- uint32_t, uint32_t>()>
- GetRemoteInfo;
+ class GetRemoteInfo
+ : public rpc::Function<
+ GetRemoteInfo,
+ std::tuple<std::string, uint32_t, uint32_t, uint32_t, uint32_t>()> {
+ public:
+ static const char *getName() { return "GetRemoteInfo"; }
+ };
- typedef Function<ReadMemId,
- std::vector<char>(TargetAddress Src, uint64_t Size)>
- ReadMem;
+ class ReadMem
+ : public rpc::Function<ReadMem, std::vector<uint8_t>(JITTargetAddress Src,
+ uint64_t Size)> {
+ public:
+ static const char *getName() { return "ReadMem"; }
+ };
- typedef Function<RegisterEHFramesId, void(TargetAddress Addr, uint32_t Size)>
- RegisterEHFrames;
+ class RegisterEHFrames
+ : public rpc::Function<RegisterEHFrames,
+ void(JITTargetAddress Addr, uint32_t Size)> {
+ public:
+ static const char *getName() { return "RegisterEHFrames"; }
+ };
- typedef Function<ReserveMemId,
- TargetAddress(ResourceIdMgr::ResourceId AllocID,
- uint64_t Size, uint32_t Align)>
- ReserveMem;
+ class ReserveMem
+ : public rpc::Function<ReserveMem,
+ JITTargetAddress(ResourceIdMgr::ResourceId AllocID,
+ uint64_t Size, uint32_t Align)> {
+ public:
+ static const char *getName() { return "ReserveMem"; }
+ };
- typedef Function<RequestCompileId,
- TargetAddress(TargetAddress TrampolineAddr)>
- RequestCompile;
+ class RequestCompile
+ : public rpc::Function<
+ RequestCompile, JITTargetAddress(JITTargetAddress TrampolineAddr)> {
+ public:
+ static const char *getName() { return "RequestCompile"; }
+ };
- typedef Function<SetProtectionsId,
- void(ResourceIdMgr::ResourceId AllocID, TargetAddress Dst,
- uint32_t ProtFlags)>
- SetProtections;
+ class SetProtections
+ : public rpc::Function<SetProtections,
+ void(ResourceIdMgr::ResourceId AllocID,
+ JITTargetAddress Dst, uint32_t ProtFlags)> {
+ public:
+ static const char *getName() { return "SetProtections"; }
+ };
- typedef Function<TerminateSessionId, void()> TerminateSession;
+ class TerminateSession : public rpc::Function<TerminateSession, void()> {
+ public:
+ static const char *getName() { return "TerminateSession"; }
+ };
- typedef Function<WriteMemId, void(DirectBufferWriter DB)> WriteMem;
+ class WriteMem
+ : public rpc::Function<WriteMem, void(remote::DirectBufferWriter DB)> {
+ public:
+ static const char *getName() { return "WriteMem"; }
+ };
- typedef Function<WritePtrId, void(TargetAddress Dst, TargetAddress Val)>
- WritePtr;
+ class WritePtr : public rpc::Function<WritePtr, void(JITTargetAddress Dst,
+ JITTargetAddress Val)> {
+ public:
+ static const char *getName() { return "WritePtr"; }
+ };
};
} // end namespace remote
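Each RPC function is now a class deriving from rpc::Function<Derived, Signature> and identifies itself through getName() instead of a JITFuncId enumerator. A made-up function (not part of this API) would follow the same pattern:

  class Ping : public rpc::Function<Ping, void(uint32_t SeqNo)> {
  public:
    static const char *getName() { return "Ping"; }
  };

  // Caller side:  if (auto Err = callB<Ping>(SeqNo)) ...
  // Server side:  addHandler<Ping>([](uint32_t SeqNo) { return Error::success(); });

This mirrors how the real functions above are used in OrcRemoteTargetClient.h and OrcRemoteTargetServer.h.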
diff --git a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
index bf4299c69b24..506330fe3a5e 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
@@ -16,12 +16,28 @@
#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETSERVER_H
#include "OrcRemoteTargetRPCAPI.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Memory.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <type_traits>
+#include <vector>
#define DEBUG_TYPE "orc-remote"
@@ -32,7 +48,7 @@ namespace remote {
template <typename ChannelT, typename TargetT>
class OrcRemoteTargetServer : public OrcRemoteTargetRPCAPI {
public:
- typedef std::function<TargetAddress(const std::string &Name)>
+ typedef std::function<JITTargetAddress(const std::string &Name)>
SymbolLookupFtor;
typedef std::function<void(uint8_t *Addr, uint32_t Size)>
@@ -41,94 +57,50 @@ public:
OrcRemoteTargetServer(ChannelT &Channel, SymbolLookupFtor SymbolLookup,
EHFrameRegistrationFtor EHFramesRegister,
EHFrameRegistrationFtor EHFramesDeregister)
- : Channel(Channel), SymbolLookup(std::move(SymbolLookup)),
+ : OrcRemoteTargetRPCAPI(Channel), SymbolLookup(std::move(SymbolLookup)),
EHFramesRegister(std::move(EHFramesRegister)),
- EHFramesDeregister(std::move(EHFramesDeregister)) {}
+ EHFramesDeregister(std::move(EHFramesDeregister)),
+ TerminateFlag(false) {
+
+ using ThisT = typename std::remove_reference<decltype(*this)>::type;
+ addHandler<CallIntVoid>(*this, &ThisT::handleCallIntVoid);
+ addHandler<CallMain>(*this, &ThisT::handleCallMain);
+ addHandler<CallVoidVoid>(*this, &ThisT::handleCallVoidVoid);
+ addHandler<CreateRemoteAllocator>(*this,
+ &ThisT::handleCreateRemoteAllocator);
+ addHandler<CreateIndirectStubsOwner>(
+ *this, &ThisT::handleCreateIndirectStubsOwner);
+ addHandler<DeregisterEHFrames>(*this, &ThisT::handleDeregisterEHFrames);
+ addHandler<DestroyRemoteAllocator>(*this,
+ &ThisT::handleDestroyRemoteAllocator);
+ addHandler<DestroyIndirectStubsOwner>(
+ *this, &ThisT::handleDestroyIndirectStubsOwner);
+ addHandler<EmitIndirectStubs>(*this, &ThisT::handleEmitIndirectStubs);
+ addHandler<EmitResolverBlock>(*this, &ThisT::handleEmitResolverBlock);
+ addHandler<EmitTrampolineBlock>(*this, &ThisT::handleEmitTrampolineBlock);
+ addHandler<GetSymbolAddress>(*this, &ThisT::handleGetSymbolAddress);
+ addHandler<GetRemoteInfo>(*this, &ThisT::handleGetRemoteInfo);
+ addHandler<ReadMem>(*this, &ThisT::handleReadMem);
+ addHandler<RegisterEHFrames>(*this, &ThisT::handleRegisterEHFrames);
+ addHandler<ReserveMem>(*this, &ThisT::handleReserveMem);
+ addHandler<SetProtections>(*this, &ThisT::handleSetProtections);
+ addHandler<TerminateSession>(*this, &ThisT::handleTerminateSession);
+ addHandler<WriteMem>(*this, &ThisT::handleWriteMem);
+ addHandler<WritePtr>(*this, &ThisT::handleWritePtr);
+ }
// FIXME: Remove move/copy ops once MSVC supports synthesizing move ops.
OrcRemoteTargetServer(const OrcRemoteTargetServer &) = delete;
OrcRemoteTargetServer &operator=(const OrcRemoteTargetServer &) = delete;
- OrcRemoteTargetServer(OrcRemoteTargetServer &&Other)
- : Channel(Other.Channel), SymbolLookup(std::move(Other.SymbolLookup)),
- EHFramesRegister(std::move(Other.EHFramesRegister)),
- EHFramesDeregister(std::move(Other.EHFramesDeregister)) {}
-
+ OrcRemoteTargetServer(OrcRemoteTargetServer &&Other) = default;
OrcRemoteTargetServer &operator=(OrcRemoteTargetServer &&) = delete;
- Error handleKnownFunction(JITFuncId Id) {
- typedef OrcRemoteTargetServer ThisT;
-
- DEBUG(dbgs() << "Handling known proc: " << getJITFuncIdName(Id) << "\n");
-
- switch (Id) {
- case CallIntVoidId:
- return handle<CallIntVoid>(Channel, *this, &ThisT::handleCallIntVoid);
- case CallMainId:
- return handle<CallMain>(Channel, *this, &ThisT::handleCallMain);
- case CallVoidVoidId:
- return handle<CallVoidVoid>(Channel, *this, &ThisT::handleCallVoidVoid);
- case CreateRemoteAllocatorId:
- return handle<CreateRemoteAllocator>(Channel, *this,
- &ThisT::handleCreateRemoteAllocator);
- case CreateIndirectStubsOwnerId:
- return handle<CreateIndirectStubsOwner>(
- Channel, *this, &ThisT::handleCreateIndirectStubsOwner);
- case DeregisterEHFramesId:
- return handle<DeregisterEHFrames>(Channel, *this,
- &ThisT::handleDeregisterEHFrames);
- case DestroyRemoteAllocatorId:
- return handle<DestroyRemoteAllocator>(
- Channel, *this, &ThisT::handleDestroyRemoteAllocator);
- case DestroyIndirectStubsOwnerId:
- return handle<DestroyIndirectStubsOwner>(
- Channel, *this, &ThisT::handleDestroyIndirectStubsOwner);
- case EmitIndirectStubsId:
- return handle<EmitIndirectStubs>(Channel, *this,
- &ThisT::handleEmitIndirectStubs);
- case EmitResolverBlockId:
- return handle<EmitResolverBlock>(Channel, *this,
- &ThisT::handleEmitResolverBlock);
- case EmitTrampolineBlockId:
- return handle<EmitTrampolineBlock>(Channel, *this,
- &ThisT::handleEmitTrampolineBlock);
- case GetSymbolAddressId:
- return handle<GetSymbolAddress>(Channel, *this,
- &ThisT::handleGetSymbolAddress);
- case GetRemoteInfoId:
- return handle<GetRemoteInfo>(Channel, *this, &ThisT::handleGetRemoteInfo);
- case ReadMemId:
- return handle<ReadMem>(Channel, *this, &ThisT::handleReadMem);
- case RegisterEHFramesId:
- return handle<RegisterEHFrames>(Channel, *this,
- &ThisT::handleRegisterEHFrames);
- case ReserveMemId:
- return handle<ReserveMem>(Channel, *this, &ThisT::handleReserveMem);
- case SetProtectionsId:
- return handle<SetProtections>(Channel, *this,
- &ThisT::handleSetProtections);
- case WriteMemId:
- return handle<WriteMem>(Channel, *this, &ThisT::handleWriteMem);
- case WritePtrId:
- return handle<WritePtr>(Channel, *this, &ThisT::handleWritePtr);
- default:
- return orcError(OrcErrorCode::UnexpectedRPCCall);
- }
-
- llvm_unreachable("Unhandled JIT RPC procedure Id.");
+ Expected<JITTargetAddress> requestCompile(JITTargetAddress TrampolineAddr) {
+ return callB<RequestCompile>(TrampolineAddr);
}
- Expected<TargetAddress> requestCompile(TargetAddress TrampolineAddr) {
- auto Listen = [&](RPCChannel &C, uint32_t Id) {
- return handleKnownFunction(static_cast<JITFuncId>(Id));
- };
-
- return callSTHandling<RequestCompile>(Channel, Listen, TrampolineAddr);
- }
-
- Error handleTerminateSession() {
- return handle<TerminateSession>(Channel, []() { return Error::success(); });
- }
+ bool receivedTerminate() const { return TerminateFlag; }
private:
struct Allocator {
@@ -171,16 +143,16 @@ private:
static Error doNothing() { return Error::success(); }
- static TargetAddress reenter(void *JITTargetAddr, void *TrampolineAddr) {
+ static JITTargetAddress reenter(void *JITTargetAddr, void *TrampolineAddr) {
auto T = static_cast<OrcRemoteTargetServer *>(JITTargetAddr);
- auto AddrOrErr = T->requestCompile(static_cast<TargetAddress>(
+ auto AddrOrErr = T->requestCompile(static_cast<JITTargetAddress>(
reinterpret_cast<uintptr_t>(TrampolineAddr)));
// FIXME: Allow customizable failure substitution functions.
assert(AddrOrErr && "Compile request failed");
return *AddrOrErr;
}
- Expected<int32_t> handleCallIntVoid(TargetAddress Addr) {
+ Expected<int32_t> handleCallIntVoid(JITTargetAddress Addr) {
typedef int (*IntVoidFnTy)();
IntVoidFnTy Fn =
reinterpret_cast<IntVoidFnTy>(static_cast<uintptr_t>(Addr));
@@ -192,7 +164,7 @@ private:
return Result;
}
- Expected<int32_t> handleCallMain(TargetAddress Addr,
+ Expected<int32_t> handleCallMain(JITTargetAddress Addr,
std::vector<std::string> Args) {
typedef int (*MainFnTy)(int, const char *[]);
@@ -211,7 +183,7 @@ private:
return Result;
}
- Error handleCallVoidVoid(TargetAddress Addr) {
+ Error handleCallVoidVoid(JITTargetAddress Addr) {
typedef void (*VoidVoidFnTy)();
VoidVoidFnTy Fn =
reinterpret_cast<VoidVoidFnTy>(static_cast<uintptr_t>(Addr));
@@ -241,7 +213,7 @@ private:
return Error::success();
}
- Error handleDeregisterEHFrames(TargetAddress TAddr, uint32_t Size) {
+ Error handleDeregisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
DEBUG(dbgs() << " Registering EH frames at " << format("0x%016x", TAddr)
<< ", Size = " << Size << " bytes\n");
@@ -266,7 +238,7 @@ private:
return Error::success();
}
- Expected<std::tuple<TargetAddress, TargetAddress, uint32_t>>
+ Expected<std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>>
handleEmitIndirectStubs(ResourceIdMgr::ResourceId Id,
uint32_t NumStubsRequired) {
DEBUG(dbgs() << " ISMgr " << Id << " request " << NumStubsRequired
@@ -281,10 +253,10 @@ private:
TargetT::emitIndirectStubsBlock(IS, NumStubsRequired, nullptr))
return std::move(Err);
- TargetAddress StubsBase =
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(IS.getStub(0)));
- TargetAddress PtrsBase =
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(IS.getPtr(0)));
+ JITTargetAddress StubsBase = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(IS.getStub(0)));
+ JITTargetAddress PtrsBase = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(IS.getPtr(0)));
uint32_t NumStubsEmitted = IS.getNumStubs();
auto &BlockList = StubOwnerItr->second;
@@ -309,7 +281,7 @@ private:
sys::Memory::MF_READ | sys::Memory::MF_EXEC));
}
- Expected<std::tuple<TargetAddress, uint32_t>> handleEmitTrampolineBlock() {
+ Expected<std::tuple<JITTargetAddress, uint32_t>> handleEmitTrampolineBlock() {
std::error_code EC;
auto TrampolineBlock =
sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
@@ -332,14 +304,14 @@ private:
TrampolineBlocks.push_back(std::move(TrampolineBlock));
- auto TrampolineBaseAddr =
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(TrampolineMem));
+ auto TrampolineBaseAddr = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(TrampolineMem));
return std::make_tuple(TrampolineBaseAddr, NumTrampolines);
}
- Expected<TargetAddress> handleGetSymbolAddress(const std::string &Name) {
- TargetAddress Addr = SymbolLookup(Name);
+ Expected<JITTargetAddress> handleGetSymbolAddress(const std::string &Name) {
+ JITTargetAddress Addr = SymbolLookup(Name);
DEBUG(dbgs() << " Symbol '" << Name << "' = " << format("0x%016x", Addr)
<< "\n");
return Addr;
@@ -362,21 +334,22 @@ private:
IndirectStubSize);
}
- Expected<std::vector<char>> handleReadMem(TargetAddress RSrc, uint64_t Size) {
- char *Src = reinterpret_cast<char *>(static_cast<uintptr_t>(RSrc));
+ Expected<std::vector<uint8_t>> handleReadMem(JITTargetAddress RSrc,
+ uint64_t Size) {
+ uint8_t *Src = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(RSrc));
DEBUG(dbgs() << " Reading " << Size << " bytes from "
<< format("0x%016x", RSrc) << "\n");
- std::vector<char> Buffer;
+ std::vector<uint8_t> Buffer;
Buffer.reserve(Size);
- for (char *P = Src; Size != 0; --Size)
+ for (uint8_t *P = Src; Size != 0; --Size)
Buffer.push_back(*P++);
return Buffer;
}
- Error handleRegisterEHFrames(TargetAddress TAddr, uint32_t Size) {
+ Error handleRegisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
DEBUG(dbgs() << " Registering EH frames at " << format("0x%016x", TAddr)
<< ", Size = " << Size << " bytes\n");
@@ -384,8 +357,8 @@ private:
return Error::success();
}
- Expected<TargetAddress> handleReserveMem(ResourceIdMgr::ResourceId Id,
- uint64_t Size, uint32_t Align) {
+ Expected<JITTargetAddress> handleReserveMem(ResourceIdMgr::ResourceId Id,
+ uint64_t Size, uint32_t Align) {
auto I = Allocators.find(Id);
if (I == Allocators.end())
return orcError(OrcErrorCode::RemoteAllocatorDoesNotExist);
@@ -397,14 +370,14 @@ private:
DEBUG(dbgs() << " Allocator " << Id << " reserved " << LocalAllocAddr
<< " (" << Size << " bytes, alignment " << Align << ")\n");
- TargetAddress AllocAddr =
- static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(LocalAllocAddr));
+ JITTargetAddress AllocAddr = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(LocalAllocAddr));
return AllocAddr;
}
- Error handleSetProtections(ResourceIdMgr::ResourceId Id, TargetAddress Addr,
- uint32_t Flags) {
+ Error handleSetProtections(ResourceIdMgr::ResourceId Id,
+ JITTargetAddress Addr, uint32_t Flags) {
auto I = Allocators.find(Id);
if (I == Allocators.end())
return orcError(OrcErrorCode::RemoteAllocatorDoesNotExist);
@@ -417,13 +390,18 @@ private:
return Allocator.setProtections(LocalAddr, Flags);
}
+ Error handleTerminateSession() {
+ TerminateFlag = true;
+ return Error::success();
+ }
+
Error handleWriteMem(DirectBufferWriter DBW) {
DEBUG(dbgs() << " Writing " << DBW.getSize() << " bytes to "
<< format("0x%016x", DBW.getDst()) << "\n");
return Error::success();
}
- Error handleWritePtr(TargetAddress Addr, TargetAddress PtrVal) {
+ Error handleWritePtr(JITTargetAddress Addr, JITTargetAddress PtrVal) {
DEBUG(dbgs() << " Writing pointer *" << format("0x%016x", Addr) << " = "
<< format("0x%016x", PtrVal) << "\n");
uintptr_t *Ptr =
@@ -432,7 +410,6 @@ private:
return Error::success();
}
- ChannelT &Channel;
SymbolLookupFtor SymbolLookup;
EHFrameRegistrationFtor EHFramesRegister, EHFramesDeregister;
std::map<ResourceIdMgr::ResourceId, Allocator> Allocators;
@@ -440,6 +417,7 @@ private:
std::map<ResourceIdMgr::ResourceId, ISBlockOwnerList> IndirectStubsOwners;
sys::OwningMemoryBlock ResolverBlock;
std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
+ bool TerminateFlag;
};
} // end namespace remote
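With every handler registered in the constructor, standing up a server reduces to supplying a channel and three callbacks. A sketch, where ServerChannelT and the lambda bodies are placeholders for whatever the embedder provides:

  OrcRemoteTargetServer<ServerChannelT, OrcX86_64_SysV> Server(
      Channel,
      [](const std::string &Name) -> JITTargetAddress {
        return RTDyldMemoryManager::getSymbolAddressInProcess(Name);
      },
      [](uint8_t *Addr, uint32_t Size) { /* register EH frames locally */ },
      [](uint8_t *Addr, uint32_t Size) { /* deregister EH frames locally */ });

Incoming calls are then dispatched by the handlers added in the constructor until a TerminateSession message flips receivedTerminate().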
diff --git a/include/llvm/ExecutionEngine/Orc/RPCChannel.h b/include/llvm/ExecutionEngine/Orc/RPCChannel.h
deleted file mode 100644
index c569e3cf05b4..000000000000
--- a/include/llvm/ExecutionEngine/Orc/RPCChannel.h
+++ /dev/null
@@ -1,249 +0,0 @@
-//===- llvm/ExecutionEngine/Orc/RPCChannel.h --------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_EXECUTIONENGINE_ORC_RPCCHANNEL_H
-#define LLVM_EXECUTIONENGINE_ORC_RPCCHANNEL_H
-
-#include "OrcError.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/Error.h"
-#include <cstddef>
-#include <cstdint>
-#include <mutex>
-#include <string>
-#include <tuple>
-#include <vector>
-
-namespace llvm {
-namespace orc {
-namespace remote {
-
-/// Interface for byte-streams to be used with RPC.
-class RPCChannel {
-public:
- virtual ~RPCChannel() {}
-
- /// Read Size bytes from the stream into *Dst.
- virtual Error readBytes(char *Dst, unsigned Size) = 0;
-
- /// Read size bytes from *Src and append them to the stream.
- virtual Error appendBytes(const char *Src, unsigned Size) = 0;
-
- /// Flush the stream if possible.
- virtual Error send() = 0;
-
- /// Get the lock for stream reading.
- std::mutex &getReadLock() { return readLock; }
-
- /// Get the lock for stream writing.
- std::mutex &getWriteLock() { return writeLock; }
-
-private:
- std::mutex readLock, writeLock;
-};
-
-/// Notify the channel that we're starting a message send.
-/// Locks the channel for writing.
-inline Error startSendMessage(RPCChannel &C) {
- C.getWriteLock().lock();
- return Error::success();
-}
-
-/// Notify the channel that we're ending a message send.
-/// Unlocks the channel for writing.
-inline Error endSendMessage(RPCChannel &C) {
- C.getWriteLock().unlock();
- return Error::success();
-}
-
-/// Notify the channel that we're starting a message receive.
-/// Locks the channel for reading.
-inline Error startReceiveMessage(RPCChannel &C) {
- C.getReadLock().lock();
- return Error::success();
-}
-
-/// Notify the channel that we're ending a message receive.
-/// Unlocks the channel for reading.
-inline Error endReceiveMessage(RPCChannel &C) {
- C.getReadLock().unlock();
- return Error::success();
-}
-
-/// RPC channel serialization for a variadic list of arguments.
-template <typename T, typename... Ts>
-Error serializeSeq(RPCChannel &C, const T &Arg, const Ts &... Args) {
- if (auto Err = serialize(C, Arg))
- return Err;
- return serializeSeq(C, Args...);
-}
-
-/// RPC channel serialization for an (empty) variadic list of arguments.
-inline Error serializeSeq(RPCChannel &C) { return Error::success(); }
-
-/// RPC channel deserialization for a variadic list of arguments.
-template <typename T, typename... Ts>
-Error deserializeSeq(RPCChannel &C, T &Arg, Ts &... Args) {
- if (auto Err = deserialize(C, Arg))
- return Err;
- return deserializeSeq(C, Args...);
-}
-
-/// RPC channel serialization for an (empty) variadic list of arguments.
-inline Error deserializeSeq(RPCChannel &C) { return Error::success(); }
-
-/// RPC channel serialization for integer primitives.
-template <typename T>
-typename std::enable_if<
- std::is_same<T, uint64_t>::value || std::is_same<T, int64_t>::value ||
- std::is_same<T, uint32_t>::value || std::is_same<T, int32_t>::value ||
- std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
- std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value,
- Error>::type
-serialize(RPCChannel &C, T V) {
- support::endian::byte_swap<T, support::big>(V);
- return C.appendBytes(reinterpret_cast<const char *>(&V), sizeof(T));
-}
-
-/// RPC channel deserialization for integer primitives.
-template <typename T>
-typename std::enable_if<
- std::is_same<T, uint64_t>::value || std::is_same<T, int64_t>::value ||
- std::is_same<T, uint32_t>::value || std::is_same<T, int32_t>::value ||
- std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
- std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value,
- Error>::type
-deserialize(RPCChannel &C, T &V) {
- if (auto Err = C.readBytes(reinterpret_cast<char *>(&V), sizeof(T)))
- return Err;
- support::endian::byte_swap<T, support::big>(V);
- return Error::success();
-}
-
-/// RPC channel serialization for enums.
-template <typename T>
-typename std::enable_if<std::is_enum<T>::value, Error>::type
-serialize(RPCChannel &C, T V) {
- return serialize(C, static_cast<typename std::underlying_type<T>::type>(V));
-}
-
-/// RPC channel deserialization for enums.
-template <typename T>
-typename std::enable_if<std::is_enum<T>::value, Error>::type
-deserialize(RPCChannel &C, T &V) {
- typename std::underlying_type<T>::type Tmp;
- Error Err = deserialize(C, Tmp);
- V = static_cast<T>(Tmp);
- return Err;
-}
-
-/// RPC channel serialization for bools.
-inline Error serialize(RPCChannel &C, bool V) {
- uint8_t VN = V ? 1 : 0;
- return C.appendBytes(reinterpret_cast<const char *>(&VN), 1);
-}
-
-/// RPC channel deserialization for bools.
-inline Error deserialize(RPCChannel &C, bool &V) {
- uint8_t VN = 0;
- if (auto Err = C.readBytes(reinterpret_cast<char *>(&VN), 1))
- return Err;
-
- V = (VN != 0);
- return Error::success();
-}
-
-/// RPC channel serialization for StringRefs.
-/// Note: There is no corresponding deseralization for this, as StringRef
-/// doesn't own its memory and so can't hold the deserialized data.
-inline Error serialize(RPCChannel &C, StringRef S) {
- if (auto Err = serialize(C, static_cast<uint64_t>(S.size())))
- return Err;
- return C.appendBytes((const char *)S.bytes_begin(), S.size());
-}
-
-/// RPC channel serialization for std::strings.
-inline Error serialize(RPCChannel &C, const std::string &S) {
- return serialize(C, StringRef(S));
-}
-
-/// RPC channel deserialization for std::strings.
-inline Error deserialize(RPCChannel &C, std::string &S) {
- uint64_t Count;
- if (auto Err = deserialize(C, Count))
- return Err;
- S.resize(Count);
- return C.readBytes(&S[0], Count);
-}
-
-// Serialization helper for std::tuple.
-template <typename TupleT, size_t... Is>
-inline Error serializeTupleHelper(RPCChannel &C, const TupleT &V,
- llvm::index_sequence<Is...> _) {
- return serializeSeq(C, std::get<Is>(V)...);
-}
-
-/// RPC channel serialization for std::tuple.
-template <typename... ArgTs>
-inline Error serialize(RPCChannel &C, const std::tuple<ArgTs...> &V) {
- return serializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
-}
-
-// Serialization helper for std::tuple.
-template <typename TupleT, size_t... Is>
-inline Error deserializeTupleHelper(RPCChannel &C, TupleT &V,
- llvm::index_sequence<Is...> _) {
- return deserializeSeq(C, std::get<Is>(V)...);
-}
-
-/// RPC channel deserialization for std::tuple.
-template <typename... ArgTs>
-inline Error deserialize(RPCChannel &C, std::tuple<ArgTs...> &V) {
- return deserializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
-}
-
-/// RPC channel serialization for ArrayRef<T>.
-template <typename T> Error serialize(RPCChannel &C, const ArrayRef<T> &A) {
- if (auto Err = serialize(C, static_cast<uint64_t>(A.size())))
- return Err;
-
- for (const auto &E : A)
- if (auto Err = serialize(C, E))
- return Err;
-
- return Error::success();
-}
-
-/// RPC channel serialization for std::array<T>.
-template <typename T> Error serialize(RPCChannel &C, const std::vector<T> &V) {
- return serialize(C, ArrayRef<T>(V));
-}
-
-/// RPC channel deserialization for std::array<T>.
-template <typename T> Error deserialize(RPCChannel &C, std::vector<T> &V) {
- uint64_t Count = 0;
- if (auto Err = deserialize(C, Count))
- return Err;
-
- V.resize(Count);
- for (auto &E : V)
- if (auto Err = deserialize(C, E))
- return Err;
-
- return Error::success();
-}
-
-} // end namespace remote
-} // end namespace orc
-} // end namespace llvm
-
-#endif // LLVM_EXECUTIONENGINE_ORC_RPCCHANNEL_H
diff --git a/include/llvm/ExecutionEngine/Orc/RPCSerialization.h b/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
new file mode 100644
index 000000000000..359a9d81b22b
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
@@ -0,0 +1,373 @@
+//===- llvm/ExecutionEngine/Orc/RPCSerialization.h --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
+#define LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
+
+#include "OrcError.h"
+#include "llvm/Support/thread.h"
+#include <mutex>
+#include <sstream>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+template <typename T>
+class RPCTypeName;
+
+/// TypeNameSequence is a utility for rendering sequences of types to a string
+/// by rendering each type, separated by ", ".
+template <typename... ArgTs> class RPCTypeNameSequence {};
+
+/// Render an empty TypeNameSequence to an ostream.
+template <typename OStream>
+OStream &operator<<(OStream &OS, const RPCTypeNameSequence<> &V) {
+ return OS;
+}
+
+/// Render a TypeNameSequence of a single type to an ostream.
+template <typename OStream, typename ArgT>
+OStream &operator<<(OStream &OS, const RPCTypeNameSequence<ArgT> &V) {
+ OS << RPCTypeName<ArgT>::getName();
+ return OS;
+}
+
+/// Render a TypeNameSequence of more than one type to an ostream.
+template <typename OStream, typename ArgT1, typename ArgT2, typename... ArgTs>
+OStream&
+operator<<(OStream &OS, const RPCTypeNameSequence<ArgT1, ArgT2, ArgTs...> &V) {
+ OS << RPCTypeName<ArgT1>::getName() << ", "
+ << RPCTypeNameSequence<ArgT2, ArgTs...>();
+ return OS;
+}
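// Illustrative only: once the specializations further down are in place, the
// composed names read like ordinary template spellings, e.g.
const char *N = RPCTypeName<std::tuple<int32_t, bool>>::getName();
// N == "std::tuple<int32_t, bool>"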
+
+template <>
+class RPCTypeName<void> {
+public:
+ static const char* getName() { return "void"; }
+};
+
+template <>
+class RPCTypeName<int8_t> {
+public:
+ static const char* getName() { return "int8_t"; }
+};
+
+template <>
+class RPCTypeName<uint8_t> {
+public:
+ static const char* getName() { return "uint8_t"; }
+};
+
+template <>
+class RPCTypeName<int16_t> {
+public:
+ static const char* getName() { return "int16_t"; }
+};
+
+template <>
+class RPCTypeName<uint16_t> {
+public:
+ static const char* getName() { return "uint16_t"; }
+};
+
+template <>
+class RPCTypeName<int32_t> {
+public:
+ static const char* getName() { return "int32_t"; }
+};
+
+template <>
+class RPCTypeName<uint32_t> {
+public:
+ static const char* getName() { return "uint32_t"; }
+};
+
+template <>
+class RPCTypeName<int64_t> {
+public:
+ static const char* getName() { return "int64_t"; }
+};
+
+template <>
+class RPCTypeName<uint64_t> {
+public:
+ static const char* getName() { return "uint64_t"; }
+};
+
+template <>
+class RPCTypeName<bool> {
+public:
+ static const char* getName() { return "bool"; }
+};
+
+template <>
+class RPCTypeName<std::string> {
+public:
+ static const char* getName() { return "std::string"; }
+};
+
+template <typename T1, typename T2>
+class RPCTypeName<std::pair<T1, T2>> {
+public:
+ static const char* getName() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name) << "std::pair<" << RPCTypeNameSequence<T1, T2>()
+ << ">";
+ return Name.data();
+ }
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename T1, typename T2>
+std::mutex RPCTypeName<std::pair<T1, T2>>::NameMutex;
+template <typename T1, typename T2>
+std::string RPCTypeName<std::pair<T1, T2>>::Name;
+
+template <typename... ArgTs>
+class RPCTypeName<std::tuple<ArgTs...>> {
+public:
+ static const char* getName() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name) << "std::tuple<"
+ << RPCTypeNameSequence<ArgTs...>() << ">";
+ return Name.data();
+ }
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename... ArgTs>
+std::mutex RPCTypeName<std::tuple<ArgTs...>>::NameMutex;
+template <typename... ArgTs>
+std::string RPCTypeName<std::tuple<ArgTs...>>::Name;
+
+template <typename T>
+class RPCTypeName<std::vector<T>> {
+public:
+ static const char *getName() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name) << "std::vector<" << RPCTypeName<T>::getName()
+ << ">";
+ return Name.data();
+ }
+
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename T>
+std::mutex RPCTypeName<std::vector<T>>::NameMutex;
+template <typename T>
+std::string RPCTypeName<std::vector<T>>::Name;
+
+
+/// The SerializationTraits<ChannelT, T> class describes how to serialize and
+/// deserialize an instance of type T to/from an abstract channel of type
+/// ChannelT. It also provides a representation of the type's name via the
+/// getName method.
+///
+/// Specializations of this class should provide the following functions:
+///
+/// @code{.cpp}
+///
+/// static const char* getName();
+/// static Error serialize(ChannelT&, const T&);
+/// static Error deserialize(ChannelT&, T&);
+///
+/// @endcode
+///
+/// The third argument of SerializationTraits is intended to support SFINAE.
+/// E.g.:
+///
+/// @code{.cpp}
+///
+/// class MyVirtualChannel { ... };
+///
+/// template <typename DerivedChannelT>
+/// class SerializationTraits<DerivedChannelT, bool,
+/// typename std::enable_if<
+/// std::is_base_of<MyVirtualChannel, DerivedChannelT>::value
+/// >::type> {
+/// public:
+/// static const char* getName() { ... };
+/// };
+///
+/// @endcode
+template <typename ChannelT, typename WireType,
+ typename ConcreteType = WireType, typename = void>
+class SerializationTraits;
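+
+// Illustrative sketch only (not from the original header): a minimal
+// SerializationTraits specialization for uint32_t over a hypothetical
+// 'MyByteChannel' class with writeBytes/readBytes members. The channel type
+// and its member names are assumptions made for the example.
+//
+// @code{.cpp}
+//
+// template <>
+// class SerializationTraits<MyByteChannel, uint32_t> {
+// public:
+//   static Error serialize(MyByteChannel &C, uint32_t V) {
+//     return C.writeBytes(reinterpret_cast<const char *>(&V), sizeof(V));
+//   }
+//   static Error deserialize(MyByteChannel &C, uint32_t &V) {
+//     return C.readBytes(reinterpret_cast<char *>(&V), sizeof(V));
+//   }
+// };
+//
+// @endcode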
+
+template <typename ChannelT>
+class SequenceTraits {
+public:
+ static Error emitSeparator(ChannelT &C) { return Error::success(); }
+ static Error consumeSeparator(ChannelT &C) { return Error::success(); }
+};
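+
+// Illustrative sketch (the names are assumptions, not part of this header): a
+// channel that wants explicit separators between sequence elements, e.g. a
+// hypothetical text-based TraceChannel with write/expect members, can
+// specialize SequenceTraits:
+//
+// @code{.cpp}
+//
+// template <>
+// class SequenceTraits<TraceChannel> {
+// public:
+//   static Error emitSeparator(TraceChannel &C) { return C.write(", "); }
+//   static Error consumeSeparator(TraceChannel &C) { return C.expect(", "); }
+// };
+//
+// @endcode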
+
+/// Utility class for serializing sequences of values of varying types.
+/// Specializations of this class contain 'serialize' and 'deserialize' methods
+/// for the given channel. The ArgTs... list will determine the "over-the-wire"
+/// types to be serialized. The serialize and deserialize methods take a list
+/// CArgTs... ("caller arg types") which must be the same length as ArgTs...,
+/// but may be different types from ArgTs, provided that for each CArgT there
+/// is a SerializationTraits specialization
+/// SerializationTraits<ChannelT, ArgT, CArgT> with methods that can serialize
+/// the caller argument to the over-the-wire value.
+template <typename ChannelT, typename... ArgTs>
+class SequenceSerialization;
+
+template <typename ChannelT>
+class SequenceSerialization<ChannelT> {
+public:
+ static Error serialize(ChannelT &C) { return Error::success(); }
+ static Error deserialize(ChannelT &C) { return Error::success(); }
+};
+
+template <typename ChannelT, typename ArgT>
+class SequenceSerialization<ChannelT, ArgT> {
+public:
+
+ template <typename CArgT>
+ static Error serialize(ChannelT &C, const CArgT &CArg) {
+ return SerializationTraits<ChannelT, ArgT, CArgT>::serialize(C, CArg);
+ }
+
+ template <typename CArgT>
+ static Error deserialize(ChannelT &C, CArgT &CArg) {
+ return SerializationTraits<ChannelT, ArgT, CArgT>::deserialize(C, CArg);
+ }
+};
+
+template <typename ChannelT, typename ArgT, typename... ArgTs>
+class SequenceSerialization<ChannelT, ArgT, ArgTs...> {
+public:
+
+ template <typename CArgT, typename... CArgTs>
+ static Error serialize(ChannelT &C, const CArgT &CArg,
+ const CArgTs&... CArgs) {
+ if (auto Err =
+ SerializationTraits<ChannelT, ArgT, CArgT>::serialize(C, CArg))
+ return Err;
+ if (auto Err = SequenceTraits<ChannelT>::emitSeparator(C))
+ return Err;
+ return SequenceSerialization<ChannelT, ArgTs...>::serialize(C, CArgs...);
+ }
+
+ template <typename CArgT, typename... CArgTs>
+ static Error deserialize(ChannelT &C, CArgT &CArg,
+ CArgTs&... CArgs) {
+ if (auto Err =
+ SerializationTraits<ChannelT, ArgT, CArgT>::deserialize(C, CArg))
+ return Err;
+ if (auto Err = SequenceTraits<ChannelT>::consumeSeparator(C))
+ return Err;
+ return SequenceSerialization<ChannelT, ArgTs...>::deserialize(C, CArgs...);
+ }
+};
+
+template <typename ChannelT, typename... ArgTs>
+Error serializeSeq(ChannelT &C, const ArgTs &... Args) {
+ return SequenceSerialization<ChannelT, ArgTs...>::serialize(C, Args...);
+}
+
+template <typename ChannelT, typename... ArgTs>
+Error deserializeSeq(ChannelT &C, ArgTs &... Args) {
+ return SequenceSerialization<ChannelT, ArgTs...>::deserialize(C, Args...);
+}
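+
+// Usage sketch (assuming a channel type for which the element types below
+// have SerializationTraits specializations; 'SomeChannel' is hypothetical):
+// values are written and read back with matching serializeSeq/deserializeSeq
+// calls.
+//
+// @code{.cpp}
+//
+// Error sendHeader(SomeChannel &C) {
+//   return serializeSeq(C, uint32_t(1), std::string("hello"));
+// }
+//
+// Error recvHeader(SomeChannel &C, uint32_t &Version, std::string &Tag) {
+//   return deserializeSeq(C, Version, Tag);
+// }
+//
+// @endcode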
+
+/// SerializationTraits default specialization for std::pair.
+template <typename ChannelT, typename T1, typename T2>
+class SerializationTraits<ChannelT, std::pair<T1, T2>> {
+public:
+ static Error serialize(ChannelT &C, const std::pair<T1, T2> &V) {
+ return serializeSeq(C, V.first, V.second);
+ }
+
+ static Error deserialize(ChannelT &C, std::pair<T1, T2> &V) {
+ return deserializeSeq(C, V.first, V.second);
+ }
+};
+
+/// SerializationTraits default specialization for std::tuple.
+template <typename ChannelT, typename... ArgTs>
+class SerializationTraits<ChannelT, std::tuple<ArgTs...>> {
+public:
+
+ /// RPC channel serialization for std::tuple.
+ static Error serialize(ChannelT &C, const std::tuple<ArgTs...> &V) {
+ return serializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
+ }
+
+ /// RPC channel deserialization for std::tuple.
+ static Error deserialize(ChannelT &C, std::tuple<ArgTs...> &V) {
+ return deserializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
+ }
+
+private:
+ // Serialization helper for std::tuple.
+ template <size_t... Is>
+ static Error serializeTupleHelper(ChannelT &C, const std::tuple<ArgTs...> &V,
+ llvm::index_sequence<Is...> _) {
+ return serializeSeq(C, std::get<Is>(V)...);
+ }
+
+ // Deserialization helper for std::tuple.
+ template <size_t... Is>
+ static Error deserializeTupleHelper(ChannelT &C, std::tuple<ArgTs...> &V,
+ llvm::index_sequence<Is...> _) {
+ return deserializeSeq(C, std::get<Is>(V)...);
+ }
+};
+
+/// SerializationTraits default specialization for std::vector.
+template <typename ChannelT, typename T>
+class SerializationTraits<ChannelT, std::vector<T>> {
+public:
+
+ /// Serialize a std::vector<T> to the channel.
+ static Error serialize(ChannelT &C, const std::vector<T> &V) {
+ if (auto Err = serializeSeq(C, static_cast<uint64_t>(V.size())))
+ return Err;
+
+ for (const auto &E : V)
+ if (auto Err = serializeSeq(C, E))
+ return Err;
+
+ return Error::success();
+ }
+
+ /// Deserialize a std::vector<T> from the channel.
+ static Error deserialize(ChannelT &C, std::vector<T> &V) {
+ uint64_t Count = 0;
+ if (auto Err = deserializeSeq(C, Count))
+ return Err;
+
+ V.resize(Count);
+ for (auto &E : V)
+ if (auto Err = deserializeSeq(C, E))
+ return Err;
+
+ return Error::success();
+ }
+};
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
diff --git a/include/llvm/ExecutionEngine/Orc/RPCUtils.h b/include/llvm/ExecutionEngine/Orc/RPCUtils.h
index 966a49684348..f51fbe153a41 100644
--- a/include/llvm/ExecutionEngine/Orc/RPCUtils.h
+++ b/include/llvm/ExecutionEngine/Orc/RPCUtils.h
@@ -1,4 +1,4 @@
-//===----- RPCUTils.h - Basic tilities for building RPC APIs ----*- C++ -*-===//
+//===------- RPCUtils.h - Utilities for building RPC APIs -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,11 @@
//
//===----------------------------------------------------------------------===//
//
-// Basic utilities for building RPC APIs.
+// Utilities to support construction of simple RPC APIs.
+//
+// The RPC utilities aim for ease of use (minimal conceptual overhead) for C++
+// programmers, high performance, low memory overhead, and efficient use of the
+// communications channel.
//
//===----------------------------------------------------------------------===//
@@ -15,11 +19,12 @@
#define LLVM_EXECUTIONENGINE_ORC_RPCUTILS_H
#include <map>
+#include <thread>
#include <vector>
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/RPCSerialization.h"
#ifdef _MSC_VER
// concrt.h depends on eh.h for __uncaught_exception declaration
@@ -40,485 +45,804 @@
namespace llvm {
namespace orc {
-namespace remote {
+namespace rpc {
-/// Describes reserved RPC Function Ids.
-///
-/// The default implementation will serve for integer and enum function id
-/// types. If you want to use a custom type as your FunctionId you can
-/// specialize this class and provide unique values for InvalidId,
-/// ResponseId and FirstValidId.
+template <typename DerivedFunc, typename FnT> class Function;
-template <typename T> class RPCFunctionIdTraits {
+// RPC Function class.
+// DerivedFunc should be a user defined class with a static 'getName()' method
+// returning a const char* representing the function's name.
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+class Function<DerivedFunc, RetT(ArgTs...)> {
public:
- static const T InvalidId = static_cast<T>(0);
- static const T ResponseId = static_cast<T>(1);
- static const T FirstValidId = static_cast<T>(2);
+ /// User defined function type.
+ using Type = RetT(ArgTs...);
+
+ /// Return type.
+ using ReturnType = RetT;
+
+ /// Returns the full function prototype as a string.
+ static const char *getPrototype() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name)
+ << RPCTypeName<RetT>::getName() << " " << DerivedFunc::getName()
+ << "(" << llvm::orc::rpc::RPCTypeNameSequence<ArgTs...>() << ")";
+ return Name.data();
+ }
+
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
};
-// Base class containing utilities that require partial specialization.
-// These cannot be included in RPC, as template class members cannot be
-// partially specialized.
-class RPCBase {
-protected:
- // RPC Function description type.
- //
- // This class provides the information and operations needed to support the
- // RPC primitive operations (call, expect, etc) for a given function. It
- // is specialized for void and non-void functions to deal with the differences
- // betwen the two. Both specializations have the same interface:
- //
- // Id - The function's unique identifier.
- // OptionalReturn - The return type for asyncronous calls.
- // ErrorReturn - The return type for synchronous calls.
- // optionalToErrorReturn - Conversion from a valid OptionalReturn to an
- // ErrorReturn.
- // readResult - Deserialize a result from a channel.
- // abandon - Abandon a promised (asynchronous) result.
- // respond - Retun a result on the channel.
- template <typename FunctionIdT, FunctionIdT FuncId, typename FnT>
- class FunctionHelper {};
-
- // RPC Function description specialization for non-void functions.
- template <typename FunctionIdT, FunctionIdT FuncId, typename RetT,
- typename... ArgTs>
- class FunctionHelper<FunctionIdT, FuncId, RetT(ArgTs...)> {
- public:
- static_assert(FuncId != RPCFunctionIdTraits<FunctionIdT>::InvalidId &&
- FuncId != RPCFunctionIdTraits<FunctionIdT>::ResponseId,
- "Cannot define custom function with InvalidId or ResponseId. "
- "Please use RPCFunctionTraits<FunctionIdT>::FirstValidId.");
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+std::mutex Function<DerivedFunc, RetT(ArgTs...)>::NameMutex;
- static const FunctionIdT Id = FuncId;
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+std::string Function<DerivedFunc, RetT(ArgTs...)>::Name;
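+
+// Example (editorial sketch; 'AddInts' is a hypothetical user function, not
+// part of LLVM): an RPC function is declared by deriving from Function and
+// providing a static getName().
+//
+// @code{.cpp}
+//
+// class AddInts : public Function<AddInts, int32_t(int32_t, int32_t)> {
+// public:
+//   static const char *getName() { return "AddInts"; }
+// };
+//
+// // AddInts::getPrototype() yields "int32_t AddInts(int32_t, int32_t)".
+//
+// @endcode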
- typedef Optional<RetT> OptionalReturn;
+/// Provides a typedef for a tuple containing the decayed argument types.
+template <typename T> class FunctionArgsTuple;
- typedef Expected<RetT> ErrorReturn;
+template <typename RetT, typename... ArgTs>
+class FunctionArgsTuple<RetT(ArgTs...)> {
+public:
+ using Type = std::tuple<typename std::decay<
+ typename std::remove_reference<ArgTs>::type>::type...>;
+};
- static ErrorReturn optionalToErrorReturn(OptionalReturn &&V) {
- assert(V && "Return value not available");
- return std::move(*V);
- }
+/// Allocates RPC function ids during autonegotiation.
+/// Specializations of this class must provide four members:
+///
+/// static T getInvalidId():
+/// Should return a reserved id that will be used to represent missing
+/// functions during autonegotiation.
+///
+/// static T getResponseId():
+/// Should return a reserved id that will be used to send function responses
+/// (return values).
+///
+/// static T getNegotiateId():
+/// Should return a reserved id for the negotiate function, which will be used
+/// to negotiate ids for user defined functions.
+///
+/// template <typename Func> T allocate():
+/// Allocate a unique id for function Func.
+template <typename T, typename = void> class RPCFunctionIdAllocator;
+
+/// This specialization of RPCFunctionIdAllocator provides a default
+/// implementation for integral types.
+template <typename T>
+class RPCFunctionIdAllocator<
+ T, typename std::enable_if<std::is_integral<T>::value>::type> {
+public:
+ static T getInvalidId() { return T(0); }
+ static T getResponseId() { return T(1); }
+ static T getNegotiateId() { return T(2); }
- template <typename ChannelT>
- static Error readResult(ChannelT &C, std::promise<OptionalReturn> &P) {
- RetT Val;
- auto Err = deserialize(C, Val);
- auto Err2 = endReceiveMessage(C);
- Err = joinErrors(std::move(Err), std::move(Err2));
+ template <typename Func> T allocate() { return NextId++; }
- if (Err) {
- P.set_value(OptionalReturn());
- return Err;
- }
- P.set_value(std::move(Val));
- return Error::success();
- }
+private:
+ T NextId = 3;
+};
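+
+// Usage sketch ('AddInts' is the hypothetical Function from the example
+// above): for an integral id type the default allocator reserves ids 0-2 and
+// hands out user function ids starting at 3.
+//
+// @code{.cpp}
+//
+// RPCFunctionIdAllocator<uint32_t> Alloc;
+// uint32_t Invalid = RPCFunctionIdAllocator<uint32_t>::getInvalidId(); // 0
+// uint32_t Response = RPCFunctionIdAllocator<uint32_t>::getResponseId(); // 1
+// uint32_t AddIntsId = Alloc.allocate<AddInts>(); // 3
+//
+// @endcode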
- static void abandon(std::promise<OptionalReturn> &P) {
- P.set_value(OptionalReturn());
- }
+namespace detail {
- template <typename ChannelT, typename SequenceNumberT>
- static Error respond(ChannelT &C, SequenceNumberT SeqNo,
- ErrorReturn &Result) {
- FunctionIdT ResponseId = RPCFunctionIdTraits<FunctionIdT>::ResponseId;
+// FIXME: Remove MSVCPError/MSVCPExpected once MSVC's future implementation
+// supports classes without default constructors.
+#ifdef _MSC_VER
- // If the handler returned an error then bail out with that.
- if (!Result)
- return Result.takeError();
+namespace msvc_hacks {
- // Otherwise open a new message on the channel and send the result.
- if (auto Err = startSendMessage(C))
- return Err;
- if (auto Err = serializeSeq(C, ResponseId, SeqNo, *Result))
- return Err;
- return endSendMessage(C);
- }
- };
+// Work around MSVC's future implementation's use of default constructors:
+// a default-constructed value in the promise will be overwritten when the
+// real error is set, so the default-constructed Error must be marked as
+// checked up front.
+class MSVCPError : public Error {
+public:
+ MSVCPError() { (void)!!*this; }
- // RPC Function description specialization for void functions.
- template <typename FunctionIdT, FunctionIdT FuncId, typename... ArgTs>
- class FunctionHelper<FunctionIdT, FuncId, void(ArgTs...)> {
- public:
- static_assert(FuncId != RPCFunctionIdTraits<FunctionIdT>::InvalidId &&
- FuncId != RPCFunctionIdTraits<FunctionIdT>::ResponseId,
- "Cannot define custom function with InvalidId or ResponseId. "
- "Please use RPCFunctionTraits<FunctionIdT>::FirstValidId.");
+ MSVCPError(MSVCPError &&Other) : Error(std::move(Other)) {}
- static const FunctionIdT Id = FuncId;
+ MSVCPError &operator=(MSVCPError Other) {
+ Error::operator=(std::move(Other));
+ return *this;
+ }
- typedef bool OptionalReturn;
- typedef Error ErrorReturn;
+ MSVCPError(Error Err) : Error(std::move(Err)) {}
+};
- static ErrorReturn optionalToErrorReturn(OptionalReturn &&V) {
- assert(V && "Return value not available");
- return Error::success();
- }
+// Work around MSVC's future implementation, similar to MSVCPError.
+template <typename T> class MSVCPExpected : public Expected<T> {
+public:
+ MSVCPExpected()
+ : Expected<T>(make_error<StringError>("", inconvertibleErrorCode())) {
+ consumeError(this->takeError());
+ }
- template <typename ChannelT>
- static Error readResult(ChannelT &C, std::promise<OptionalReturn> &P) {
- // Void functions don't have anything to deserialize, so we're good.
- P.set_value(true);
- return endReceiveMessage(C);
- }
+ MSVCPExpected(MSVCPExpected &&Other) : Expected<T>(std::move(Other)) {}
- static void abandon(std::promise<OptionalReturn> &P) { P.set_value(false); }
+ MSVCPExpected &operator=(MSVCPExpected &&Other) {
+ Expected<T>::operator=(std::move(Other));
+ return *this;
+ }
- template <typename ChannelT, typename SequenceNumberT>
- static Error respond(ChannelT &C, SequenceNumberT SeqNo,
- ErrorReturn &Result) {
- const FunctionIdT ResponseId =
- RPCFunctionIdTraits<FunctionIdT>::ResponseId;
+ MSVCPExpected(Error Err) : Expected<T>(std::move(Err)) {}
+
+ template <typename OtherT>
+ MSVCPExpected(
+ OtherT &&Val,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Val)) {}
+
+ template <class OtherT>
+ MSVCPExpected(
+ Expected<OtherT> &&Other,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Other)) {}
+
+ template <class OtherT>
+ explicit MSVCPExpected(
+ Expected<OtherT> &&Other,
+ typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Other)) {}
+};
- // If the handler returned an error then bail out with that.
- if (Result)
- return std::move(Result);
+} // end namespace msvc_hacks
- // Otherwise open a new message on the channel and send the result.
- if (auto Err = startSendMessage(C))
- return Err;
- if (auto Err = serializeSeq(C, ResponseId, SeqNo))
- return Err;
- return endSendMessage(C);
- }
- };
+#endif // _MSC_VER
- // Helper for the call primitive.
- template <typename ChannelT, typename SequenceNumberT, typename Func>
- class CallHelper;
+// ResultTraits provides typedefs and utilities specific to the return type
+// of functions.
+template <typename RetT> class ResultTraits {
+public:
+ // The return type wrapped in llvm::Expected.
+ using ErrorReturnType = Expected<RetT>;
- template <typename ChannelT, typename SequenceNumberT, typename FunctionIdT,
- FunctionIdT FuncId, typename RetT, typename... ArgTs>
- class CallHelper<ChannelT, SequenceNumberT,
- FunctionHelper<FunctionIdT, FuncId, RetT(ArgTs...)>> {
- public:
- static Error call(ChannelT &C, SequenceNumberT SeqNo,
- const ArgTs &... Args) {
- if (auto Err = startSendMessage(C))
- return Err;
- if (auto Err = serializeSeq(C, FuncId, SeqNo, Args...))
- return Err;
- return endSendMessage(C);
- }
- };
+#ifdef _MSC_VER
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<msvc_hacks::MSVCPExpected<RetT>>;
- // Helper for handle primitive.
- template <typename ChannelT, typename SequenceNumberT, typename Func>
- class HandlerHelper;
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<msvc_hacks::MSVCPExpected<RetT>>;
+#else
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<ErrorReturnType>;
- template <typename ChannelT, typename SequenceNumberT, typename FunctionIdT,
- FunctionIdT FuncId, typename RetT, typename... ArgTs>
- class HandlerHelper<ChannelT, SequenceNumberT,
- FunctionHelper<FunctionIdT, FuncId, RetT(ArgTs...)>> {
- public:
- template <typename HandlerT>
- static Error handle(ChannelT &C, HandlerT Handler) {
- return readAndHandle(C, Handler, llvm::index_sequence_for<ArgTs...>());
- }
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<ErrorReturnType>;
+#endif
- private:
- typedef FunctionHelper<FunctionIdT, FuncId, RetT(ArgTs...)> Func;
+ // Create a 'blank' value of the ErrorReturnType, ready and safe to
+ // overwrite.
+ static ErrorReturnType createBlankErrorReturnValue() {
+ return ErrorReturnType(RetT());
+ }
- template <typename HandlerT, size_t... Is>
- static Error readAndHandle(ChannelT &C, HandlerT Handler,
- llvm::index_sequence<Is...> _) {
- std::tuple<ArgTs...> RPCArgs;
- SequenceNumberT SeqNo;
- // GCC 4.7 and 4.8 incorrectly issue a -Wunused-but-set-variable warning
- // for RPCArgs. Void cast RPCArgs to work around this for now.
- // FIXME: Remove this workaround once we can assume a working GCC version.
- (void)RPCArgs;
- if (auto Err = deserializeSeq(C, SeqNo, std::get<Is>(RPCArgs)...))
- return Err;
+ // Consume an abandoned ErrorReturnType.
+ static void consumeAbandoned(ErrorReturnType RetOrErr) {
+ consumeError(RetOrErr.takeError());
+ }
+};
- // We've deserialized the arguments, so unlock the channel for reading
- // before we call the handler. This allows recursive RPC calls.
- if (auto Err = endReceiveMessage(C))
- return Err;
+// ResultTraits specialization for void functions.
+template <> class ResultTraits<void> {
+public:
+ // For void functions, ErrorReturnType is llvm::Error.
+ using ErrorReturnType = Error;
- // Run the handler and get the result.
- auto Result = Handler(std::get<Is>(RPCArgs)...);
+#ifdef _MSC_VER
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<msvc_hacks::MSVCPError>;
- // Return the result to the client.
- return Func::template respond<ChannelT, SequenceNumberT>(C, SeqNo,
- Result);
- }
- };
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<msvc_hacks::MSVCPError>;
+#else
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<ErrorReturnType>;
- // Helper for wrapping member functions up as functors.
- template <typename ClassT, typename RetT, typename... ArgTs>
- class MemberFnWrapper {
- public:
- typedef RetT (ClassT::*MethodT)(ArgTs...);
- MemberFnWrapper(ClassT &Instance, MethodT Method)
- : Instance(Instance), Method(Method) {}
- RetT operator()(ArgTs &... Args) { return (Instance.*Method)(Args...); }
-
- private:
- ClassT &Instance;
- MethodT Method;
- };
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<ErrorReturnType>;
+#endif
- // Helper that provides a Functor for deserializing arguments.
- template <typename... ArgTs> class ReadArgs {
- public:
- Error operator()() { return Error::success(); }
- };
+ // Create a 'blank' value of the ErrorReturnType, ready and safe to
+ // overwrite.
+ static ErrorReturnType createBlankErrorReturnValue() {
+ return ErrorReturnType::success();
+ }
- template <typename ArgT, typename... ArgTs>
- class ReadArgs<ArgT, ArgTs...> : public ReadArgs<ArgTs...> {
- public:
- ReadArgs(ArgT &Arg, ArgTs &... Args)
- : ReadArgs<ArgTs...>(Args...), Arg(Arg) {}
+ // Consume an abandoned ErrorReturnType.
+ static void consumeAbandoned(ErrorReturnType Err) {
+ consumeError(std::move(Err));
+ }
+};
- Error operator()(ArgT &ArgVal, ArgTs &... ArgVals) {
- this->Arg = std::move(ArgVal);
- return ReadArgs<ArgTs...>::operator()(ArgVals...);
- }
+// ResultTraits<Error> is equivalent to ResultTraits<void>. This allows
+// handlers for void RPC functions to return either void (in which case they
+// implicitly succeed) or Error (in which case their error return is
+// propagated). See usage in HandlerTraits::runHandlerHelper.
+template <> class ResultTraits<Error> : public ResultTraits<void> {};
+
+// ResultTraits<Expected<T>> is equivalent to ResultTraits<T>. This allows
+// handlers for RPC functions returning a T to return either a T (in which
+// case they implicitly succeed) or Expected<T> (in which case their error
+// return is propagated). See usage in HandlerTraits::runHandlerHelper.
+template <typename RetT>
+class ResultTraits<Expected<RetT>> : public ResultTraits<RetT> {};
+
+// Send a response of the given wire return type (WireRetT) over the
+// channel, with the given sequence number.
+template <typename WireRetT, typename HandlerRetT, typename ChannelT,
+ typename FunctionIdT, typename SequenceNumberT>
+static Error respond(ChannelT &C, const FunctionIdT &ResponseId,
+ SequenceNumberT SeqNo, Expected<HandlerRetT> ResultOrErr) {
+ // If this was an error bail out.
+ // FIXME: Send an "error" message to the client if this is not a channel
+ // failure?
+ if (auto Err = ResultOrErr.takeError())
+ return Err;
+
+ // Open the response message.
+ if (auto Err = C.startSendMessage(ResponseId, SeqNo))
+ return Err;
+
+ // Serialize the result.
+ if (auto Err =
+ SerializationTraits<ChannelT, WireRetT, HandlerRetT>::serialize(
+ C, *ResultOrErr))
+ return Err;
+
+ // Close the response message.
+ return C.endSendMessage();
+}
+
+// Send an empty response message on the given channel to indicate that
+// the handler ran.
+template <typename WireRetT, typename ChannelT, typename FunctionIdT,
+ typename SequenceNumberT>
+static Error respond(ChannelT &C, const FunctionIdT &ResponseId,
+ SequenceNumberT SeqNo, Error Err) {
+ if (Err)
+ return Err;
+ if (auto Err2 = C.startSendMessage(ResponseId, SeqNo))
+ return Err2;
+ return C.endSendMessage();
+}
+
+// Converts a given type to the equivalent error return type.
+template <typename T> class WrappedHandlerReturn {
+public:
+ using Type = Expected<T>;
+};
- private:
- ArgT &Arg;
- };
+template <typename T> class WrappedHandlerReturn<Expected<T>> {
+public:
+ using Type = Expected<T>;
};
-/// Contains primitive utilities for defining, calling and handling calls to
-/// remote procedures. ChannelT is a bidirectional stream conforming to the
-/// RPCChannel interface (see RPCChannel.h), and FunctionIdT is a procedure
-/// identifier type that must be serializable on ChannelT.
-///
-/// These utilities support the construction of very primitive RPC utilities.
-/// Their intent is to ensure correct serialization and deserialization of
-/// procedure arguments, and to keep the client and server's view of the API in
-/// sync.
-///
-/// These utilities do not support return values. These can be handled by
-/// declaring a corresponding '.*Response' procedure and expecting it after a
-/// call). They also do not support versioning: the client and server *must* be
-/// compiled with the same procedure definitions.
-///
-///
-///
-/// Overview (see comments individual types/methods for details):
-///
-/// Function<Id, Args...> :
-///
-/// associates a unique serializable id with an argument list.
-///
-///
-/// call<Func>(Channel, Args...) :
-///
-/// Calls the remote procedure 'Func' by serializing Func's id followed by its
-/// arguments and sending the resulting bytes to 'Channel'.
-///
-///
-/// handle<Func>(Channel, <functor matching Error(Args...)> :
-///
-/// Handles a call to 'Func' by deserializing its arguments and calling the
-/// given functor. This assumes that the id for 'Func' has already been
-/// deserialized.
-///
-/// expect<Func>(Channel, <functor matching Error(Args...)> :
-///
-/// The same as 'handle', except that the procedure id should not have been
-/// read yet. Expect will deserialize the id and assert that it matches Func's
-/// id. If it does not, and unexpected RPC call error is returned.
-template <typename ChannelT, typename FunctionIdT = uint32_t,
- typename SequenceNumberT = uint16_t>
-class RPC : public RPCBase {
+template <> class WrappedHandlerReturn<void> {
+public:
+ using Type = Error;
+};
+
+template <> class WrappedHandlerReturn<Error> {
public:
- /// RPC default constructor.
- RPC() = default;
+ using Type = Error;
+};
- /// RPC instances cannot be copied.
- RPC(const RPC &) = delete;
+template <> class WrappedHandlerReturn<ErrorSuccess> {
+public:
+ using Type = Error;
+};
- /// RPC instances cannot be copied.
- RPC &operator=(const RPC &) = delete;
+// This template class provides utilities related to RPC function handlers.
+// The base case applies to non-function types (the template class is
+// specialized for function types) and inherits from the appropriate
+// specialization for the given non-function type's call operator.
+template <typename HandlerT>
+class HandlerTraits : public HandlerTraits<decltype(
+ &std::remove_reference<HandlerT>::type::operator())> {
+};
- /// RPC move constructor.
- // FIXME: Remove once MSVC can synthesize move ops.
- RPC(RPC &&Other)
- : SequenceNumberMgr(std::move(Other.SequenceNumberMgr)),
- OutstandingResults(std::move(Other.OutstandingResults)) {}
+// Traits for handlers with a given function type.
+template <typename RetT, typename... ArgTs>
+class HandlerTraits<RetT(ArgTs...)> {
+public:
+ // Function type of the handler.
+ using Type = RetT(ArgTs...);
- /// RPC move assignment.
- // FIXME: Remove once MSVC can synthesize move ops.
- RPC &operator=(RPC &&Other) {
- SequenceNumberMgr = std::move(Other.SequenceNumberMgr);
- OutstandingResults = std::move(Other.OutstandingResults);
- return *this;
+ // Return type of the handler.
+ using ReturnType = RetT;
+
+ // A std::tuple wrapping the handler arguments.
+ using ArgStorage = typename FunctionArgsTuple<RetT(ArgTs...)>::Type;
+
+ // Call the given handler with the given arguments.
+ template <typename HandlerT>
+ static typename WrappedHandlerReturn<RetT>::Type
+ unpackAndRun(HandlerT &Handler, ArgStorage &Args) {
+ return unpackAndRunHelper(Handler, Args,
+ llvm::index_sequence_for<ArgTs...>());
}
- /// Utility class for defining/referring to RPC procedures.
- ///
- /// Typedefs of this utility are used when calling/handling remote procedures.
- ///
- /// FuncId should be a unique value of FunctionIdT (i.e. not used with any
- /// other Function typedef in the RPC API being defined.
- ///
- /// the template argument Ts... gives the argument list for the remote
- /// procedure.
- ///
- /// E.g.
- ///
- /// typedef Function<0, bool> Func1;
- /// typedef Function<1, std::string, std::vector<int>> Func2;
- ///
- /// if (auto Err = call<Func1>(Channel, true))
- /// /* handle Err */;
- ///
- /// if (auto Err = expect<Func2>(Channel,
- /// [](std::string &S, std::vector<int> &V) {
- /// // Stuff.
- /// return Error::success();
- /// })
- /// /* handle Err */;
- ///
- template <FunctionIdT FuncId, typename FnT>
- using Function = FunctionHelper<FunctionIdT, FuncId, FnT>;
+ // Call the given handler with the given arguments.
+ template <typename HandlerT>
+ static typename std::enable_if<
+ std::is_void<typename HandlerTraits<HandlerT>::ReturnType>::value,
+ Error>::type
+ run(HandlerT &Handler, ArgTs &&... Args) {
+ Handler(std::move(Args)...);
+ return Error::success();
+ }
- /// Return type for asynchronous call primitives.
- template <typename Func>
- using AsyncCallResult = std::future<typename Func::OptionalReturn>;
+ template <typename HandlerT>
+ static typename std::enable_if<
+ !std::is_void<typename HandlerTraits<HandlerT>::ReturnType>::value,
+ typename HandlerTraits<HandlerT>::ReturnType>::type
+ run(HandlerT &Handler, ArgTs... Args) {
+ return Handler(std::move(Args)...);
+ }
- /// Return type for asynchronous call-with-seq primitives.
- template <typename Func>
- using AsyncCallWithSeqResult =
- std::pair<std::future<typename Func::OptionalReturn>, SequenceNumberT>;
+ // Serialize arguments to the channel.
+ template <typename ChannelT, typename... CArgTs>
+ static Error serializeArgs(ChannelT &C, const CArgTs... CArgs) {
+ return SequenceSerialization<ChannelT, ArgTs...>::serialize(C, CArgs...);
+ }
- /// Serialize Args... to channel C, but do not call C.send().
- ///
- /// Returns an error (on serialization failure) or a pair of:
- /// (1) A future Optional<T> (or future<bool> for void functions), and
- /// (2) A sequence number.
- ///
- /// This utility function is primarily used for single-threaded mode support,
- /// where the sequence number can be used to wait for the corresponding
- /// result. In multi-threaded mode the appendCallAsync method, which does not
- /// return the sequence numeber, should be preferred.
- template <typename Func, typename... ArgTs>
- Expected<AsyncCallWithSeqResult<Func>>
- appendCallAsyncWithSeq(ChannelT &C, const ArgTs &... Args) {
- auto SeqNo = SequenceNumberMgr.getSequenceNumber();
- std::promise<typename Func::OptionalReturn> Promise;
- auto Result = Promise.get_future();
- OutstandingResults[SeqNo] =
- createOutstandingResult<Func>(std::move(Promise));
-
- if (auto Err = CallHelper<ChannelT, SequenceNumberT, Func>::call(C, SeqNo,
- Args...)) {
- abandonOutstandingResults();
- return std::move(Err);
- } else
- return AsyncCallWithSeqResult<Func>(std::move(Result), SeqNo);
+ // Deserialize arguments from the channel.
+ template <typename ChannelT, typename... CArgTs>
+ static Error deserializeArgs(ChannelT &C, std::tuple<CArgTs...> &Args) {
+ return deserializeArgsHelper(C, Args,
+ llvm::index_sequence_for<CArgTs...>());
}
- /// The same as appendCallAsyncWithSeq, except that it calls C.send() to
- /// flush the channel after serializing the call.
- template <typename Func, typename... ArgTs>
- Expected<AsyncCallWithSeqResult<Func>>
- callAsyncWithSeq(ChannelT &C, const ArgTs &... Args) {
- auto Result = appendCallAsyncWithSeq<Func>(C, Args...);
- if (!Result)
- return Result;
- if (auto Err = C.send()) {
- abandonOutstandingResults();
- return std::move(Err);
- }
- return Result;
+private:
+ template <typename ChannelT, typename... CArgTs, size_t... Indexes>
+ static Error deserializeArgsHelper(ChannelT &C, std::tuple<CArgTs...> &Args,
+ llvm::index_sequence<Indexes...> _) {
+ return SequenceSerialization<ChannelT, ArgTs...>::deserialize(
+ C, std::get<Indexes>(Args)...);
}
- /// Serialize Args... to channel C, but do not call send.
- /// Returns an error if serialization fails, otherwise returns a
- /// std::future<Optional<T>> (or a future<bool> for void functions).
- template <typename Func, typename... ArgTs>
- Expected<AsyncCallResult<Func>> appendCallAsync(ChannelT &C,
- const ArgTs &... Args) {
- auto ResAndSeqOrErr = appendCallAsyncWithSeq<Func>(C, Args...);
- if (ResAndSeqOrErr)
- return std::move(ResAndSeqOrErr->first);
- return ResAndSeqOrErr.getError();
+ template <typename HandlerT, size_t... Indexes>
+ static typename WrappedHandlerReturn<
+ typename HandlerTraits<HandlerT>::ReturnType>::Type
+ unpackAndRunHelper(HandlerT &Handler, ArgStorage &Args,
+ llvm::index_sequence<Indexes...>) {
+ return run(Handler, std::move(std::get<Indexes>(Args))...);
}
+};
- /// The same as appendCallAsync, except that it calls C.send to flush the
- /// channel after serializing the call.
- template <typename Func, typename... ArgTs>
- Expected<AsyncCallResult<Func>> callAsync(ChannelT &C,
- const ArgTs &... Args) {
- auto ResAndSeqOrErr = callAsyncWithSeq<Func>(C, Args...);
- if (ResAndSeqOrErr)
- return std::move(ResAndSeqOrErr->first);
- return ResAndSeqOrErr.getError();
- }
-
- /// This can be used in single-threaded mode.
- template <typename Func, typename HandleFtor, typename... ArgTs>
- typename Func::ErrorReturn
- callSTHandling(ChannelT &C, HandleFtor &HandleOther, const ArgTs &... Args) {
- if (auto ResultAndSeqNoOrErr = callAsyncWithSeq<Func>(C, Args...)) {
- auto &ResultAndSeqNo = *ResultAndSeqNoOrErr;
- if (auto Err = waitForResult(C, ResultAndSeqNo.second, HandleOther))
- return std::move(Err);
- return Func::optionalToErrorReturn(ResultAndSeqNo.first.get());
- } else
- return ResultAndSeqNoOrErr.takeError();
+// Handler traits for class methods (especially call operators for lambdas).
+template <typename Class, typename RetT, typename... ArgTs>
+class HandlerTraits<RetT (Class::*)(ArgTs...)>
+ : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Handler traits for const class methods (especially call operators for
+// lambdas).
+template <typename Class, typename RetT, typename... ArgTs>
+class HandlerTraits<RetT (Class::*)(ArgTs...) const>
+ : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Utility to peel the Expected wrapper off a response handler error type.
+template <typename HandlerT> class ResponseHandlerArg;
+
+template <typename ArgT> class ResponseHandlerArg<Error(Expected<ArgT>)> {
+public:
+ using ArgType = Expected<ArgT>;
+ using UnwrappedArgType = ArgT;
+};
+
+template <typename ArgT>
+class ResponseHandlerArg<ErrorSuccess(Expected<ArgT>)> {
+public:
+ using ArgType = Expected<ArgT>;
+ using UnwrappedArgType = ArgT;
+};
+
+template <> class ResponseHandlerArg<Error(Error)> {
+public:
+ using ArgType = Error;
+};
+
+template <> class ResponseHandlerArg<ErrorSuccess(Error)> {
+public:
+ using ArgType = Error;
+};
+
+// ResponseHandler represents a handler for a not-yet-received function call
+// result.
+template <typename ChannelT> class ResponseHandler {
+public:
+ virtual ~ResponseHandler() {}
+
+ // Reads the function result off the wire and acts on it. The meaning of
+ // "act" will depend on how this method is implemented in any given
+ // ResponseHandler subclass but could, for example, mean running a
+ // user-specified handler or setting a promise value.
+ virtual Error handleResponse(ChannelT &C) = 0;
+
+ // Abandons this outstanding result.
+ virtual void abandon() = 0;
+
+ // Create an error instance representing an abandoned response.
+ static Error createAbandonedResponseError() {
+ return orcError(OrcErrorCode::RPCResponseAbandoned);
}
+};
- // This can be used in single-threaded mode.
- template <typename Func, typename... ArgTs>
- typename Func::ErrorReturn callST(ChannelT &C, const ArgTs &... Args) {
- return callSTHandling<Func>(C, handleNone, Args...);
+// ResponseHandler subclass for RPC functions with non-void returns.
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+class ResponseHandlerImpl : public ResponseHandler<ChannelT> {
+public:
+ ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+ // Handle the result by deserializing it from the channel then passing it
+ // to the user defined handler.
+ Error handleResponse(ChannelT &C) override {
+ using UnwrappedArgType = typename ResponseHandlerArg<
+ typename HandlerTraits<HandlerT>::Type>::UnwrappedArgType;
+ UnwrappedArgType Result;
+ if (auto Err =
+ SerializationTraits<ChannelT, FuncRetT,
+ UnwrappedArgType>::deserialize(C, Result))
+ return Err;
+ if (auto Err = C.endReceiveMessage())
+ return Err;
+ return Handler(Result);
}
- /// Start receiving a new function call.
- ///
- /// Calls startReceiveMessage on the channel, then deserializes a FunctionId
- /// into Id.
- Error startReceivingFunction(ChannelT &C, FunctionIdT &Id) {
- if (auto Err = startReceiveMessage(C))
+ // Abandon this response by calling the handler with an 'abandoned response'
+ // error.
+ void abandon() override {
+ if (auto Err = Handler(this->createAbandonedResponseError())) {
+ // Handlers should not fail when passed an abandoned response error.
+ report_fatal_error(std::move(Err));
+ }
+ }
+
+private:
+ HandlerT Handler;
+};
+
+// ResponseHandler subclass for RPC functions with void returns.
+template <typename ChannelT, typename HandlerT>
+class ResponseHandlerImpl<ChannelT, void, HandlerT>
+ : public ResponseHandler<ChannelT> {
+public:
+ ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+ // Handle the result (no actual value, just a notification that the function
+ // has completed on the remote end) by calling the user-defined handler with
+ // Error::success().
+ Error handleResponse(ChannelT &C) override {
+ if (auto Err = C.endReceiveMessage())
return Err;
+ return Handler(Error::success());
+ }
- return deserialize(C, Id);
+ // Abandon this response by calling the handler with an 'abandoned response'
+ // error.
+ void abandon() override {
+ if (auto Err = Handler(this->createAbandonedResponseError())) {
+ // Handlers should not fail when passed an abandoned response error.
+ report_fatal_error(std::move(Err));
+ }
}
- /// Deserialize args for Func from C and call Handler. The signature of
- /// handler must conform to 'Error(Args...)' where Args... matches
- /// the arguments used in the Func typedef.
- template <typename Func, typename HandlerT>
- static Error handle(ChannelT &C, HandlerT Handler) {
- return HandlerHelper<ChannelT, SequenceNumberT, Func>::handle(C, Handler);
+private:
+ HandlerT Handler;
+};
+
+// Create a ResponseHandler from a given user handler.
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+std::unique_ptr<ResponseHandler<ChannelT>> createResponseHandler(HandlerT H) {
+ return llvm::make_unique<ResponseHandlerImpl<ChannelT, FuncRetT, HandlerT>>(
+ std::move(H));
+}
+
+// Helper for wrapping member functions up as functors. This is useful for
+// installing methods as result handlers.
+template <typename ClassT, typename RetT, typename... ArgTs>
+class MemberFnWrapper {
+public:
+ using MethodT = RetT (ClassT::*)(ArgTs...);
+ MemberFnWrapper(ClassT &Instance, MethodT Method)
+ : Instance(Instance), Method(Method) {}
+ RetT operator()(ArgTs &&... Args) {
+ return (Instance.*Method)(std::move(Args)...);
}
- /// Helper version of 'handle' for calling member functions.
- template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
- static Error handle(ChannelT &C, ClassT &Instance,
- RetT (ClassT::*HandlerMethod)(ArgTs...)) {
- return handle<Func>(
- C, MemberFnWrapper<ClassT, RetT, ArgTs...>(Instance, HandlerMethod));
+private:
+ ClassT &Instance;
+ MethodT Method;
+};
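+
+// Usage sketch (the 'Widget' class is hypothetical): MemberFnWrapper turns a
+// member function plus an instance into a plain callable.
+//
+// @code{.cpp}
+//
+// class Widget {
+// public:
+//   Error onValue(int32_t V) { Last = V; return Error::success(); }
+//   int32_t Last = 0;
+// };
+//
+// Widget W;
+// MemberFnWrapper<Widget, Error, int32_t> Call(W, &Widget::onValue);
+// if (auto Err = Call(7)) // forwards to W.onValue(7)
+//   /* handle error */;
+//
+// @endcode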
+
+// Helper that provides a Functor for deserializing arguments.
+template <typename... ArgTs> class ReadArgs {
+public:
+ Error operator()() { return Error::success(); }
+};
+
+template <typename ArgT, typename... ArgTs>
+class ReadArgs<ArgT, ArgTs...> : public ReadArgs<ArgTs...> {
+public:
+ ReadArgs(ArgT &Arg, ArgTs &... Args)
+ : ReadArgs<ArgTs...>(Args...), Arg(Arg) {}
+
+ Error operator()(ArgT &ArgVal, ArgTs &... ArgVals) {
+ this->Arg = std::move(ArgVal);
+ return ReadArgs<ArgTs...>::operator()(ArgVals...);
}
- /// Deserialize a FunctionIdT from C and verify it matches the id for Func.
- /// If the id does match, deserialize the arguments and call the handler
- /// (similarly to handle).
- /// If the id does not match, return an unexpect RPC call error and do not
- /// deserialize any further bytes.
- template <typename Func, typename HandlerT>
- Error expect(ChannelT &C, HandlerT Handler) {
- FunctionIdT FuncId;
- if (auto Err = startReceivingFunction(C, FuncId))
+private:
+ ArgT &Arg;
+};
+
+// Manage sequence numbers.
+template <typename SequenceNumberT> class SequenceNumberManager {
+public:
+ // Reset, making all sequence numbers available.
+ void reset() {
+ std::lock_guard<std::mutex> Lock(SeqNoLock);
+ NextSequenceNumber = 0;
+ FreeSequenceNumbers.clear();
+ }
+
+ // Get the next available sequence number. Will re-use numbers that have
+ // been released.
+ SequenceNumberT getSequenceNumber() {
+ std::lock_guard<std::mutex> Lock(SeqNoLock);
+ if (FreeSequenceNumbers.empty())
+ return NextSequenceNumber++;
+ auto SequenceNumber = FreeSequenceNumbers.back();
+ FreeSequenceNumbers.pop_back();
+ return SequenceNumber;
+ }
+
+ // Release a sequence number, making it available for re-use.
+ void releaseSequenceNumber(SequenceNumberT SequenceNumber) {
+ std::lock_guard<std::mutex> Lock(SeqNoLock);
+ FreeSequenceNumbers.push_back(SequenceNumber);
+ }
+
+private:
+ std::mutex SeqNoLock;
+ SequenceNumberT NextSequenceNumber = 0;
+ std::vector<SequenceNumberT> FreeSequenceNumbers;
+};
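+
+// Usage sketch: sequence numbers are handed out for in-flight calls and
+// recycled once the matching response has been handled.
+//
+// @code{.cpp}
+//
+// SequenceNumberManager<uint32_t> SeqMgr;
+// auto SeqNo = SeqMgr.getSequenceNumber(); // 0 on first use
+// // ... tag an outgoing call with SeqNo, wait for its response ...
+// SeqMgr.releaseSequenceNumber(SeqNo); // SeqNo may now be re-used
+//
+// @endcode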
+
+// Checks that predicate P holds for each corresponding pair of type arguments
+// from T1 and T2 tuple.
+template <template <class, class> class P, typename T1Tuple, typename T2Tuple>
+class RPCArgTypeCheckHelper;
+
+template <template <class, class> class P>
+class RPCArgTypeCheckHelper<P, std::tuple<>, std::tuple<>> {
+public:
+ static const bool value = true;
+};
+
+template <template <class, class> class P, typename T, typename... Ts,
+ typename U, typename... Us>
+class RPCArgTypeCheckHelper<P, std::tuple<T, Ts...>, std::tuple<U, Us...>> {
+public:
+ static const bool value =
+ P<T, U>::value &&
+ RPCArgTypeCheckHelper<P, std::tuple<Ts...>, std::tuple<Us...>>::value;
+};
+
+template <template <class, class> class P, typename T1Sig, typename T2Sig>
+class RPCArgTypeCheck {
+public:
+ using T1Tuple = typename FunctionArgsTuple<T1Sig>::Type;
+ using T2Tuple = typename FunctionArgsTuple<T2Sig>::Type;
+
+ static_assert(std::tuple_size<T1Tuple>::value >=
+ std::tuple_size<T2Tuple>::value,
+ "Too many arguments to RPC call");
+ static_assert(std::tuple_size<T1Tuple>::value <=
+ std::tuple_size<T2Tuple>::value,
+ "Too few arguments to RPC call");
+
+ static const bool value = RPCArgTypeCheckHelper<P, T1Tuple, T2Tuple>::value;
+};
+
+template <typename ChannelT, typename WireT, typename ConcreteT>
+class CanSerialize {
+private:
+ using S = SerializationTraits<ChannelT, WireT, ConcreteT>;
+
+ template <typename T>
+ static std::true_type
+ check(typename std::enable_if<
+ std::is_same<decltype(T::serialize(std::declval<ChannelT &>(),
+ std::declval<const ConcreteT &>())),
+ Error>::value,
+ void *>::type);
+
+ template <typename> static std::false_type check(...);
+
+public:
+ static const bool value = decltype(check<S>(0))::value;
+};
+
+template <typename ChannelT, typename WireT, typename ConcreteT>
+class CanDeserialize {
+private:
+ using S = SerializationTraits<ChannelT, WireT, ConcreteT>;
+
+ template <typename T>
+ static std::true_type
+ check(typename std::enable_if<
+ std::is_same<decltype(T::deserialize(std::declval<ChannelT &>(),
+ std::declval<ConcreteT &>())),
+ Error>::value,
+ void *>::type);
+
+ template <typename> static std::false_type check(...);
+
+public:
+ static const bool value = decltype(check<S>(0))::value;
+};
+
+/// Contains primitive utilities for defining, calling and handling calls to
+/// remote procedures. ChannelT is a bidirectional stream conforming to the
+/// RPCChannel interface (see RPCChannel.h), FunctionIdT is a procedure
+/// identifier type that must be serializable on ChannelT, and SequenceNumberT
+/// is an integral type that will be used to number in-flight function calls.
+///
+/// These utilities support the construction of very primitive RPC utilities.
+/// Their intent is to ensure correct serialization and deserialization of
+/// procedure arguments, and to keep the client and server's view of the API in
+/// sync.
+template <typename ImplT, typename ChannelT, typename FunctionIdT,
+ typename SequenceNumberT>
+class RPCBase {
+protected:
+ class OrcRPCInvalid : public Function<OrcRPCInvalid, void()> {
+ public:
+ static const char *getName() { return "__orc_rpc$invalid"; }
+ };
+
+ class OrcRPCResponse : public Function<OrcRPCResponse, void()> {
+ public:
+ static const char *getName() { return "__orc_rpc$response"; }
+ };
+
+ class OrcRPCNegotiate
+ : public Function<OrcRPCNegotiate, FunctionIdT(std::string)> {
+ public:
+ static const char *getName() { return "__orc_rpc$negotiate"; }
+ };
+
+ // Helper predicate for testing for the presence of SerializationTraits
+ // serializers.
+ template <typename WireT, typename ConcreteT>
+ class CanSerializeCheck : detail::CanSerialize<ChannelT, WireT, ConcreteT> {
+ public:
+ using detail::CanSerialize<ChannelT, WireT, ConcreteT>::value;
+
+ static_assert(value, "Missing serializer for argument (Can't serialize the "
+ "first template type argument of CanSerializeCheck "
+ "from the second)");
+ };
+
+ // Helper predicate for testing for the presence of SerializationTraits
+ // deserializers.
+ template <typename WireT, typename ConcreteT>
+ class CanDeserializeCheck
+ : detail::CanDeserialize<ChannelT, WireT, ConcreteT> {
+ public:
+ using detail::CanDeserialize<ChannelT, WireT, ConcreteT>::value;
+
+ static_assert(value, "Missing deserializer for argument (Can't deserialize "
+ "the second template type argument of "
+ "CanDeserializeCheck from the first)");
+ };
+
+public:
+ /// Construct an RPC instance on a channel.
+ RPCBase(ChannelT &C, bool LazyAutoNegotiation)
+ : C(C), LazyAutoNegotiation(LazyAutoNegotiation) {
+ // Hold ResponseId in a dedicated member, since we expect responses to be
+ // handled relatively frequently and want to avoid the map lookup.
+ ResponseId = FnIdAllocator.getResponseId();
+ RemoteFunctionIds[OrcRPCResponse::getPrototype()] = ResponseId;
+
+ // Register the negotiate function id and handler.
+ auto NegotiateId = FnIdAllocator.getNegotiateId();
+ RemoteFunctionIds[OrcRPCNegotiate::getPrototype()] = NegotiateId;
+ Handlers[NegotiateId] = wrapHandler<OrcRPCNegotiate>(
+ [this](const std::string &Name) { return handleNegotiate(Name); },
+ LaunchPolicy());
+ }
+
+ /// Append a call to Func to the channel, but do not call send.
+ /// The first argument specifies a user-defined handler to be run when the
+ /// function returns. The handler should take an Expected<Func::ReturnType>,
+ /// or an Error (if Func::ReturnType is void). The handler will be called
+ /// with an error if the return value is abandoned due to a channel error.
+ template <typename Func, typename HandlerT, typename... ArgTs>
+ Error appendCallAsync(HandlerT Handler, const ArgTs &... Args) {
+
+ static_assert(
+ detail::RPCArgTypeCheck<CanSerializeCheck, typename Func::Type,
+ void(ArgTs...)>::value,
+ "");
+
+ // Look up the function ID.
+ FunctionIdT FnId;
+ if (auto FnIdOrErr = getRemoteFunctionId<Func>())
+ FnId = *FnIdOrErr;
+ else {
+ // This isn't a channel error so we don't want to abandon other pending
+ // responses, but we still need to run the user handler with an error to
+ // let them know the call failed.
+ if (auto Err = Handler(orcError(OrcErrorCode::UnknownRPCFunction)))
+ report_fatal_error(std::move(Err));
+ return FnIdOrErr.takeError();
+ }
+
+ // Allocate a sequence number.
+ auto SeqNo = SequenceNumberMgr.getSequenceNumber();
+ assert(!PendingResponses.count(SeqNo) &&
+ "Sequence number already allocated");
+
+ // Install the user handler.
+ PendingResponses[SeqNo] =
+ detail::createResponseHandler<ChannelT, typename Func::ReturnType>(
+ std::move(Handler));
+
+ // Open the function call message.
+ if (auto Err = C.startSendMessage(FnId, SeqNo)) {
+ abandonPendingResponses();
+ return joinErrors(std::move(Err), C.endSendMessage());
+ }
+
+ // Serialize the call arguments.
+ if (auto Err = detail::HandlerTraits<typename Func::Type>::serializeArgs(
+ C, Args...)) {
+ abandonPendingResponses();
+ return joinErrors(std::move(Err), C.endSendMessage());
+ }
+
+ // Close the function call message.
+ if (auto Err = C.endSendMessage()) {
+ abandonPendingResponses();
return std::move(Err);
- if (FuncId != Func::Id)
- return orcError(OrcErrorCode::UnexpectedRPCCall);
- return handle<Func>(C, Handler);
+ }
+
+ return Error::success();
}
- /// Helper version of expect for calling member functions.
- template <typename Func, typename ClassT, typename... ArgTs>
- static Error expect(ChannelT &C, ClassT &Instance,
- Error (ClassT::*HandlerMethod)(ArgTs...)) {
- return expect<Func>(
- C, MemberFnWrapper<ClassT, ArgTs...>(Instance, HandlerMethod));
+ Error sendAppendedCalls() { return C.send(); }
+
+ template <typename Func, typename HandlerT, typename... ArgTs>
+ Error callAsync(HandlerT Handler, const ArgTs &... Args) {
+ if (auto Err = appendCallAsync<Func>(std::move(Handler), Args...))
+ return Err;
+ return C.send();
+ }
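+
+ // Usage sketch ('Rpc' is an instance of a derived RPC class and 'AddInts' is
+ // the hypothetical Function defined earlier): call a remote function and
+ // handle its result in a callback. Both names are assumptions.
+ //
+ // @code{.cpp}
+ //
+ // if (auto Err = Rpc.callAsync<AddInts>(
+ //     [](Expected<int32_t> Result) -> Error {
+ //       if (!Result)
+ //         return Result.takeError();
+ //       // use *Result ...
+ //       return Error::success();
+ //     },
+ //     int32_t(1), int32_t(2)))
+ //   /* handle channel error */;
+ //
+ // @endcode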
+
+ /// Handle one incoming call.
+ Error handleOne() {
+ FunctionIdT FnId;
+ SequenceNumberT SeqNo;
+ if (auto Err = C.startReceiveMessage(FnId, SeqNo))
+ return Err;
+ if (FnId == ResponseId)
+ return handleResponse(SeqNo);
+ auto I = Handlers.find(FnId);
+ if (I != Handlers.end())
+ return I->second(C, SeqNo);
+
+ // else: No handler found. Report error to client?
+ return orcError(OrcErrorCode::UnexpectedRPCCall);
}
/// Helper for handling setter procedures - this method returns a functor that
@@ -535,159 +859,480 @@ public:
/// /* Handle Args */ ;
///
template <typename... ArgTs>
- static ReadArgs<ArgTs...> readArgs(ArgTs &... Args) {
- return ReadArgs<ArgTs...>(Args...);
+ static detail::ReadArgs<ArgTs...> readArgs(ArgTs &... Args) {
+ return detail::ReadArgs<ArgTs...>(Args...);
}
- /// Read a response from Channel.
- /// This should be called from the receive loop to retrieve results.
- Error handleResponse(ChannelT &C, SequenceNumberT *SeqNoRet = nullptr) {
- SequenceNumberT SeqNo;
- if (auto Err = deserialize(C, SeqNo)) {
- abandonOutstandingResults();
- return Err;
- }
+protected:
+ // The LaunchPolicy type allows a launch policy to be specified when adding
+ // a function handler. See addHandlerImpl.
+ using LaunchPolicy = std::function<Error(std::function<Error()>)>;
- if (SeqNoRet)
- *SeqNoRet = SeqNo;
+ /// Add the given handler to the handler map and make it available for
+ /// autonegotiation and execution.
+ template <typename Func, typename HandlerT>
+ void addHandlerImpl(HandlerT Handler, LaunchPolicy Launch) {
- auto I = OutstandingResults.find(SeqNo);
- if (I == OutstandingResults.end()) {
- abandonOutstandingResults();
+ static_assert(detail::RPCArgTypeCheck<
+ CanDeserializeCheck, typename Func::Type,
+ typename detail::HandlerTraits<HandlerT>::Type>::value,
+ "");
+
+ FunctionIdT NewFnId = FnIdAllocator.template allocate<Func>();
+ LocalFunctionIds[Func::getPrototype()] = NewFnId;
+ Handlers[NewFnId] =
+ wrapHandler<Func>(std::move(Handler), std::move(Launch));
+ }
+
+ // Abandon all outstanding results.
+ void abandonPendingResponses() {
+ for (auto &KV : PendingResponses)
+ KV.second->abandon();
+ PendingResponses.clear();
+ SequenceNumberMgr.reset();
+ }
+
+ Error handleResponse(SequenceNumberT SeqNo) {
+ auto I = PendingResponses.find(SeqNo);
+ if (I == PendingResponses.end()) {
+ abandonPendingResponses();
return orcError(OrcErrorCode::UnexpectedRPCResponse);
}
- if (auto Err = I->second->readResult(C)) {
- abandonOutstandingResults();
- // FIXME: Release sequence numbers?
+ auto PRHandler = std::move(I->second);
+ PendingResponses.erase(I);
+ SequenceNumberMgr.releaseSequenceNumber(SeqNo);
+
+ if (auto Err = PRHandler->handleResponse(C)) {
+ abandonPendingResponses();
+ SequenceNumberMgr.reset();
return Err;
}
- OutstandingResults.erase(I);
- SequenceNumberMgr.releaseSequenceNumber(SeqNo);
-
return Error::success();
}
- // Loop waiting for a result with the given sequence number.
- // This can be used as a receive loop if the user doesn't have a default.
- template <typename HandleOtherFtor>
- Error waitForResult(ChannelT &C, SequenceNumberT TgtSeqNo,
- HandleOtherFtor &HandleOther = handleNone) {
- bool GotTgtResult = false;
+ FunctionIdT handleNegotiate(const std::string &Name) {
+ auto I = LocalFunctionIds.find(Name);
+ if (I == LocalFunctionIds.end())
+ return FnIdAllocator.getInvalidId();
+ return I->second;
+ }
+
+ // Find the remote FunctionId for the given function, which must be in the
+ // RemoteFunctionIds map.
+ template <typename Func> Expected<FunctionIdT> getRemoteFunctionId() {
+ // Try to find the id for the given function.
+ auto I = RemoteFunctionIds.find(Func::getPrototype());
+
+ // If we have it in the map, return it.
+ if (I != RemoteFunctionIds.end())
+ return I->second;
+
+ // Otherwise, if we have auto-negotiation enabled, try to negotiate it.
+ if (LazyAutoNegotiation) {
+ auto &Impl = static_cast<ImplT &>(*this);
+ if (auto RemoteIdOrErr =
+ Impl.template callB<OrcRPCNegotiate>(Func::getPrototype())) {
+ auto &RemoteId = *RemoteIdOrErr;
+
+ // If autonegotiation indicates that the remote end doesn't support this
+ // function, return an unknown function error.
+ if (RemoteId == FnIdAllocator.getInvalidId())
+ return orcError(OrcErrorCode::UnknownRPCFunction);
+
+ // Autonegotiation succeeded and returned a valid id. Update the map and
+ // return the id.
+ RemoteFunctionIds[Func::getPrototype()] = RemoteId;
+ return RemoteId;
+ } else {
+ // Autonegotiation failed. Return the error.
+ return RemoteIdOrErr.takeError();
+ }
+ }
- while (!GotTgtResult) {
- FunctionIdT Id = RPCFunctionIdTraits<FunctionIdT>::InvalidId;
- if (auto Err = startReceivingFunction(C, Id))
+ // No key was available in the map and autonegotiation wasn't enabled.
+ // Return an unknown function error.
+ return orcError(OrcErrorCode::UnknownRPCFunction);
+ }
+
+ using WrappedHandlerFn = std::function<Error(ChannelT &, SequenceNumberT)>;
+
+ // Wrap the given user handler in the necessary argument-deserialization code,
+ // result-serialization code, and call to the launch policy (if present).
+ template <typename Func, typename HandlerT>
+ WrappedHandlerFn wrapHandler(HandlerT Handler, LaunchPolicy Launch) {
+ return [this, Handler, Launch](ChannelT &Channel,
+ SequenceNumberT SeqNo) mutable -> Error {
+ // Start by deserializing the arguments.
+ auto Args = std::make_shared<
+ typename detail::HandlerTraits<HandlerT>::ArgStorage>();
+ if (auto Err =
+ detail::HandlerTraits<typename Func::Type>::deserializeArgs(
+ Channel, *Args))
return Err;
- if (Id == RPCFunctionIdTraits<FunctionIdT>::ResponseId) {
- SequenceNumberT SeqNo;
- if (auto Err = handleResponse(C, &SeqNo))
- return Err;
- GotTgtResult = (SeqNo == TgtSeqNo);
- } else if (auto Err = HandleOther(C, Id))
+
+ // GCC 4.7 and 4.8 incorrectly issue a -Wunused-but-set-variable warning
+ // for Args. Void-cast Args to work around this for now.
+ // FIXME: Remove this workaround once we can assume a working GCC version.
+ (void)Args;
+
+ // End the receive message, unlocking the channel for reading.
+ if (auto Err = Channel.endReceiveMessage())
return Err;
- }
- return Error::success();
+ // Build the handler/responder.
+ auto Responder = [this, Handler, Args, &Channel,
+ SeqNo]() mutable -> Error {
+ using HTraits = detail::HandlerTraits<HandlerT>;
+ using FuncReturn = typename Func::ReturnType;
+ return detail::respond<FuncReturn>(
+ Channel, ResponseId, SeqNo, HTraits::unpackAndRun(Handler, *Args));
+ };
+
+ // If there is an explicit launch policy then use it to launch the
+ // handler.
+ if (Launch)
+ return Launch(std::move(Responder));
+
+ // Otherwise run the handler on the listener thread.
+ return Responder();
+ };
}
- // Default handler for 'other' (non-response) functions when waiting for a
- // result from the channel.
- static Error handleNone(ChannelT &, FunctionIdT) {
- return orcError(OrcErrorCode::UnexpectedRPCCall);
- };
+ ChannelT &C;
+
+ bool LazyAutoNegotiation;
+
+ RPCFunctionIdAllocator<FunctionIdT> FnIdAllocator;
+
+ FunctionIdT ResponseId;
+ std::map<std::string, FunctionIdT> LocalFunctionIds;
+ std::map<const char *, FunctionIdT> RemoteFunctionIds;
+
+ std::map<FunctionIdT, WrappedHandlerFn> Handlers;
+ detail::SequenceNumberManager<SequenceNumberT> SequenceNumberMgr;
+ std::map<SequenceNumberT, std::unique_ptr<detail::ResponseHandler<ChannelT>>>
+ PendingResponses;
+};
+
+} // end namespace detail
+
+template <typename ChannelT, typename FunctionIdT = uint32_t,
+ typename SequenceNumberT = uint32_t>
+class MultiThreadedRPC
+ : public detail::RPCBase<
+ MultiThreadedRPC<ChannelT, FunctionIdT, SequenceNumberT>, ChannelT,
+ FunctionIdT, SequenceNumberT> {
private:
- // Manage sequence numbers.
- class SequenceNumberManager {
- public:
- SequenceNumberManager() = default;
+ using BaseClass =
+ detail::RPCBase<MultiThreadedRPC<ChannelT, FunctionIdT, SequenceNumberT>,
+ ChannelT, FunctionIdT, SequenceNumberT>;
- SequenceNumberManager(const SequenceNumberManager &) = delete;
- SequenceNumberManager &operator=(const SequenceNumberManager &) = delete;
+public:
+ MultiThreadedRPC(ChannelT &C, bool LazyAutoNegotiation)
+ : BaseClass(C, LazyAutoNegotiation) {}
- SequenceNumberManager(SequenceNumberManager &&Other)
- : NextSequenceNumber(std::move(Other.NextSequenceNumber)),
- FreeSequenceNumbers(std::move(Other.FreeSequenceNumbers)) {}
+ /// The LaunchPolicy type allows a launch policy to be specified when adding
+ /// a function handler. See addHandler.
+ using LaunchPolicy = typename BaseClass::LaunchPolicy;
- SequenceNumberManager &operator=(SequenceNumberManager &&Other) {
- NextSequenceNumber = std::move(Other.NextSequenceNumber);
- FreeSequenceNumbers = std::move(Other.FreeSequenceNumbers);
+ /// Add a handler for the given RPC function.
+ /// This installs the given handler functor for the given RPC Function, and
+ /// makes the RPC function available for negotiation/calling from the remote.
+ ///
+ /// The optional LaunchPolicy argument can be used to control how the handler
+ /// is run when called:
+ ///
+ /// * If no LaunchPolicy is given, the handler code will be run on the RPC
+ /// handler thread that is reading from the channel. This handler cannot
+ /// make blocking RPC calls (since it would be blocking the thread used to
+ /// get the result), but can make non-blocking calls.
+ ///
+ /// * If a LaunchPolicy is given, the user's handler will be wrapped in a
+ /// call to serialize and send the result, and the resulting functor (with
+  ///   type 'Error()') will be passed to the LaunchPolicy. The user can then
+ /// choose to add the wrapped handler to a work queue, spawn a new thread,
+ /// or anything else.
+ template <typename Func, typename HandlerT>
+ void addHandler(HandlerT Handler, LaunchPolicy Launch = LaunchPolicy()) {
+ return this->template addHandlerImpl<Func>(std::move(Handler),
+ std::move(Launch));
+ }
+
+ /// Negotiate a function id for Func with the other end of the channel.
+ template <typename Func> Error negotiateFunction() {
+ using OrcRPCNegotiate = typename BaseClass::OrcRPCNegotiate;
+
+ if (auto RemoteIdOrErr = callB<OrcRPCNegotiate>(Func::getPrototype())) {
+ this->RemoteFunctionIds[Func::getPrototype()] = *RemoteIdOrErr;
+ return Error::success();
+ } else
+ return RemoteIdOrErr.takeError();
+ }
+
+ /// Convenience method for negotiating multiple functions at once.
+ template <typename Func> Error negotiateFunctions() {
+ return negotiateFunction<Func>();
+ }
+
+ /// Convenience method for negotiating multiple functions at once.
+ template <typename Func1, typename Func2, typename... Funcs>
+ Error negotiateFunctions() {
+ if (auto Err = negotiateFunction<Func1>())
+ return Err;
+ return negotiateFunctions<Func2, Funcs...>();
+ }
+
+ /// Return type for non-blocking call primitives.
+ template <typename Func>
+ using NonBlockingCallResult = typename detail::ResultTraits<
+ typename Func::ReturnType>::ReturnFutureType;
+
+  /// Call Func on Channel C. Does not block and does not call send on the
+  /// channel. Returns a future for the result, or an Error if the call could
+  /// not be appended to the channel.
+  ///
+  /// The channel must be flushed (e.g. by calling send, or by using callNB
+  /// below, which flushes automatically) before the returned future can be
+  /// expected to become ready.
+ template <typename Func, typename... ArgTs>
+ Expected<NonBlockingCallResult<Func>> appendCallNB(const ArgTs &... Args) {
+ using RTraits = detail::ResultTraits<typename Func::ReturnType>;
+ using ErrorReturn = typename RTraits::ErrorReturnType;
+ using ErrorReturnPromise = typename RTraits::ReturnPromiseType;
+
+ // FIXME: Stack allocate and move this into the handler once LLVM builds
+ // with C++14.
+ auto Promise = std::make_shared<ErrorReturnPromise>();
+ auto FutureResult = Promise->get_future();
+
+ if (auto Err = this->template appendCallAsync<Func>(
+ [Promise](ErrorReturn RetOrErr) {
+ Promise->set_value(std::move(RetOrErr));
+ return Error::success();
+ },
+ Args...)) {
+ this->abandonPendingResponses();
+ RTraits::consumeAbandoned(FutureResult.get());
+ return std::move(Err);
}
+ return std::move(FutureResult);
+ }
- void reset() {
- std::lock_guard<std::mutex> Lock(SeqNoLock);
- NextSequenceNumber = 0;
- FreeSequenceNumbers.clear();
+  /// The same as appendCallNB, except that it calls C.send() to
+ /// flush the channel after serializing the call.
+ template <typename Func, typename... ArgTs>
+ Expected<NonBlockingCallResult<Func>> callNB(const ArgTs &... Args) {
+ auto Result = appendCallNB<Func>(Args...);
+ if (!Result)
+ return Result;
+ if (auto Err = this->C.send()) {
+ this->abandonPendingResponses();
+ detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+ std::move(Result->get()));
+ return std::move(Err);
}
+ return Result;
+ }
- SequenceNumberT getSequenceNumber() {
- std::lock_guard<std::mutex> Lock(SeqNoLock);
- if (FreeSequenceNumbers.empty())
- return NextSequenceNumber++;
- auto SequenceNumber = FreeSequenceNumbers.back();
- FreeSequenceNumbers.pop_back();
- return SequenceNumber;
+ /// Call Func on Channel C. Blocks waiting for a result. Returns an Error
+ /// for void functions or an Expected<T> for functions returning a T.
+ ///
+ /// This function is for use in threaded code where another thread is
+ /// handling responses and incoming calls.
+ template <typename Func, typename... ArgTs,
+ typename AltRetT = typename Func::ReturnType>
+ typename detail::ResultTraits<AltRetT>::ErrorReturnType
+ callB(const ArgTs &... Args) {
+ if (auto FutureResOrErr = callNB<Func>(Args...)) {
+ if (auto Err = this->C.send()) {
+ this->abandonPendingResponses();
+ detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+ std::move(FutureResOrErr->get()));
+ return std::move(Err);
+ }
+ return FutureResOrErr->get();
+ } else
+ return FutureResOrErr.takeError();
+ }
+
+ /// Handle incoming RPC calls.
+ Error handlerLoop() {
+ while (true)
+ if (auto Err = this->handleOne())
+ return Err;
+ return Error::success();
+ }
+};
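// A minimal usage sketch (illustrative only; nothing below is part of this
// header). It assumes an RPC function type 'Add' declared with the Function<>
// CRTP utility defined earlier in this header (not shown in this hunk), a
// hypothetical channel type 'MyChannel' derived from RawByteChannel, and that
// LaunchPolicy is std::function<Error(std::function<Error()>)>, matching
// wrapHandler's use of it above. Requires <thread> and llvm/Support/Error.h.
//
//   MyChannel Channel(SocketFD);   // hypothetical constructor
//   MultiThreadedRPC<MyChannel> RPC(Channel, /*LazyAutoNegotiation=*/true);
//
//   // Launch policy that runs each wrapped handler on a detached thread, so
//   // handlers may themselves make blocking RPC calls.
//   auto SpawnThread = [](std::function<Error()> Run) -> Error {
//     std::thread([](std::function<Error()> R) { consumeError(R()); },
//                 std::move(Run)).detach();
//     return Error::success();
//   };
//
//   RPC.addHandler<Add>([](int32_t X, int32_t Y) { return X + Y; },
//                       SpawnThread);
//
//   // Serve incoming calls until the channel reports an error.
//   if (auto Err = RPC.handlerLoop())
//     logAllUnhandledErrors(std::move(Err), errs(), "RPC server: ");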
+
+template <typename ChannelT, typename FunctionIdT = uint32_t,
+ typename SequenceNumberT = uint32_t>
+class SingleThreadedRPC
+ : public detail::RPCBase<
+ SingleThreadedRPC<ChannelT, FunctionIdT, SequenceNumberT>, ChannelT,
+ FunctionIdT, SequenceNumberT> {
+private:
+ using BaseClass =
+ detail::RPCBase<SingleThreadedRPC<ChannelT, FunctionIdT, SequenceNumberT>,
+ ChannelT, FunctionIdT, SequenceNumberT>;
+
+ using LaunchPolicy = typename BaseClass::LaunchPolicy;
+
+public:
+ SingleThreadedRPC(ChannelT &C, bool LazyAutoNegotiation)
+ : BaseClass(C, LazyAutoNegotiation) {}
+
+ template <typename Func, typename HandlerT>
+ void addHandler(HandlerT Handler) {
+ return this->template addHandlerImpl<Func>(std::move(Handler),
+ LaunchPolicy());
+ }
+
+ template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+ void addHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+ addHandler<Func>(
+ detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+ }
+
+ /// Negotiate a function id for Func with the other end of the channel.
+ template <typename Func> Error negotiateFunction() {
+ using OrcRPCNegotiate = typename BaseClass::OrcRPCNegotiate;
+
+ if (auto RemoteIdOrErr = callB<OrcRPCNegotiate>(Func::getPrototype())) {
+ this->RemoteFunctionIds[Func::getPrototype()] = *RemoteIdOrErr;
+ return Error::success();
+ } else
+ return RemoteIdOrErr.takeError();
+ }
+
+ /// Convenience method for negotiating multiple functions at once.
+ template <typename Func> Error negotiateFunctions() {
+ return negotiateFunction<Func>();
+ }
+
+ /// Convenience method for negotiating multiple functions at once.
+ template <typename Func1, typename Func2, typename... Funcs>
+ Error negotiateFunctions() {
+ if (auto Err = negotiateFunction<Func1>())
+ return Err;
+ return negotiateFunctions<Func2, Funcs...>();
+ }
+
+ template <typename Func, typename... ArgTs,
+ typename AltRetT = typename Func::ReturnType>
+ typename detail::ResultTraits<AltRetT>::ErrorReturnType
+ callB(const ArgTs &... Args) {
+ bool ReceivedResponse = false;
+ using ResultType = typename detail::ResultTraits<AltRetT>::ErrorReturnType;
+ auto Result = detail::ResultTraits<AltRetT>::createBlankErrorReturnValue();
+
+ // We have to 'Check' result (which we know is in a success state at this
+ // point) so that it can be overwritten in the async handler.
+ (void)!!Result;
+
+ if (auto Err = this->template appendCallAsync<Func>(
+ [&](ResultType R) {
+ Result = std::move(R);
+ ReceivedResponse = true;
+ return Error::success();
+ },
+ Args...)) {
+ this->abandonPendingResponses();
+ detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+ std::move(Result));
+ return std::move(Err);
}
- void releaseSequenceNumber(SequenceNumberT SequenceNumber) {
- std::lock_guard<std::mutex> Lock(SeqNoLock);
- FreeSequenceNumbers.push_back(SequenceNumber);
+ while (!ReceivedResponse) {
+ if (auto Err = this->handleOne()) {
+ this->abandonPendingResponses();
+ detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+ std::move(Result));
+ return std::move(Err);
+ }
}
- private:
- std::mutex SeqNoLock;
- SequenceNumberT NextSequenceNumber = 0;
- std::vector<SequenceNumberT> FreeSequenceNumbers;
- };
+ return Result;
+ }
+};
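// A minimal usage sketch (illustrative only): the single-threaded endpoint
// services incoming calls and responses from inside callB itself, so no
// dedicated listener thread is needed. It reuses the hypothetical 'Add'
// function type and 'MyChannel' channel from the sketch above.
//
//   SingleThreadedRPC<MyChannel> RPC(Channel, /*LazyAutoNegotiation=*/true);
//
//   struct Calculator {
//     int32_t add(int32_t X, int32_t Y) { return X + Y; }
//   };
//   Calculator Calc;
//   RPC.addHandler<Add>(Calc, &Calculator::add);
//
//   // Blocks, pumping handleOne() until the matching response arrives.
//   if (auto Sum = RPC.callB<Add>(2, 3))
//     outs() << "2 + 3 = " << *Sum << "\n";
//   else
//     logAllUnhandledErrors(Sum.takeError(), errs(), "RPC call: ");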
- // Base class for results that haven't been returned from the other end of the
- // RPC connection yet.
- class OutstandingResult {
- public:
- virtual ~OutstandingResult() {}
- virtual Error readResult(ChannelT &C) = 0;
- virtual void abandon() = 0;
- };
+/// \brief Allows a set of asynchronous calls to be dispatched, and then
+/// waited on as a group.
+template <typename RPCClass> class ParallelCallGroup {
+public:
- // Outstanding results for a specific function.
- template <typename Func>
- class OutstandingResultImpl : public OutstandingResult {
- private:
- public:
- OutstandingResultImpl(std::promise<typename Func::OptionalReturn> &&P)
- : P(std::move(P)) {}
+ /// \brief Construct a parallel call group for the given RPC.
+ ParallelCallGroup(RPCClass &RPC) : RPC(RPC), NumOutstandingCalls(0) {}
+
+ ParallelCallGroup(const ParallelCallGroup &) = delete;
+ ParallelCallGroup &operator=(const ParallelCallGroup &) = delete;
- Error readResult(ChannelT &C) override { return Func::readResult(C, P); }
+  /// \brief Make an asynchronous call.
+ ///
+ /// Does not issue a send call to the RPC's channel. The channel may use this
+  /// to batch up subsequent calls. A send will automatically be issued when
+  /// wait is called.
+ template <typename Func, typename HandlerT, typename... ArgTs>
+ Error appendCall(HandlerT Handler, const ArgTs &... Args) {
+ // Increment the count of outstanding calls. This has to happen before
+ // we invoke the call, as the handler may (depending on scheduling)
+ // be run immediately on another thread, and we don't want the decrement
+ // in the wrapped handler below to run before the increment.
+ {
+ std::unique_lock<std::mutex> Lock(M);
+ ++NumOutstandingCalls;
+ }
- void abandon() override { Func::abandon(P); }
+ // Wrap the user handler in a lambda that will decrement the
+ // outstanding calls count, then poke the condition variable.
+ using ArgType = typename detail::ResponseHandlerArg<
+ typename detail::HandlerTraits<HandlerT>::Type>::ArgType;
+ // FIXME: Move handler into wrapped handler once we have C++14.
+ auto WrappedHandler = [this, Handler](ArgType Arg) {
+ auto Err = Handler(std::move(Arg));
+ std::unique_lock<std::mutex> Lock(M);
+ --NumOutstandingCalls;
+ CV.notify_all();
+ return Err;
+ };
- private:
- std::promise<typename Func::OptionalReturn> P;
- };
+ return RPC.template appendCallAsync<Func>(std::move(WrappedHandler),
+ Args...);
+ }
- // Create an outstanding result for the given function.
- template <typename Func>
- std::unique_ptr<OutstandingResult>
- createOutstandingResult(std::promise<typename Func::OptionalReturn> &&P) {
- return llvm::make_unique<OutstandingResultImpl<Func>>(std::move(P));
+ /// \brief Make an asynchronous call.
+ ///
+ /// The same as appendCall, but also calls send on the channel immediately.
+ /// Prefer appendCall if you are about to issue a "wait" call shortly, as
+ /// this may allow the channel to better batch the calls.
+ template <typename Func, typename HandlerT, typename... ArgTs>
+ Error call(HandlerT Handler, const ArgTs &... Args) {
+ if (auto Err = appendCall(std::move(Handler), Args...))
+ return Err;
+ return RPC.sendAppendedCalls();
}
- // Abandon all outstanding results.
- void abandonOutstandingResults() {
- for (auto &KV : OutstandingResults)
- KV.second->abandon();
- OutstandingResults.clear();
- SequenceNumberMgr.reset();
+ /// \brief Blocks until all calls have been completed and their return value
+ /// handlers run.
+ Error wait() {
+ if (auto Err = RPC.sendAppendedCalls())
+ return Err;
+ std::unique_lock<std::mutex> Lock(M);
+ while (NumOutstandingCalls > 0)
+ CV.wait(Lock);
+ return Error::success();
}
- SequenceNumberManager SequenceNumberMgr;
- std::map<SequenceNumberT, std::unique_ptr<OutstandingResult>>
- OutstandingResults;
+private:
+ RPCClass &RPC;
+ std::mutex M;
+ std::condition_variable CV;
+ uint32_t NumOutstandingCalls;
};
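// A minimal usage sketch (illustrative only): dispatch several calls, then
// block until every result handler has run. It assumes the hypothetical 'Add'
// function type from the sketches above and a MultiThreadedRPC endpoint 'RPC'
// whose handlerLoop() is running on another thread, so that responses are
// processed while wait() blocks.
//
//   ParallelCallGroup<decltype(RPC)> PCG(RPC);
//   int32_t Sums[3] = {0, 0, 0};
//
//   for (int32_t I = 0; I != 3; ++I) {
//     auto OnResult = [&Sums, I](Expected<int32_t> R) -> Error {
//       if (!R)
//         return R.takeError();
//       Sums[I] = *R;
//       return Error::success();
//     };
//     if (auto Err = PCG.appendCall<Add>(OnResult, I, I))
//       return Err;
//   }
//
//   // Flushes the channel, then blocks until all handlers have completed.
//   if (auto Err = PCG.wait())
//     return Err;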
-} // end namespace remote
+} // end namespace rpc
} // end namespace orc
} // end namespace llvm
diff --git a/include/llvm/ExecutionEngine/Orc/RawByteChannel.h b/include/llvm/ExecutionEngine/Orc/RawByteChannel.h
new file mode 100644
index 000000000000..43b597de000f
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/RawByteChannel.h
@@ -0,0 +1,175 @@
+//===- llvm/ExecutionEngine/Orc/RawByteChannel.h ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
+#define LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
+
+#include "OrcError.h"
+#include "RPCSerialization.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstddef>
+#include <cstdint>
+#include <mutex>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+/// Interface for byte-streams to be used with RPC.
+class RawByteChannel {
+public:
+ virtual ~RawByteChannel() {}
+
+ /// Read Size bytes from the stream into *Dst.
+ virtual Error readBytes(char *Dst, unsigned Size) = 0;
+
+  /// Append Size bytes from *Src to the stream.
+ virtual Error appendBytes(const char *Src, unsigned Size) = 0;
+
+ /// Flush the stream if possible.
+ virtual Error send() = 0;
+
+ /// Notify the channel that we're starting a message send.
+ /// Locks the channel for writing.
+ template <typename FunctionIdT, typename SequenceIdT>
+ Error startSendMessage(const FunctionIdT &FnId, const SequenceIdT &SeqNo) {
+ if (auto Err = serializeSeq(*this, FnId, SeqNo))
+ return Err;
+ writeLock.lock();
+ return Error::success();
+ }
+
+ /// Notify the channel that we're ending a message send.
+ /// Unlocks the channel for writing.
+ Error endSendMessage() {
+ writeLock.unlock();
+ return Error::success();
+ }
+
+ /// Notify the channel that we're starting a message receive.
+ /// Locks the channel for reading.
+ template <typename FunctionIdT, typename SequenceNumberT>
+ Error startReceiveMessage(FunctionIdT &FnId, SequenceNumberT &SeqNo) {
+ readLock.lock();
+ return deserializeSeq(*this, FnId, SeqNo);
+ }
+
+ /// Notify the channel that we're ending a message receive.
+ /// Unlocks the channel for reading.
+ Error endReceiveMessage() {
+ readLock.unlock();
+ return Error::success();
+ }
+
+ /// Get the lock for stream reading.
+ std::mutex &getReadLock() { return readLock; }
+
+ /// Get the lock for stream writing.
+ std::mutex &getWriteLock() { return writeLock; }
+
+private:
+ std::mutex readLock, writeLock;
+};
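// An implementation sketch (illustrative only): a concrete channel over a
// pair of POSIX file descriptors. The class name is hypothetical, error
// handling is simplified, and <unistd.h> is assumed for ::read/::write.
//
//   class FDRawByteChannel final : public RawByteChannel {
//   public:
//     FDRawByteChannel(int InFD, int OutFD) : InFD(InFD), OutFD(OutFD) {}
//
//     Error readBytes(char *Dst, unsigned Size) override {
//       while (Size) {
//         ssize_t Read = ::read(InFD, Dst, Size);
//         if (Read <= 0)
//           return make_error<StringError>("channel read failed",
//                                          inconvertibleErrorCode());
//         Dst += Read;
//         Size -= Read;
//       }
//       return Error::success();
//     }
//
//     Error appendBytes(const char *Src, unsigned Size) override {
//       while (Size) {
//         ssize_t Written = ::write(OutFD, Src, Size);
//         if (Written <= 0)
//           return make_error<StringError>("channel write failed",
//                                          inconvertibleErrorCode());
//         Src += Written;
//         Size -= Written;
//       }
//       return Error::success();
//     }
//
//     // Writes go straight to the descriptor, so there is nothing to flush.
//     Error send() override { return Error::success(); }
//
//   private:
//     int InFD, OutFD;
//   };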
+
+template <typename ChannelT, typename T>
+class SerializationTraits<
+ ChannelT, T, T,
+ typename std::enable_if<
+ std::is_base_of<RawByteChannel, ChannelT>::value &&
+ (std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value ||
+ std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
+ std::is_same<T, uint32_t>::value || std::is_same<T, int32_t>::value ||
+ std::is_same<T, uint64_t>::value || std::is_same<T, int64_t>::value ||
+ std::is_same<T, char>::value)>::type> {
+public:
+ static Error serialize(ChannelT &C, T V) {
+ support::endian::byte_swap<T, support::big>(V);
+ return C.appendBytes(reinterpret_cast<const char *>(&V), sizeof(T));
+ };
+
+ static Error deserialize(ChannelT &C, T &V) {
+ if (auto Err = C.readBytes(reinterpret_cast<char *>(&V), sizeof(T)))
+ return Err;
+ support::endian::byte_swap<T, support::big>(V);
+ return Error::success();
+ };
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, bool, bool,
+ typename std::enable_if<std::is_base_of<
+ RawByteChannel, ChannelT>::value>::type> {
+public:
+ static Error serialize(ChannelT &C, bool V) {
+ return C.appendBytes(reinterpret_cast<const char *>(&V), 1);
+ }
+
+ static Error deserialize(ChannelT &C, bool &V) {
+ return C.readBytes(reinterpret_cast<char *>(&V), 1);
+ }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, std::string, StringRef,
+ typename std::enable_if<std::is_base_of<
+ RawByteChannel, ChannelT>::value>::type> {
+public:
+ /// RPC channel serialization for std::strings.
+ static Error serialize(RawByteChannel &C, StringRef S) {
+ if (auto Err = serializeSeq(C, static_cast<uint64_t>(S.size())))
+ return Err;
+ return C.appendBytes((const char *)S.data(), S.size());
+ }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, std::string, const char *,
+ typename std::enable_if<std::is_base_of<
+ RawByteChannel, ChannelT>::value>::type> {
+public:
+ static Error serialize(RawByteChannel &C, const char *S) {
+ return SerializationTraits<ChannelT, std::string, StringRef>::serialize(C,
+ S);
+ }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, std::string, std::string,
+ typename std::enable_if<std::is_base_of<
+ RawByteChannel, ChannelT>::value>::type> {
+public:
+ /// RPC channel serialization for std::strings.
+ static Error serialize(RawByteChannel &C, const std::string &S) {
+ return SerializationTraits<ChannelT, std::string, StringRef>::serialize(C,
+ S);
+ }
+
+ /// RPC channel deserialization for std::strings.
+ static Error deserialize(RawByteChannel &C, std::string &S) {
+ uint64_t Count = 0;
+ if (auto Err = deserializeSeq(C, Count))
+ return Err;
+ S.resize(Count);
+ return C.readBytes(&S[0], Count);
+ }
+};
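// An extension sketch (illustrative only): user-defined argument types can be
// supported by specializing SerializationTraits in the same style as above,
// typically by forwarding to serializeSeq/deserializeSeq on the members. The
// 'Point' struct is hypothetical; the specialization must live in namespace
// llvm::orc::rpc.
//
//   struct Point { int32_t X, Y; };
//
//   template <typename ChannelT>
//   class SerializationTraits<
//       ChannelT, Point, Point,
//       typename std::enable_if<
//           std::is_base_of<RawByteChannel, ChannelT>::value>::type> {
//   public:
//     static Error serialize(ChannelT &C, const Point &P) {
//       return serializeSeq(C, P.X, P.Y);
//     }
//
//     static Error deserialize(ChannelT &C, Point &P) {
//       return deserializeSeq(C, P.X, P.Y);
//     }
//   };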
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
diff --git a/include/llvm/ExecutionEngine/RTDyldMemoryManager.h b/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
index 9451fa57c0f6..5638717790bb 100644
--- a/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
+++ b/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
@@ -14,22 +14,24 @@
#ifndef LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
-#include "RuntimeDyld.h"
-#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/Support/CBindingWrapping.h"
-#include "llvm/Support/Memory.h"
+#include "llvm-c/ExecutionEngine.h"
+#include <cstddef>
+#include <cstdint>
+#include <string>
namespace llvm {
class ExecutionEngine;
- namespace object {
- class ObjectFile;
- }
+namespace object {
+ class ObjectFile;
+} // end namespace object
class MCJITMemoryManager : public RuntimeDyld::MemoryManager {
public:
-
// Don't hide the notifyObjectLoaded method from RuntimeDyld::MemoryManager.
using RuntimeDyld::MemoryManager::notifyObjectLoaded;
@@ -54,11 +56,11 @@ public:
// FIXME: As the RuntimeDyld fills out, additional routines will be needed
// for the varying types of objects to be allocated.
class RTDyldMemoryManager : public MCJITMemoryManager,
- public RuntimeDyld::SymbolResolver {
+ public JITSymbolResolver {
+public:
+ RTDyldMemoryManager() = default;
RTDyldMemoryManager(const RTDyldMemoryManager&) = delete;
void operator=(const RTDyldMemoryManager&) = delete;
-public:
- RTDyldMemoryManager() {}
~RTDyldMemoryManager() override;
/// Register EH frames in the current process.
@@ -98,9 +100,8 @@ public:
/// Clients writing custom RTDyldMemoryManagers are encouraged to override
/// this method and return a SymbolInfo with the flags set correctly. This is
/// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
- RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) override {
- return RuntimeDyld::SymbolInfo(getSymbolAddress(Name),
- JITSymbolFlags::Exported);
+ JITSymbol findSymbol(const std::string &Name) override {
+ return JITSymbol(getSymbolAddress(Name), JITSymbolFlags::Exported);
}
/// Legacy symbol lookup -- DEPRECATED! Please override
@@ -121,10 +122,10 @@ public:
/// Clients writing custom RTDyldMemoryManagers are encouraged to override
/// this method and return a SymbolInfo with the flags set correctly. This is
/// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
- RuntimeDyld::SymbolInfo
+ JITSymbol
findSymbolInLogicalDylib(const std::string &Name) override {
- return RuntimeDyld::SymbolInfo(getSymbolAddressInLogicalDylib(Name),
- JITSymbolFlags::Exported);
+ return JITSymbol(getSymbolAddressInLogicalDylib(Name),
+ JITSymbolFlags::Exported);
}
/// This method returns the address of the specified function. As such it is
@@ -144,7 +145,6 @@ public:
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(
RTDyldMemoryManager, LLVMMCJITMemoryManagerRef)
-} // namespace llvm
-
+} // end namespace llvm
-#endif
+#endif // LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
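// A migration sketch (illustrative only): custom memory managers now override
// the symbol-lookup hooks in terms of JITSymbol rather than the old
// RuntimeDyld::SymbolInfo. 'MyMemoryManager' and 'ExternalSymbols' are
// hypothetical; SectionMemoryManager is declared in SectionMemoryManager.h.
//
//   class MyMemoryManager : public SectionMemoryManager {
//   public:
//     JITSymbol findSymbol(const std::string &Name) override {
//       auto I = ExternalSymbols.find(Name);
//       if (I != ExternalSymbols.end())
//         return JITSymbol(I->getValue(), JITSymbolFlags::Exported);
//       // Fall back to the default in-process lookup.
//       return SectionMemoryManager::findSymbol(Name);
//     }
//
//   private:
//     StringMap<JITTargetAddress> ExternalSymbols;
//   };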
diff --git a/include/llvm/ExecutionEngine/RuntimeDyld.h b/include/llvm/ExecutionEngine/RuntimeDyld.h
index bd485de91bd4..13a5f9922c51 100644
--- a/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -14,33 +14,39 @@
#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
#define LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
-#include "JITSymbolFlags.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Object/ObjectFile.h"
-#include "llvm/Support/Memory.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
#include <map>
#include <memory>
-#include <utility>
+#include <string>
+#include <system_error>
namespace llvm {
-class StringRef;
-
namespace object {
- class ObjectFile;
template <typename T> class OwningBinary;
-}
+} // end namespace object
/// Base class for errors originating in RuntimeDyld, e.g. missing relocation
/// support.
class RuntimeDyldError : public ErrorInfo<RuntimeDyldError> {
public:
static char ID;
+
RuntimeDyldError(std::string ErrMsg) : ErrMsg(std::move(ErrMsg)) {}
+
void log(raw_ostream &OS) const override;
const std::string &getErrorMessage() const { return ErrMsg; }
std::error_code convertToErrorCode() const override;
+
private:
std::string ErrMsg;
};
@@ -51,30 +57,16 @@ class RuntimeDyldCheckerImpl;
class RuntimeDyld {
friend class RuntimeDyldCheckerImpl;
- RuntimeDyld(const RuntimeDyld &) = delete;
- void operator=(const RuntimeDyld &) = delete;
-
protected:
// Change the address associated with a section when resolving relocations.
// Any relocations already associated with the symbol will be re-resolved.
void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
-public:
-
- /// \brief Information about a named symbol.
- class SymbolInfo : public JITSymbolBase {
- public:
- SymbolInfo(std::nullptr_t) : JITSymbolBase(JITSymbolFlags::None), Address(0) {}
- SymbolInfo(uint64_t Address, JITSymbolFlags Flags)
- : JITSymbolBase(Flags), Address(Address) {}
- explicit operator bool() const { return Address != 0; }
- uint64_t getAddress() const { return Address; }
- private:
- uint64_t Address;
- };
+public:
/// \brief Information about the loaded object.
class LoadedObjectInfo : public llvm::LoadedObjectInfo {
friend class RuntimeDyldImpl;
+
public:
typedef std::map<object::SectionRef, unsigned> ObjSectionToIDMap;
@@ -103,6 +95,7 @@ public:
LoadedObjectInfoHelper(RuntimeDyldImpl &RTDyld,
LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
: LoadedObjectInfo(RTDyld, std::move(ObjSecToIDMap)) {}
+
std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
return llvm::make_unique<Derived>(static_cast<const Derived &>(*this));
}
@@ -111,9 +104,10 @@ public:
/// \brief Memory Management.
class MemoryManager {
friend class RuntimeDyld;
+
public:
- MemoryManager() : FinalizationLocked(false) {}
- virtual ~MemoryManager() {}
+ MemoryManager() = default;
+ virtual ~MemoryManager() = default;
/// Allocate a memory block of (at least) the given size suitable for
/// executable code. The SectionID is a unique identifier assigned by the
@@ -186,42 +180,14 @@ public:
private:
virtual void anchor();
- bool FinalizationLocked;
- };
-
- /// \brief Symbol resolution.
- class SymbolResolver {
- public:
- virtual ~SymbolResolver() {}
-
- /// This method returns the address of the specified symbol if it exists
- /// within the logical dynamic library represented by this
- /// RTDyldMemoryManager. Unlike findSymbol, queries through this
- /// interface should return addresses for hidden symbols.
- ///
- /// This is of particular importance for the Orc JIT APIs, which support lazy
- /// compilation by breaking up modules: Each of those broken out modules
- /// must be able to resolve hidden symbols provided by the others. Clients
- /// writing memory managers for MCJIT can usually ignore this method.
- ///
- /// This method will be queried by RuntimeDyld when checking for previous
- /// definitions of common symbols.
- virtual SymbolInfo findSymbolInLogicalDylib(const std::string &Name) = 0;
-
- /// This method returns the address of the specified function or variable.
- /// It is used to resolve symbols during module linking.
- ///
- /// If the returned symbol's address is equal to ~0ULL then RuntimeDyld will
- /// skip all relocations for that symbol, and the client will be responsible
- /// for handling them manually.
- virtual SymbolInfo findSymbol(const std::string &Name) = 0;
- private:
- virtual void anchor();
+ bool FinalizationLocked = false;
};
/// \brief Construct a RuntimeDyld instance.
- RuntimeDyld(MemoryManager &MemMgr, SymbolResolver &Resolver);
+ RuntimeDyld(MemoryManager &MemMgr, JITSymbolResolver &Resolver);
+ RuntimeDyld(const RuntimeDyld &) = delete;
+ void operator=(const RuntimeDyld &) = delete;
~RuntimeDyld();
/// Add the referenced object file to the list of objects to be loaded and
@@ -235,7 +201,7 @@ public:
/// Get the target address and flags for the named symbol.
/// This address is the one used for relocation.
- SymbolInfo getSymbol(StringRef Name) const;
+ JITEvaluatedSymbol getSymbol(StringRef Name) const;
/// Resolve the relocations for all symbols we currently know about.
void resolveRelocations();
@@ -295,7 +261,7 @@ private:
// interface.
std::unique_ptr<RuntimeDyldImpl> Dyld;
MemoryManager &MemMgr;
- SymbolResolver &Resolver;
+ JITSymbolResolver &Resolver;
bool ProcessAllSections;
RuntimeDyldCheckerImpl *Checker;
};
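// A usage sketch (illustrative only): after this change a single
// RTDyldMemoryManager-derived object, such as a SectionMemoryManager, can be
// passed as both the MemoryManager and the JITSymbolResolver. Object loading
// and error handling are abbreviated; 'Obj' and 'invokeEntry' are
// hypothetical.
//
//   SectionMemoryManager MemMgr;
//   RuntimeDyld RTDyld(MemMgr, MemMgr);
//
//   RTDyld.loadObject(Obj);        // Obj: const object::ObjectFile &
//   RTDyld.resolveRelocations();
//   MemMgr.finalizeMemory();
//
//   if (auto Main = RTDyld.getSymbol("main"))
//     invokeEntry(Main.getAddress());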
diff --git a/include/llvm/ExecutionEngine/SectionMemoryManager.h b/include/llvm/ExecutionEngine/SectionMemoryManager.h
index 7bb96eb8b71b..3b2af11cdaf4 100644
--- a/include/llvm/ExecutionEngine/SectionMemoryManager.h
+++ b/include/llvm/ExecutionEngine/SectionMemoryManager.h
@@ -16,11 +16,15 @@
#define LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Memory.h"
+#include <cstdint>
+#include <string>
+#include <system_error>
namespace llvm {
+
/// This is a simple memory manager which implements the methods called by
/// the RuntimeDyld class to allocate memory for section-based loading of
/// objects, usually those generated by the MCJIT execution engine.
@@ -35,11 +39,10 @@ namespace llvm {
/// MCJIT::finalizeObject or by calling SectionMemoryManager::finalizeMemory
/// directly. Clients of MCJIT should call MCJIT::finalizeObject.
class SectionMemoryManager : public RTDyldMemoryManager {
+public:
+ SectionMemoryManager() = default;
SectionMemoryManager(const SectionMemoryManager&) = delete;
void operator=(const SectionMemoryManager&) = delete;
-
-public:
- SectionMemoryManager() { }
~SectionMemoryManager() override;
/// \brief Allocates a memory block of (at least) the given size suitable for
@@ -118,6 +121,6 @@ private:
MemoryGroup RODataMem;
};
-}
+} // end namespace llvm
#endif // LLVM_EXECUTION_ENGINE_SECTION_MEMORY_MANAGER_H
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index 5ef03715b9a9..15783858dd32 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -68,7 +68,7 @@ public:
// IR-Level Attributes
None, ///< No attributes have been set
#define GET_ATTR_ENUM
- #include "llvm/IR/Attributes.inc"
+ #include "llvm/IR/Attributes.gen"
    EndAttrKinds ///< Sentinel value useful for loops
};
@@ -338,6 +338,10 @@ public:
/// may be faster.
bool hasFnAttribute(Attribute::AttrKind Kind) const;
+ /// \brief Equivalent to hasAttribute(AttributeSet::FunctionIndex, Kind) but
+ /// may be faster.
+ bool hasFnAttribute(StringRef Kind) const;
+
/// \brief Return true if the specified attribute is set for at least one
/// parameter or for the return value. If Index is not nullptr, the index
/// of a parameter with the specified attribute is provided.
@@ -387,9 +391,6 @@ public:
// AttributeSet Introspection
//===--------------------------------------------------------------------===//
- // FIXME: Remove this.
- uint64_t Raw(unsigned Index) const;
-
/// \brief Return a raw pointer that uniquely identifies this attribute list.
void *getRawPointer() const {
return pImpl;
@@ -454,11 +455,6 @@ public:
AttrBuilder()
: Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
DerefOrNullBytes(0), AllocSizeArgs(0) {}
- explicit AttrBuilder(uint64_t Val)
- : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
- DerefOrNullBytes(0), AllocSizeArgs(0) {
- addRawValue(Val);
- }
AttrBuilder(const Attribute &A)
: Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
DerefOrNullBytes(0), AllocSizeArgs(0) {
@@ -586,11 +582,6 @@ public:
bool operator!=(const AttrBuilder &B) {
return !(*this == B);
}
-
- // FIXME: Remove this in 4.0.
-
- /// \brief Add the raw value to the internal representation.
- AttrBuilder &addRawValue(uint64_t Val);
};
namespace AttributeFuncs {
diff --git a/include/llvm/IR/AutoUpgrade.h b/include/llvm/IR/AutoUpgrade.h
index 9eb358682c65..b42a3d3ad955 100644
--- a/include/llvm/IR/AutoUpgrade.h
+++ b/include/llvm/IR/AutoUpgrade.h
@@ -51,9 +51,10 @@ namespace llvm {
/// module is modified.
bool UpgradeModuleFlags(Module &M);
- /// If the TBAA tag for the given instruction uses the scalar TBAA format,
- /// we upgrade it to the struct-path aware TBAA format.
- void UpgradeInstWithTBAATag(Instruction *I);
+ /// If the given TBAA tag uses the scalar TBAA format, create a new node
+ /// corresponding to the upgrade to the struct-path aware TBAA format.
+ /// Otherwise return the \p TBAANode itself.
+ MDNode *UpgradeTBAANode(MDNode &TBAANode);
/// This is an auto-upgrade for bitcast between pointers with different
/// address spaces: the instruction is replaced by a pair ptrtoint+inttoptr.
diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h
index e7daf6ee238e..93dbd573ee16 100644
--- a/include/llvm/IR/BasicBlock.h
+++ b/include/llvm/IR/BasicBlock.h
@@ -14,25 +14,24 @@
#ifndef LLVM_IR_BASICBLOCK_H
#define LLVM_IR_BASICBLOCK_H
-#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/CBindingWrapping.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm-c/Types.h"
+#include <cassert>
+#include <cstddef>
namespace llvm {
class CallInst;
+class Function;
class LandingPadInst;
-class TerminatorInst;
class LLVMContext;
-class BlockAddress;
-class Function;
-
-template <>
-struct SymbolTableListSentinelTraits<BasicBlock>
- : public ilist_half_embedded_sentinel_traits<BasicBlock> {};
+class TerminatorInst;
/// \brief LLVM Basic Block Representation
///
@@ -51,19 +50,17 @@ struct SymbolTableListSentinelTraits<BasicBlock>
/// are "well formed".
class BasicBlock : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
- friend class BlockAddress;
public:
typedef SymbolTableList<Instruction> InstListType;
private:
+ friend class BlockAddress;
+ friend class SymbolTableListTraits<BasicBlock>;
+
InstListType InstList;
Function *Parent;
void setParent(Function *parent);
- friend class SymbolTableListTraits<BasicBlock>;
-
- BasicBlock(const BasicBlock &) = delete;
- void operator=(const BasicBlock &) = delete;
/// \brief Constructor.
///
@@ -73,7 +70,12 @@ private:
explicit BasicBlock(LLVMContext &C, const Twine &Name = "",
Function *Parent = nullptr,
BasicBlock *InsertBefore = nullptr);
+
public:
+ BasicBlock(const BasicBlock &) = delete;
+ BasicBlock &operator=(const BasicBlock &) = delete;
+ ~BasicBlock() override;
+
/// \brief Get the context in which this basic block lives.
LLVMContext &getContext() const;
@@ -93,7 +95,6 @@ public:
BasicBlock *InsertBefore = nullptr) {
return new BasicBlock(Context, Name, Parent, InsertBefore);
}
- ~BasicBlock() override;
/// \brief Return the enclosing method, or null if none.
const Function *getParent() const { return Parent; }
@@ -334,6 +335,7 @@ private:
assert((int)(signed char)getSubclassDataFromValue() >= 0 &&
"Refcount wrap-around");
}
+
/// \brief Shadow Value::setValueSubclassData with a private forwarding method
/// so that any future subclasses cannot accidentally use it.
void setValueSubclassData(unsigned short D) {
@@ -344,6 +346,6 @@ private:
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef)
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_BASICBLOCK_H
diff --git a/include/llvm/IR/CFG.h b/include/llvm/IR/CFG.h
index a256b5960bb3..52de11a06baf 100644
--- a/include/llvm/IR/CFG.h
+++ b/include/llvm/IR/CFG.h
@@ -16,9 +16,17 @@
#define LLVM_IR_CFG_H
#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
namespace llvm {
@@ -44,7 +52,7 @@ public:
typedef typename super::pointer pointer;
typedef typename super::reference reference;
- PredIterator() {}
+ PredIterator() = default;
explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
advancePastNonTerminators();
}
@@ -85,8 +93,8 @@ public:
typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
typedef PredIterator<const BasicBlock,
Value::const_user_iterator> const_pred_iterator;
-typedef llvm::iterator_range<pred_iterator> pred_range;
-typedef llvm::iterator_range<const_pred_iterator> pred_const_range;
+typedef iterator_range<pred_iterator> pred_range;
+typedef iterator_range<const_pred_iterator> pred_const_range;
inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
@@ -114,8 +122,8 @@ typedef TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>
succ_iterator;
typedef TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>
succ_const_iterator;
-typedef llvm::iterator_range<succ_iterator> succ_range;
-typedef llvm::iterator_range<succ_const_iterator> succ_const_range;
+typedef iterator_range<succ_iterator> succ_range;
+typedef iterator_range<succ_const_iterator> succ_const_range;
inline succ_iterator succ_begin(BasicBlock *BB) {
return succ_iterator(BB->getTerminator());
@@ -144,8 +152,6 @@ struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
static const bool value = isPodLike<T>::value;
};
-
-
//===--------------------------------------------------------------------===//
// GraphTraits specializations for basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
@@ -154,32 +160,22 @@ struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
// graph of basic blocks...
template <> struct GraphTraits<BasicBlock*> {
- typedef BasicBlock NodeType;
typedef BasicBlock *NodeRef;
typedef succ_iterator ChildIteratorType;
- static NodeType *getEntryNode(BasicBlock *BB) { return BB; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return succ_begin(N);
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return succ_end(N);
- }
+ static NodeRef getEntryNode(BasicBlock *BB) { return BB; }
+ static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
+ static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};
template <> struct GraphTraits<const BasicBlock*> {
- typedef const BasicBlock NodeType;
typedef const BasicBlock *NodeRef;
typedef succ_const_iterator ChildIteratorType;
- static NodeType *getEntryNode(const BasicBlock *BB) { return BB; }
+ static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return succ_begin(N);
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return succ_end(N);
- }
+ static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
+ static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};
// Provide specializations of GraphTraits to be able to treat a function as a
@@ -187,36 +183,22 @@ template <> struct GraphTraits<const BasicBlock*> {
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
-template <> struct GraphTraits<Inverse<BasicBlock*> > {
- typedef BasicBlock NodeType;
+template <> struct GraphTraits<Inverse<BasicBlock*>> {
typedef BasicBlock *NodeRef;
typedef pred_iterator ChildIteratorType;
- static NodeType *getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return pred_begin(N);
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return pred_end(N);
- }
+ static NodeRef getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
+ static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
+ static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
-template <> struct GraphTraits<Inverse<const BasicBlock*> > {
- typedef const BasicBlock NodeType;
+template <> struct GraphTraits<Inverse<const BasicBlock*>> {
typedef const BasicBlock *NodeRef;
typedef const_pred_iterator ChildIteratorType;
- static NodeType *getEntryNode(Inverse<const BasicBlock*> G) {
- return G.Graph;
- }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return pred_begin(N);
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return pred_end(N);
- }
+ static NodeRef getEntryNode(Inverse<const BasicBlock *> G) { return G.Graph; }
+ static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
+ static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
-
-
//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
@@ -226,44 +208,57 @@ template <> struct GraphTraits<Inverse<const BasicBlock*> > {
// except that the root node is implicitly the first node of the function.
//
template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
- static NodeType *getEntryNode(Function *F) { return &F->getEntryBlock(); }
+ static NodeRef getEntryNode(Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef Function::iterator nodes_iterator;
- static nodes_iterator nodes_begin(Function *F) { return F->begin(); }
- static nodes_iterator nodes_end (Function *F) { return F->end(); }
- static size_t size (Function *F) { return F->size(); }
+ typedef pointer_iterator<Function::iterator> nodes_iterator;
+
+ static nodes_iterator nodes_begin(Function *F) {
+ return nodes_iterator(F->begin());
+ }
+
+ static nodes_iterator nodes_end(Function *F) {
+ return nodes_iterator(F->end());
+ }
+
+ static size_t size(Function *F) { return F->size(); }
};
template <> struct GraphTraits<const Function*> :
public GraphTraits<const BasicBlock*> {
- static NodeType *getEntryNode(const Function *F) {return &F->getEntryBlock();}
+ static NodeRef getEntryNode(const Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef Function::const_iterator nodes_iterator;
- static nodes_iterator nodes_begin(const Function *F) { return F->begin(); }
- static nodes_iterator nodes_end (const Function *F) { return F->end(); }
- static size_t size (const Function *F) { return F->size(); }
-};
+ typedef pointer_iterator<Function::const_iterator> nodes_iterator;
+
+ static nodes_iterator nodes_begin(const Function *F) {
+ return nodes_iterator(F->begin());
+ }
+ static nodes_iterator nodes_end(const Function *F) {
+ return nodes_iterator(F->end());
+ }
+
+ static size_t size(const Function *F) { return F->size(); }
+};
// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order. Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
-template <> struct GraphTraits<Inverse<Function*> > :
- public GraphTraits<Inverse<BasicBlock*> > {
- static NodeType *getEntryNode(Inverse<Function*> G) {
+template <> struct GraphTraits<Inverse<Function*>> :
+ public GraphTraits<Inverse<BasicBlock*>> {
+ static NodeRef getEntryNode(Inverse<Function *> G) {
return &G.Graph->getEntryBlock();
}
};
-template <> struct GraphTraits<Inverse<const Function*> > :
- public GraphTraits<Inverse<const BasicBlock*> > {
- static NodeType *getEntryNode(Inverse<const Function *> G) {
+template <> struct GraphTraits<Inverse<const Function*>> :
+ public GraphTraits<Inverse<const BasicBlock*>> {
+ static NodeRef getEntryNode(Inverse<const Function *> G) {
return &G.Graph->getEntryBlock();
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_CFG_H
diff --git a/include/llvm/IR/CMakeLists.txt b/include/llvm/IR/CMakeLists.txt
index eade87e05bc9..cf75d5800b74 100644
--- a/include/llvm/IR/CMakeLists.txt
+++ b/include/llvm/IR/CMakeLists.txt
@@ -1,5 +1,5 @@
set(LLVM_TARGET_DEFINITIONS Attributes.td)
-tablegen(LLVM Attributes.inc -gen-attrs)
+tablegen(LLVM Attributes.gen -gen-attrs)
set(LLVM_TARGET_DEFINITIONS Intrinsics.td)
tablegen(LLVM Intrinsics.gen -gen-intrinsic)
diff --git a/include/llvm/IR/CallSite.h b/include/llvm/IR/CallSite.h
index 9c977aef941a..b02c89474146 100644
--- a/include/llvm/IR/CallSite.h
+++ b/include/llvm/IR/CallSite.h
@@ -26,17 +26,26 @@
#ifndef LLVM_IR_CALLSITE_H
#define LLVM_IR_CALLSITE_H
-#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
namespace llvm {
-class CallInst;
-class InvokeInst;
-
template <typename FunTy = const Function,
typename BBTy = const BasicBlock,
typename ValTy = const Value,
@@ -109,6 +118,17 @@ public:
*getCallee() = V;
}
+ /// Return the intrinsic ID of the intrinsic called by this CallSite,
+ /// or Intrinsic::not_intrinsic if the called function is not an
+ /// intrinsic, or if this CallSite is an indirect call.
+ Intrinsic::ID getIntrinsicID() const {
+ if (auto *F = getCalledFunction())
+ return F->getIntrinsicID();
+ // Don't use Intrinsic::not_intrinsic, as it will require pulling
+ // Intrinsics.h into every header that uses CallSite.
+ return static_cast<Intrinsic::ID>(0);
+ }
+
/// isCallee - Determine whether the passed iterator points to the
/// callee operand's Use.
bool isCallee(Value::const_user_iterator UI) const {
@@ -302,10 +322,10 @@ public:
/// getAttributes/setAttributes - get or set the parameter attributes of
/// the call.
- const AttributeSet &getAttributes() const {
+ AttributeSet getAttributes() const {
CALLSITE_DELEGATE_GETTER(getAttributes());
}
- void setAttributes(const AttributeSet &PAL) {
+ void setAttributes(AttributeSet PAL) {
CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
}
@@ -313,10 +333,6 @@ public:
CALLSITE_DELEGATE_SETTER(addAttribute(i, Kind));
}
- void addAttribute(unsigned i, StringRef Kind, StringRef Value) {
- CALLSITE_DELEGATE_SETTER(addAttribute(i, Kind, Value));
- }
-
void addAttribute(unsigned i, Attribute Attr) {
CALLSITE_DELEGATE_SETTER(addAttribute(i, Attr));
}
@@ -329,10 +345,6 @@ public:
CALLSITE_DELEGATE_SETTER(removeAttribute(i, Kind));
}
- void removeAttribute(unsigned i, Attribute Attr) {
- CALLSITE_DELEGATE_SETTER(removeAttribute(i, Attr));
- }
-
/// \brief Return true if this function has the given attribute.
bool hasFnAttr(Attribute::AttrKind Kind) const {
CALLSITE_DELEGATE_GETTER(hasFnAttr(Kind));
@@ -509,6 +521,10 @@ public:
CALLSITE_DELEGATE_GETTER(countOperandBundlesOfType(ID));
}
+ bool isBundleOperand(unsigned Idx) const {
+ CALLSITE_DELEGATE_GETTER(isBundleOperand(Idx));
+ }
+
IterTy arg_begin() const {
CALLSITE_DELEGATE_GETTER(arg_begin());
}
@@ -602,7 +618,7 @@ class CallSite : public CallSiteBase<Function, BasicBlock, Value, User, Use,
Instruction, CallInst, InvokeInst,
User::op_iterator> {
public:
- CallSite() {}
+ CallSite() = default;
CallSite(CallSiteBase B) : CallSiteBase(B) {}
CallSite(CallInst *CI) : CallSiteBase(CI) {}
CallSite(InvokeInst *II) : CallSiteBase(II) {}
@@ -616,13 +632,39 @@ public:
}
private:
+ friend struct DenseMapInfo<CallSite>;
+
User::op_iterator getCallee() const;
};
+template <> struct DenseMapInfo<CallSite> {
+ using BaseInfo = DenseMapInfo<decltype(CallSite::I)>;
+
+ static CallSite getEmptyKey() {
+ CallSite CS;
+ CS.I = BaseInfo::getEmptyKey();
+ return CS;
+ }
+
+ static CallSite getTombstoneKey() {
+ CallSite CS;
+ CS.I = BaseInfo::getTombstoneKey();
+ return CS;
+ }
+
+ static unsigned getHashValue(const CallSite &CS) {
+ return BaseInfo::getHashValue(CS.I);
+ }
+
+ static bool isEqual(const CallSite &LHS, const CallSite &RHS) {
+ return LHS == RHS;
+ }
+};
+
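// A usage sketch (illustrative only): the specialization above allows call
// sites to be used directly as DenseMap/DenseSet keys, for example to keep
// per-call-site counters:
//
//   DenseMap<CallSite, unsigned> VisitCounts;
//
//   void visit(Instruction &I) {
//     CallSite CS(&I);
//     if (!CS)
//       return;    // Not a call or invoke.
//     ++VisitCounts[CS];
//   }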
/// ImmutableCallSite - establish a view to a call site for examination
class ImmutableCallSite : public CallSiteBase<> {
public:
- ImmutableCallSite() {}
+ ImmutableCallSite() = default;
ImmutableCallSite(const CallInst *CI) : CallSiteBase(CI) {}
ImmutableCallSite(const InvokeInst *II) : CallSiteBase(II) {}
explicit ImmutableCallSite(const Instruction *II) : CallSiteBase(II) {}
@@ -630,6 +672,6 @@ public:
ImmutableCallSite(CallSite CS) : CallSiteBase(CS.getInstruction()) {}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_CALLSITE_H
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h
index 4987b7e943f2..9cfbda1f6857 100644
--- a/include/llvm/IR/CallingConv.h
+++ b/include/llvm/IR/CallingConv.h
@@ -193,6 +193,9 @@ namespace CallingConv {
/// Calling convention for AMDGPU code object kernels.
AMDGPU_KERNEL = 91,
+  /// Register calling convention used for parameter transfer optimization
+ X86_RegCall = 92,
+
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
diff --git a/include/llvm/IR/Comdat.h b/include/llvm/IR/Comdat.h
index 577247f27e20..f4a391c31ae2 100644
--- a/include/llvm/IR/Comdat.h
+++ b/include/llvm/IR/Comdat.h
@@ -36,7 +36,9 @@ public:
SameSize, ///< The data referenced by the COMDAT must be the same size.
};
+ Comdat(const Comdat &) = delete;
Comdat(Comdat &&C);
+
SelectionKind getSelectionKind() const { return SK; }
void setSelectionKind(SelectionKind Val) { SK = Val; }
StringRef getName() const;
@@ -45,8 +47,8 @@ public:
private:
friend class Module;
+
Comdat();
- Comdat(const Comdat &) = delete;
// Points to the map in Module.
StringMapEntry<Comdat> *Name;
@@ -58,6 +60,6 @@ inline raw_ostream &operator<<(raw_ostream &OS, const Comdat &C) {
return OS;
}
-} // end llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_COMDAT_H
diff --git a/include/llvm/IR/Constant.h b/include/llvm/IR/Constant.h
index 3c5fe556d50f..99c970ebb633 100644
--- a/include/llvm/IR/Constant.h
+++ b/include/llvm/IR/Constant.h
@@ -15,11 +15,12 @@
#define LLVM_IR_CONSTANT_H
#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
namespace llvm {
- class APInt;
- template<typename T> class SmallVectorImpl;
+class APInt;
/// This is an important base class in LLVM. It provides the common facilities
/// of all constant values in an LLVM program. A constant is a value that is
@@ -39,8 +40,6 @@ namespace llvm {
/// don't have to worry about the lifetime of the objects.
/// @brief LLVM Constant Representation
class Constant : public User {
- void operator=(const Constant &) = delete;
- Constant(const Constant &) = delete;
void anchor() override;
protected:
@@ -48,6 +47,9 @@ protected:
: User(ty, vty, Ops, NumOps) {}
public:
+ void operator=(const Constant &) = delete;
+ Constant(const Constant &) = delete;
+
/// Return true if this is the value that would be returned by getNullValue.
bool isNullValue() const;
@@ -159,6 +161,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_CONSTANT_H
diff --git a/include/llvm/IR/ConstantFolder.h b/include/llvm/IR/ConstantFolder.h
index fb6ca3b3184c..da5bba7ba141 100644
--- a/include/llvm/IR/ConstantFolder.h
+++ b/include/llvm/IR/ConstantFolder.h
@@ -17,15 +17,17 @@
#ifndef LLVM_IR_CONSTANTFOLDER_H
#define LLVM_IR_CONSTANTFOLDER_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
namespace llvm {
/// ConstantFolder - Create constants with minimum, target independent, folding.
class ConstantFolder {
public:
- explicit ConstantFolder() {}
+ explicit ConstantFolder() = default;
//===--------------------------------------------------------------------===//
// Binary Operators
@@ -35,61 +37,78 @@ public:
bool HasNUW = false, bool HasNSW = false) const {
return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
}
+
Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFAdd(LHS, RHS);
}
+
Constant *CreateSub(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
}
+
Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFSub(LHS, RHS);
}
+
Constant *CreateMul(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
}
+
Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFMul(LHS, RHS);
}
+
Constant *CreateUDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const {
return ConstantExpr::getUDiv(LHS, RHS, isExact);
}
+
Constant *CreateSDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const {
return ConstantExpr::getSDiv(LHS, RHS, isExact);
}
+
Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFDiv(LHS, RHS);
}
+
Constant *CreateURem(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getURem(LHS, RHS);
}
+
Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getSRem(LHS, RHS);
}
+
Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFRem(LHS, RHS);
}
+
Constant *CreateShl(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
}
+
Constant *CreateLShr(Constant *LHS, Constant *RHS,
bool isExact = false) const {
return ConstantExpr::getLShr(LHS, RHS, isExact);
}
+
Constant *CreateAShr(Constant *LHS, Constant *RHS,
bool isExact = false) const {
return ConstantExpr::getAShr(LHS, RHS, isExact);
}
+
Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getAnd(LHS, RHS);
}
+
Constant *CreateOr(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getOr(LHS, RHS);
}
+
Constant *CreateXor(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getXor(LHS, RHS);
}
@@ -107,9 +126,11 @@ public:
bool HasNUW = false, bool HasNSW = false) const {
return ConstantExpr::getNeg(C, HasNUW, HasNSW);
}
+
Constant *CreateFNeg(Constant *C) const {
return ConstantExpr::getFNeg(C);
}
+
Constant *CreateNot(Constant *C) const {
return ConstantExpr::getNot(C);
}
@@ -122,12 +143,14 @@ public:
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
}
+
Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
return ConstantExpr::getGetElementPtr(Ty, C, Idx);
}
+
Constant *CreateGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList) const {
return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
@@ -137,6 +160,7 @@ public:
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
}
+
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
Constant *Idx) const {
// This form of the function only exists to avoid ambiguous overload
@@ -144,6 +168,7 @@ public:
// ArrayRef<Value *>.
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
}
+
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList) const {
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
@@ -157,6 +182,7 @@ public:
Type *DestTy) const {
return ConstantExpr::getCast(Op, C, DestTy);
}
+
Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getPointerCast(C, DestTy);
}
@@ -170,6 +196,7 @@ public:
bool isSigned) const {
return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
}
+
Constant *CreateFPCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getFPCast(C, DestTy);
}
@@ -177,15 +204,19 @@ public:
Constant *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
}
+
Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
+
Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
+
Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getZExtOrBitCast(C, DestTy);
}
+
Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
return ConstantExpr::getSExtOrBitCast(C, DestTy);
}
@@ -202,6 +233,7 @@ public:
Constant *RHS) const {
return ConstantExpr::getCompare(P, LHS, RHS);
}
+
Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const {
return ConstantExpr::getCompare(P, LHS, RHS);
@@ -240,6 +272,6 @@ public:
}
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_CONSTANTFOLDER_H
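
A usage sketch for the ConstantFolder helpers above; the wrapper function and the concrete operand values are illustrative only, assuming an existing LLVMContext.

#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Fold two i32 expressions without creating any instructions.
static Constant *foldExample(LLVMContext &Ctx) {
  ConstantFolder Folder;
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *A = ConstantInt::get(I32, 6);
  Constant *B = ConstantInt::get(I32, 7);
  // 6 * 7 with the nsw flag requested; folds to the ConstantInt 42.
  Constant *Mul = Folder.CreateMul(A, B, /*HasNUW=*/false, /*HasNSW=*/true);
  // Shift the product left by one bit; folds to 84.
  return Folder.CreateShl(Mul, ConstantInt::get(I32, 1));
}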
diff --git a/include/llvm/IR/ConstantRange.h b/include/llvm/IR/ConstantRange.h
index 9458fa9f5c86..27a9b1364448 100644
--- a/include/llvm/IR/ConstantRange.h
+++ b/include/llvm/IR/ConstantRange.h
@@ -38,6 +38,8 @@
namespace llvm {
+class MDNode;
+
/// This class represents a range of values.
///
class ConstantRange {
@@ -166,6 +168,14 @@ public:
return nullptr;
}
+ /// If this set contains all but a single element, return it; otherwise return
+ /// null.
+ const APInt *getSingleMissingElement() const {
+ if (Lower == Upper + 1)
+ return &Upper;
+ return nullptr;
+ }
+
/// Return true if this set contains exactly one member.
///
bool isSingleElement() const { return getSingleElement() != nullptr; }
@@ -223,6 +233,15 @@ public:
///
ConstantRange unionWith(const ConstantRange &CR) const;
+ /// Return a new range representing the possible values resulting
+ /// from an application of the specified cast operator to this range. \p
+ /// BitWidth is the target bitwidth of the cast. For casts which don't
+ /// change bitwidth, it must be the same as the source bitwidth. For casts
+ /// which do change bitwidth, the bitwidth must be consistent with the
+ /// requested cast and source bitwidth.
+ ConstantRange castOp(Instruction::CastOps CastOp,
+ uint32_t BitWidth) const;
+
/// Return a new range in the specified integer type, which must
/// be strictly larger than the current type. The returned range will
/// correspond to the possible range of values if the source range had been
@@ -250,9 +269,19 @@ public:
ConstantRange sextOrTrunc(uint32_t BitWidth) const;
/// Return a new range representing the possible values resulting
+ /// from an application of the specified binary operator to a left hand side
+ /// of this range and a right hand side of \p Other.
+ ConstantRange binaryOp(Instruction::BinaryOps BinOp,
+ const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
/// from an addition of a value in this range and a value in \p Other.
ConstantRange add(const ConstantRange &Other) const;
+ /// Return a new range representing the possible values resulting from a
+ /// known NSW addition of a value in this range and \p Other constant.
+ ConstantRange addWithNoSignedWrap(const APInt &Other) const;
+
/// Return a new range representing the possible values resulting
/// from a subtraction of a value in this range and a value in \p Other.
ConstantRange sub(const ConstantRange &Other) const;
@@ -318,6 +347,11 @@ inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) {
return OS;
}
+/// Parse out a conservative ConstantRange from !range metadata.
+///
+/// E.g. if RangeMD is !{i32 0, i32 10, i32 15, i32 20} then return [0, 20).
+ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD);
+
} // End llvm namespace
#endif
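
A sketch of the new ConstantRange entry points declared above (binaryOp, castOp, getSingleMissingElement); the concrete ranges and bit widths are illustrative.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instruction.h"
#include <cassert>

using namespace llvm;

static void rangeExample() {
  ConstantRange X(APInt(8, 1), APInt(8, 10));  // i8 values in [1, 10)
  ConstantRange Y(APInt(8, 5), APInt(8, 20));  // i8 values in [5, 20)
  // Dispatch by opcode instead of calling add() directly.
  ConstantRange Sum = X.binaryOp(Instruction::Add, Y);
  // Widen the result to i16 through the cast-dispatching helper.
  ConstantRange Wide = Sum.castOp(Instruction::ZExt, /*BitWidth=*/16);
  (void)Wide;
  // A wrapped set such as [6, 5) covers every i8 value except 5.
  ConstantRange AllButFive(APInt(8, 6), APInt(8, 5));
  if (const APInt *Missing = AllButFive.getSingleMissingElement())
    assert(*Missing == 5);
}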
diff --git a/include/llvm/IR/Constants.h b/include/llvm/IR/Constants.h
index 2a5d14d94646..cbefa3f05dfc 100644
--- a/include/llvm/IR/Constants.h
+++ b/include/llvm/IR/Constants.h
@@ -24,20 +24,29 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
namespace llvm {
class ArrayType;
class IntegerType;
-class StructType;
class PointerType;
-class VectorType;
class SequentialType;
-
-struct ConstantExprKeyType;
+class StructType;
+class VectorType;
template <class ConstantClass> struct ConstantAggrKeyType;
/// Base class for constants with no operands.
@@ -46,21 +55,25 @@ template <class ConstantClass> struct ConstantAggrKeyType;
/// Since they can be in use by unrelated modules (and are never based on
/// GlobalValues), it never makes sense to RAUW them.
class ConstantData : public Constant {
+ friend class Constant;
+
void anchor() override;
- void *operator new(size_t, unsigned) = delete;
- ConstantData() = delete;
- ConstantData(const ConstantData &) = delete;
- friend class Constant;
Value *handleOperandChangeImpl(Value *From, Value *To) {
llvm_unreachable("Constant data does not have operands!");
}
protected:
explicit ConstantData(Type *Ty, ValueTy VT) : Constant(Ty, VT, nullptr, 0) {}
+
void *operator new(size_t s) { return User::operator new(s, 0); }
public:
+ ConstantData() = delete;
+ ConstantData(const ConstantData &) = delete;
+
+ void *operator new(size_t, unsigned) = delete;
+
/// Methods to support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Value *V) {
return V->getValueID() >= ConstantDataFirstVal &&
@@ -73,15 +86,18 @@ public:
/// represents both boolean and integral constants.
/// @brief Class for constant integers.
class ConstantInt final : public ConstantData {
- void anchor() override;
- ConstantInt(const ConstantInt &) = delete;
- ConstantInt(IntegerType *Ty, const APInt& V);
+ friend class Constant;
+
APInt Val;
- friend class Constant;
+ ConstantInt(IntegerType *Ty, const APInt& V);
+
+ void anchor() override;
void destroyConstantImpl();
public:
+ ConstantInt(const ConstantInt &) = delete;
+
static ConstantInt *getTrue(LLVMContext &Context);
static ConstantInt *getFalse(LLVMContext &Context);
static Constant *getTrue(Type *Ty);
@@ -247,21 +263,22 @@ public:
}
};
-
//===----------------------------------------------------------------------===//
/// ConstantFP - Floating Point Values [float, double]
///
class ConstantFP final : public ConstantData {
- APFloat Val;
- void anchor() override;
- ConstantFP(const ConstantFP &) = delete;
-
friend class Constant;
- void destroyConstantImpl();
+
+ APFloat Val;
ConstantFP(Type *Ty, const APFloat& V);
+ void anchor() override;
+ void destroyConstantImpl();
+
public:
+ ConstantFP(const ConstantFP &) = delete;
+
/// Floating point negation must be implemented with f(x) = -0.0 - x. This
/// method returns the negative zero constant for floating point or vector
/// floating point types; for all other types, it returns the null value.
@@ -308,6 +325,7 @@ public:
FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
return isExactlyValue(FV);
}
+
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == ConstantFPVal;
@@ -318,15 +336,16 @@ public:
/// All zero aggregate value
///
class ConstantAggregateZero final : public ConstantData {
- ConstantAggregateZero(const ConstantAggregateZero &) = delete;
-
friend class Constant;
- void destroyConstantImpl();
explicit ConstantAggregateZero(Type *Ty)
: ConstantData(Ty, ConstantAggregateZeroVal) {}
+ void destroyConstantImpl();
+
public:
+ ConstantAggregateZero(const ConstantAggregateZero &) = delete;
+
static ConstantAggregateZero *get(Type *Ty);
/// If this CAZ has array or vector type, return a zero with the right element
@@ -392,11 +411,12 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantAggregate, Constant)
class ConstantArray final : public ConstantAggregate {
friend struct ConstantAggrKeyType<ConstantArray>;
friend class Constant;
- void destroyConstantImpl();
- Value *handleOperandChangeImpl(Value *From, Value *To);
ConstantArray(ArrayType *T, ArrayRef<Constant *> Val);
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To);
+
public:
// ConstantArray accessors
static Constant *get(ArrayType *T, ArrayRef<Constant*> V);
@@ -423,11 +443,12 @@ public:
class ConstantStruct final : public ConstantAggregate {
friend struct ConstantAggrKeyType<ConstantStruct>;
friend class Constant;
- void destroyConstantImpl();
- Value *handleOperandChangeImpl(Value *From, Value *To);
ConstantStruct(StructType *T, ArrayRef<Constant *> Val);
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To);
+
public:
// ConstantStruct accessors
static Constant *get(StructType *T, ArrayRef<Constant*> V);
@@ -463,18 +484,18 @@ public:
}
};
-
//===----------------------------------------------------------------------===//
/// Constant Vector Declarations
///
class ConstantVector final : public ConstantAggregate {
friend struct ConstantAggrKeyType<ConstantVector>;
friend class Constant;
- void destroyConstantImpl();
- Value *handleOperandChangeImpl(Value *From, Value *To);
ConstantVector(VectorType *T, ArrayRef<Constant *> Val);
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To);
+
public:
// ConstantVector accessors
static Constant *get(ArrayRef<Constant*> V);
@@ -506,15 +527,16 @@ public:
/// A constant pointer value that points to null
///
class ConstantPointerNull final : public ConstantData {
- ConstantPointerNull(const ConstantPointerNull &) = delete;
-
friend class Constant;
- void destroyConstantImpl();
explicit ConstantPointerNull(PointerType *T)
: ConstantData(T, Value::ConstantPointerNullVal) {}
+ void destroyConstantImpl();
+
public:
+ ConstantPointerNull(const ConstantPointerNull &) = delete;
+
/// Static factory methods - Return objects of the specified value
static ConstantPointerNull *get(PointerType *T);
@@ -541,6 +563,8 @@ public:
///
class ConstantDataSequential : public ConstantData {
friend class LLVMContextImpl;
+ friend class Constant;
+
/// A pointer to the bytes underlying this constant (which is owned by the
/// uniquing StringMap).
const char *DataElements;
@@ -550,9 +574,7 @@ class ConstantDataSequential : public ConstantData {
/// element array of i8, or a 1-element array of i32. They'll both end up in
/// the same StringMap bucket, linked up.
ConstantDataSequential *Next;
- ConstantDataSequential(const ConstantDataSequential &) = delete;
- friend class Constant;
void destroyConstantImpl();
protected:
@@ -563,6 +585,8 @@ protected:
static Constant *getImpl(StringRef Bytes, Type *Ty);
public:
+ ConstantDataSequential(const ConstantDataSequential &) = delete;
+
/// Return true if a ConstantDataSequential can be formed with a vector or
/// array of the specified element type.
/// ConstantDataArray only works with normal float and int types that are
@@ -638,6 +662,7 @@ public:
return V->getValueID() == ConstantDataArrayVal ||
V->getValueID() == ConstantDataVectorVal;
}
+
private:
const char *getElementPointer(unsigned Elt) const;
};
@@ -649,18 +674,23 @@ private:
/// stores all of the elements of the constant as densely packed data, instead
/// of as Value*'s.
class ConstantDataArray final : public ConstantDataSequential {
- void *operator new(size_t, unsigned) = delete;
- ConstantDataArray(const ConstantDataArray &) = delete;
- void anchor() override;
friend class ConstantDataSequential;
+
explicit ConstantDataArray(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}
+
/// Allocate space for exactly zero operands.
void *operator new(size_t s) {
return User::operator new(s, 0);
}
+ void anchor() override;
+
public:
+ ConstantDataArray(const ConstantDataArray &) = delete;
+
+ void *operator new(size_t, unsigned) = delete;
+
/// get() constructors - Return a constant with array type with an element
/// count and element type matching the ArrayRef passed in. Note that this
/// can return a ConstantAggregateZero object.
@@ -707,18 +737,23 @@ public:
/// stores all of the elements of the constant as densely packed data, instead
/// of as Value*'s.
class ConstantDataVector final : public ConstantDataSequential {
- void *operator new(size_t, unsigned) = delete;
- ConstantDataVector(const ConstantDataVector &) = delete;
- void anchor() override;
friend class ConstantDataSequential;
+
explicit ConstantDataVector(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
+
// allocate space for exactly zero operands.
void *operator new(size_t s) {
return User::operator new(s, 0);
}
+ void anchor() override;
+
public:
+ ConstantDataVector(const ConstantDataVector &) = delete;
+
+ void *operator new(size_t, unsigned) = delete;
+
/// get() constructors - Return a constant with vector type with an element
/// count and element type matching the ArrayRef passed in. Note that this
/// can return a ConstantAggregateZero object.
@@ -763,15 +798,16 @@ public:
/// A constant token which is empty
///
class ConstantTokenNone final : public ConstantData {
- ConstantTokenNone(const ConstantTokenNone &) = delete;
-
friend class Constant;
- void destroyConstantImpl();
explicit ConstantTokenNone(LLVMContext &Context)
: ConstantData(Type::getTokenTy(Context), ConstantTokenNoneVal) {}
+ void destroyConstantImpl();
+
public:
+ ConstantTokenNone(const ConstantTokenNone &) = delete;
+
/// Return the ConstantTokenNone.
static ConstantTokenNone *get(LLVMContext &Context);
@@ -784,15 +820,18 @@ public:
/// The address of a basic block.
///
class BlockAddress final : public Constant {
- void *operator new(size_t, unsigned) = delete;
- void *operator new(size_t s) { return User::operator new(s, 2); }
+ friend class Constant;
+
BlockAddress(Function *F, BasicBlock *BB);
- friend class Constant;
+ void *operator new(size_t s) { return User::operator new(s, 2); }
+
void destroyConstantImpl();
Value *handleOperandChangeImpl(Value *From, Value *To);
public:
+ void *operator new(size_t, unsigned) = delete;
+
/// Return a BlockAddress for the specified function and basic block.
static BlockAddress *get(Function *F, BasicBlock *BB);
@@ -824,7 +863,6 @@ struct OperandTraits<BlockAddress> :
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BlockAddress, Value)
-
//===----------------------------------------------------------------------===//
/// A constant value that is initialized with an expression using
/// other constant values.
@@ -834,8 +872,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BlockAddress, Value)
/// maintained in the Value::SubclassData field.
class ConstantExpr : public Constant {
friend struct ConstantExprKeyType;
-
friend class Constant;
+
void destroyConstantImpl();
Value *handleOperandChangeImpl(Value *From, Value *To);
@@ -919,39 +957,51 @@ public:
static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }
+
static Constant *getNSWAdd(Constant *C1, Constant *C2) {
return getAdd(C1, C2, false, true);
}
+
static Constant *getNUWAdd(Constant *C1, Constant *C2) {
return getAdd(C1, C2, true, false);
}
+
static Constant *getNSWSub(Constant *C1, Constant *C2) {
return getSub(C1, C2, false, true);
}
+
static Constant *getNUWSub(Constant *C1, Constant *C2) {
return getSub(C1, C2, true, false);
}
+
static Constant *getNSWMul(Constant *C1, Constant *C2) {
return getMul(C1, C2, false, true);
}
+
static Constant *getNUWMul(Constant *C1, Constant *C2) {
return getMul(C1, C2, true, false);
}
+
static Constant *getNSWShl(Constant *C1, Constant *C2) {
return getShl(C1, C2, false, true);
}
+
static Constant *getNUWShl(Constant *C1, Constant *C2) {
return getShl(C1, C2, true, false);
}
+
static Constant *getExactSDiv(Constant *C1, Constant *C2) {
return getSDiv(C1, C2, true);
}
+
static Constant *getExactUDiv(Constant *C1, Constant *C2) {
return getUDiv(C1, C2, true);
}
+
static Constant *getExactAShr(Constant *C1, Constant *C2) {
return getAShr(C1, C2, true);
}
+
static Constant *getExactLShr(Constant *C1, Constant *C2) {
return getLShr(C1, C2, true);
}
@@ -1071,26 +1121,31 @@ public:
/// Getelementptr form. Value* is only accepted for convenience;
/// all elements must be Constants.
///
+ /// \param InRangeIndex the inrange index if present or None.
/// \param OnlyIfReducedTy see \a getWithOperands() docs.
static Constant *getGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Constant *> IdxList,
bool InBounds = false,
+ Optional<unsigned> InRangeIndex = None,
Type *OnlyIfReducedTy = nullptr) {
return getGetElementPtr(
Ty, C, makeArrayRef((Value * const *)IdxList.data(), IdxList.size()),
- InBounds, OnlyIfReducedTy);
+ InBounds, InRangeIndex, OnlyIfReducedTy);
}
static Constant *getGetElementPtr(Type *Ty, Constant *C, Constant *Idx,
bool InBounds = false,
+ Optional<unsigned> InRangeIndex = None,
Type *OnlyIfReducedTy = nullptr) {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
- return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, OnlyIfReducedTy);
+ return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, InRangeIndex,
+ OnlyIfReducedTy);
}
static Constant *getGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList,
bool InBounds = false,
+ Optional<unsigned> InRangeIndex = None,
Type *OnlyIfReducedTy = nullptr);
/// Create an "inbounds" getelementptr. See the documentation for the
@@ -1201,14 +1256,15 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
/// LangRef.html#undefvalues for details.
///
class UndefValue final : public ConstantData {
- UndefValue(const UndefValue &) = delete;
-
friend class Constant;
- void destroyConstantImpl();
explicit UndefValue(Type *T) : ConstantData(T, UndefValueVal) {}
+ void destroyConstantImpl();
+
public:
+ UndefValue(const UndefValue &) = delete;
+
/// Static factory methods - Return an 'undef' object of the specified type.
static UndefValue *get(Type *T);
@@ -1236,6 +1292,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_CONSTANTS_H
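
A sketch of the getGetElementPtr overloads with the new optional InRangeIndex parameter; the module, the global named tbl, and the chosen indices are illustrative, loosely following the vtable-style use the inrange marker is aimed at.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Constant *gepWithInRange(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  ArrayType *ArrTy = ArrayType::get(I32, 4);
  StructType *STy = StructType::get(Ctx, {ArrTy, ArrTy});
  auto *G = new GlobalVariable(M, STy, /*isConstant=*/true,
                               GlobalValue::InternalLinkage,
                               ConstantAggregateZero::get(STy), "tbl");
  Constant *Idx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, 1),
                     ConstantInt::get(I32, 0)};
  // InRangeIndex is a position within Idx; here it marks the field index (1),
  // so pointers derived from the GEP may only access that field.
  return ConstantExpr::getGetElementPtr(STy, G, Idx, /*InBounds=*/true,
                                        /*InRangeIndex=*/1);
}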
diff --git a/include/llvm/IR/DIBuilder.h b/include/llvm/IR/DIBuilder.h
index 0f2f67f5feaf..932ae51b39dc 100644
--- a/include/llvm/IR/DIBuilder.h
+++ b/include/llvm/IR/DIBuilder.h
@@ -15,21 +15,27 @@
#ifndef LLVM_IR_DIBUILDER_H
#define LLVM_IR_DIBUILDER_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/TrackingMDRef.h"
-#include "llvm/IR/ValueHandle.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cstdint>
namespace llvm {
+
class BasicBlock;
- class Instruction;
+ class Constant;
class Function;
+ class Instruction;
+ class LLVMContext;
class Module;
class Value;
- class Constant;
- class LLVMContext;
- class StringRef;
- template <typename T> class ArrayRef;
class DIBuilder {
Module &M;
@@ -57,9 +63,6 @@ namespace llvm {
/// copy.
DenseMap<MDNode *, SmallVector<TrackingMDNodeRef, 1>> PreservedVariables;
- DIBuilder(const DIBuilder &) = delete;
- void operator=(const DIBuilder &) = delete;
-
/// Create a temporary.
///
/// Create an \a temporary node and track it in \a UnresolvedNodes.
@@ -71,6 +74,8 @@ namespace llvm {
/// If \c AllowUnresolved, collect unresolved nodes attached to the module
/// in order to resolve cycles during \a finalize().
explicit DIBuilder(Module &M, bool AllowUnresolved = true);
+ DIBuilder(const DIBuilder &) = delete;
+ DIBuilder &operator=(const DIBuilder &) = delete;
/// Construct any deferred debug info descriptors.
void finalize();
@@ -78,8 +83,7 @@ namespace llvm {
/// A CompileUnit provides an anchor for all debugging
/// information generated during this instance of compilation.
/// \param Lang Source programming language, e.g. dwarf::DW_LANG_C99
- /// \param File File name
- /// \param Dir Directory
+ /// \param File File info.
/// \param Producer Identify the producer of debugging information
/// and code. Usually this is a compiler
/// version string.
@@ -96,16 +100,21 @@ namespace llvm {
/// \param Kind The kind of debug information to generate.
/// \param DWOId The DWOId if this is a split skeleton compile unit.
DICompileUnit *
- createCompileUnit(unsigned Lang, StringRef File, StringRef Dir,
- StringRef Producer, bool isOptimized, StringRef Flags,
- unsigned RV, StringRef SplitName = StringRef(),
+ createCompileUnit(unsigned Lang, DIFile *File, StringRef Producer,
+ bool isOptimized, StringRef Flags, unsigned RV,
+ StringRef SplitName = StringRef(),
DICompileUnit::DebugEmissionKind Kind =
DICompileUnit::DebugEmissionKind::FullDebug,
- uint64_t DWOId = 0);
+ uint64_t DWOId = 0, bool SplitDebugInlining = true);
- /// Create a file descriptor to hold debugging information
- /// for a file.
- DIFile *createFile(StringRef Filename, StringRef Directory);
+ /// Create a file descriptor to hold debugging information for a file.
+ /// \param Filename File name.
+ /// \param Directory Directory.
+ /// \param CSKind Checksum kind (e.g. CSK_None, CSK_MD5, CSK_SHA1, etc.).
+ /// \param Checksum Checksum data.
+ DIFile *createFile(StringRef Filename, StringRef Directory,
+ DIFile::ChecksumKind CSKind = DIFile::CSK_None,
+ StringRef Checksum = StringRef());
/// Create a single enumerator value.
DIEnumerator *createEnumerator(StringRef Name, int64_t Val);
@@ -120,10 +129,9 @@ namespace llvm {
/// type.
/// \param Name Type name.
/// \param SizeInBits Size of the type.
- /// \param AlignInBits Type alignment.
/// \param Encoding DWARF encoding code, e.g. dwarf::DW_ATE_float.
DIBasicType *createBasicType(StringRef Name, uint64_t SizeInBits,
- uint64_t AlignInBits, unsigned Encoding);
+ unsigned Encoding);
/// Create debugging information entry for a qualified
/// type, e.g. 'const int'.
@@ -137,7 +145,7 @@ namespace llvm {
/// \param AlignInBits Alignment. (optional)
/// \param Name Pointer type name. (optional)
DIDerivedType *createPointerType(DIType *PointeeTy, uint64_t SizeInBits,
- uint64_t AlignInBits = 0,
+ uint32_t AlignInBits = 0,
StringRef Name = "");
/// Create debugging information entry for a pointer to member.
@@ -145,16 +153,16 @@ namespace llvm {
/// \param SizeInBits Size.
/// \param AlignInBits Alignment. (optional)
/// \param Class Type for which this pointer points to members of.
- DIDerivedType *createMemberPointerType(DIType *PointeeTy, DIType *Class,
- uint64_t SizeInBits,
- uint64_t AlignInBits = 0,
- unsigned Flags = 0);
+ DIDerivedType *
+ createMemberPointerType(DIType *PointeeTy, DIType *Class,
+ uint64_t SizeInBits, uint32_t AlignInBits = 0,
+ DINode::DIFlags Flags = DINode::FlagZero);
/// Create debugging information entry for a c++
/// style reference or rvalue reference type.
DIDerivedType *createReferenceType(unsigned Tag, DIType *RTy,
uint64_t SizeInBits = 0,
- uint64_t AlignInBits = 0);
+ uint32_t AlignInBits = 0);
/// Create debugging information entry for a typedef.
/// \param Ty Original type.
@@ -176,7 +184,8 @@ namespace llvm {
/// \param Flags Flags to describe inheritance attribute,
/// e.g. private
DIDerivedType *createInheritance(DIType *Ty, DIType *BaseTy,
- uint64_t BaseOffset, unsigned Flags);
+ uint64_t BaseOffset,
+ DINode::DIFlags Flags);
/// Create debugging information entry for a member.
/// \param Scope Member scope.
@@ -190,9 +199,10 @@ namespace llvm {
/// \param Ty Parent type.
DIDerivedType *createMemberType(DIScope *Scope, StringRef Name,
DIFile *File, unsigned LineNo,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
- DIType *Ty);
+ uint64_t SizeInBits,
+ uint32_t AlignInBits,
+ uint64_t OffsetInBits,
+ DINode::DIFlags Flags, DIType *Ty);
/// Create debugging information entry for a bit field member.
/// \param Scope Member scope.
@@ -200,15 +210,14 @@ namespace llvm {
/// \param File File where this member is defined.
/// \param LineNo Line number.
/// \param SizeInBits Member size.
- /// \param AlignInBits Member alignment.
/// \param OffsetInBits Member offset.
/// \param StorageOffsetInBits Member storage offset.
/// \param Flags Flags to encode member attribute.
/// \param Ty Parent type.
DIDerivedType *createBitFieldMemberType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo,
- uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
- uint64_t StorageOffsetInBits, unsigned Flags, DIType *Ty);
+ uint64_t SizeInBits, uint64_t OffsetInBits,
+ uint64_t StorageOffsetInBits, DINode::DIFlags Flags, DIType *Ty);
/// Create debugging information entry for a
/// C++ static data member.
@@ -219,10 +228,12 @@ namespace llvm {
/// \param Ty Type of the static member.
/// \param Flags Flags to encode member attribute, e.g. private.
/// \param Val Const initializer of the member.
+ /// \param AlignInBits Member alignment.
DIDerivedType *createStaticMemberType(DIScope *Scope, StringRef Name,
DIFile *File, unsigned LineNo,
- DIType *Ty, unsigned Flags,
- llvm::Constant *Val);
+ DIType *Ty, DINode::DIFlags Flags,
+ Constant *Val,
+ uint32_t AlignInBits = 0);
/// Create debugging information entry for Objective-C
/// instance variable.
@@ -236,8 +247,8 @@ namespace llvm {
/// \param Ty Parent type.
/// \param PropertyNode Property associated with this ivar.
DIDerivedType *createObjCIVar(StringRef Name, DIFile *File, unsigned LineNo,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DINode::DIFlags Flags,
DIType *Ty, MDNode *PropertyNode);
/// Create debugging information entry for Objective-C
@@ -271,14 +282,12 @@ namespace llvm {
/// for more info.
/// \param TemplateParms Template type parameters.
/// \param UniqueIdentifier A unique identifier for the class.
- DICompositeType *createClassType(DIScope *Scope, StringRef Name,
- DIFile *File, unsigned LineNumber,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
- DIType *DerivedFrom, DINodeArray Elements,
- DIType *VTableHolder = nullptr,
- MDNode *TemplateParms = nullptr,
- StringRef UniqueIdentifier = "");
+ DICompositeType *createClassType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements,
+ DIType *VTableHolder = nullptr, MDNode *TemplateParms = nullptr,
+ StringRef UniqueIdentifier = "");
/// Create debugging information entry for a struct.
/// \param Scope Scope in which this struct is defined.
@@ -293,7 +302,7 @@ namespace llvm {
/// \param UniqueIdentifier A unique identifier for the struct.
DICompositeType *createStructType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
- uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags,
+ uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags,
DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang = 0,
DIType *VTableHolder = nullptr, StringRef UniqueIdentifier = "");
@@ -310,8 +319,9 @@ namespace llvm {
/// \param UniqueIdentifier A unique identifier for the union.
DICompositeType *createUnionType(DIScope *Scope, StringRef Name,
DIFile *File, unsigned LineNumber,
- uint64_t SizeInBits, uint64_t AlignInBits,
- unsigned Flags, DINodeArray Elements,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ DINode::DIFlags Flags,
+ DINodeArray Elements,
unsigned RunTimeLang = 0,
StringRef UniqueIdentifier = "");
@@ -359,7 +369,7 @@ namespace llvm {
/// \param AlignInBits Alignment.
/// \param Ty Element type.
/// \param Subscripts Subscripts.
- DICompositeType *createArrayType(uint64_t Size, uint64_t AlignInBits,
+ DICompositeType *createArrayType(uint64_t Size, uint32_t AlignInBits,
DIType *Ty, DINodeArray Subscripts);
/// Create debugging information entry for a vector type.
@@ -367,7 +377,7 @@ namespace llvm {
/// \param AlignInBits Alignment.
/// \param Ty Element type.
/// \param Subscripts Subscripts.
- DICompositeType *createVectorType(uint64_t Size, uint64_t AlignInBits,
+ DICompositeType *createVectorType(uint64_t Size, uint32_t AlignInBits,
DIType *Ty, DINodeArray Subscripts);
/// Create debugging information entry for an
@@ -383,7 +393,7 @@ namespace llvm {
/// \param UniqueIdentifier A unique identifier for the enum.
DICompositeType *createEnumerationType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
- uint64_t SizeInBits, uint64_t AlignInBits, DINodeArray Elements,
+ uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
DIType *UnderlyingType, StringRef UniqueIdentifier = "");
/// Create subroutine type.
@@ -392,8 +402,10 @@ namespace llvm {
/// \param Flags E.g.: LValueReference.
/// These flags are used to emit dwarf attributes.
/// \param CC Calling convention, e.g. dwarf::DW_CC_normal
- DISubroutineType *createSubroutineType(DITypeRefArray ParameterTypes,
- unsigned Flags = 0, unsigned CC = 0);
+ DISubroutineType *
+ createSubroutineType(DITypeRefArray ParameterTypes,
+ DINode::DIFlags Flags = DINode::FlagZero,
+ unsigned CC = 0);
/// Create an external type reference.
/// \param Tag Dwarf TAG.
@@ -414,14 +426,14 @@ namespace llvm {
DIScope *Scope, DIFile *F, unsigned Line,
unsigned RuntimeLang = 0,
uint64_t SizeInBits = 0,
- uint64_t AlignInBits = 0,
+ uint32_t AlignInBits = 0,
StringRef UniqueIdentifier = "");
/// Create a temporary forward-declared type.
DICompositeType *createReplaceableCompositeType(
unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line,
unsigned RuntimeLang = 0, uint64_t SizeInBits = 0,
- uint64_t AlignInBits = 0, unsigned Flags = DINode::FlagFwdDecl,
+ uint32_t AlignInBits = 0, DINode::DIFlags Flags = DINode::FlagFwdDecl,
StringRef UniqueIdentifier = "");
/// Retain DIScope* in a module even if it is not referenced
@@ -442,8 +454,7 @@ namespace llvm {
/// implicitly uniques the values returned.
DISubrange *getOrCreateSubrange(int64_t Lo, int64_t Count);
- /// Create a new descriptor for the specified
- /// variable.
+ /// Create a new descriptor for the specified variable.
/// \param Context Variable scope.
/// \param Name Name of the variable.
/// \param LinkageName Mangled name of the variable.
@@ -452,21 +463,23 @@ namespace llvm {
/// \param Ty Variable Type.
/// \param isLocalToUnit Boolean flag indicate whether this variable is
/// externally visible or not.
- /// \param Val llvm::Value of the variable.
+ /// \param Expr The location of the global relative to the attached
+ /// GlobalVariable.
/// \param Decl Reference to the corresponding declaration.
- DIGlobalVariable *createGlobalVariable(DIScope *Context, StringRef Name,
- StringRef LinkageName, DIFile *File,
- unsigned LineNo, DIType *Ty,
- bool isLocalToUnit,
- llvm::Constant *Val,
- MDNode *Decl = nullptr);
+ /// \param AlignInBits Variable alignment (or 0 if no alignment attribute
+ /// was specified).
+ DIGlobalVariableExpression *createGlobalVariableExpression(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DIType *Ty, bool isLocalToUnit,
+ DIExpression *Expr = nullptr, MDNode *Decl = nullptr,
+ uint32_t AlignInBits = 0);
/// Identical to createGlobalVariable
/// except that the resulting DbgNode is temporary and meant to be RAUWed.
DIGlobalVariable *createTempGlobalVariableFwdDecl(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
- unsigned LineNo, DIType *Ty, bool isLocalToUnit, llvm::Constant *Val,
- MDNode *Decl = nullptr);
+ unsigned LineNo, DIType *Ty, bool isLocalToUnit, MDNode *Decl = nullptr,
+ uint32_t AlignInBits = 0);
/// Create a new descriptor for an auto variable. This is a local variable
/// that is not a subprogram parameter.
@@ -476,11 +489,11 @@ namespace llvm {
///
/// If \c AlwaysPreserve, this variable will be referenced from its
/// containing subprogram, and will survive some optimizations.
- DILocalVariable *createAutoVariable(DIScope *Scope, StringRef Name,
- DIFile *File, unsigned LineNo,
- DIType *Ty,
- bool AlwaysPreserve = false,
- unsigned Flags = 0);
+ DILocalVariable *
+ createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File,
+ unsigned LineNo, DIType *Ty, bool AlwaysPreserve = false,
+ DINode::DIFlags Flags = DINode::FlagZero,
+ uint32_t AlignInBits = 0);
/// Create a new descriptor for a parameter variable.
///
@@ -493,11 +506,11 @@ namespace llvm {
///
/// If \c AlwaysPreserve, this variable will be referenced from its
/// containing subprogram, and will survive some optimizations.
- DILocalVariable *createParameterVariable(DIScope *Scope, StringRef Name,
- unsigned ArgNo, DIFile *File,
- unsigned LineNo, DIType *Ty,
- bool AlwaysPreserve = false,
- unsigned Flags = 0);
+ DILocalVariable *
+ createParameterVariable(DIScope *Scope, StringRef Name, unsigned ArgNo,
+ DIFile *File, unsigned LineNo, DIType *Ty,
+ bool AlwaysPreserve = false,
+ DINode::DIFlags Flags = DINode::FlagZero);
/// Create a new descriptor for the specified
/// variable which has a complex address expression for its address.
@@ -510,9 +523,16 @@ namespace llvm {
///
/// \param OffsetInBits Offset of the piece in bits.
/// \param SizeInBits Size of the piece in bits.
- DIExpression *createBitPieceExpression(unsigned OffsetInBits,
+ DIExpression *createFragmentExpression(unsigned OffsetInBits,
unsigned SizeInBits);
+ /// Create an expression for a variable that does not have an address, but
+ /// does have a constant value.
+ DIExpression *createConstantValueExpression(uint64_t Val) {
+ return DIExpression::get(
+ VMContext, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_stack_value});
+ }
+
/// Create a new descriptor for the specified subprogram.
/// See comments in DISubprogram* for descriptions of these fields.
/// \param Scope Function scope.
@@ -532,7 +552,8 @@ namespace llvm {
StringRef LinkageName, DIFile *File,
unsigned LineNo, DISubroutineType *Ty,
bool isLocalToUnit, bool isDefinition,
- unsigned ScopeLine, unsigned Flags = 0,
+ unsigned ScopeLine,
+ DINode::DIFlags Flags = DINode::FlagZero,
bool isOptimized = false,
DITemplateParameterArray TParams = nullptr,
DISubprogram *Decl = nullptr);
@@ -542,8 +563,9 @@ namespace llvm {
DISubprogram *createTempFunctionFwdDecl(
DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
- bool isDefinition, unsigned ScopeLine, unsigned Flags = 0,
- bool isOptimized = false, DITemplateParameterArray TParams = nullptr,
+ bool isDefinition, unsigned ScopeLine,
+ DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
+ DITemplateParameterArray TParams = nullptr,
DISubprogram *Decl = nullptr);
/// Create a new descriptor for the specified C++ method.
@@ -568,14 +590,13 @@ namespace llvm {
/// These flags are used to emit dwarf attributes.
/// \param isOptimized True if optimization is ON.
/// \param TParams Function template parameters.
- DISubprogram *
- createMethod(DIScope *Scope, StringRef Name, StringRef LinkageName,
- DIFile *File, unsigned LineNo, DISubroutineType *Ty,
- bool isLocalToUnit, bool isDefinition, unsigned Virtuality = 0,
- unsigned VTableIndex = 0, int ThisAdjustment = 0,
- DIType *VTableHolder = nullptr, unsigned Flags = 0,
- bool isOptimized = false,
- DITemplateParameterArray TParams = nullptr);
+ DISubprogram *createMethod(
+ DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+ bool isDefinition, unsigned Virtuality = 0, unsigned VTableIndex = 0,
+ int ThisAdjustment = 0, DIType *VTableHolder = nullptr,
+ DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
+ DITemplateParameterArray TParams = nullptr);
/// This creates new descriptor for a namespace with the specified
/// parent scope.
@@ -583,8 +604,9 @@ namespace llvm {
/// \param Name Name of this namespace
/// \param File Source file
/// \param LineNo Line number
+ /// \param ExportSymbols True for C++ inline namespaces.
DINamespace *createNameSpace(DIScope *Scope, StringRef Name, DIFile *File,
- unsigned LineNo);
+ unsigned LineNo, bool ExportSymbols);
/// This creates new descriptor for a module with the specified
/// parent scope.
@@ -726,6 +748,7 @@ namespace llvm {
return Replacement;
}
};
+
} // end namespace llvm
-#endif
+#endif // LLVM_IR_DIBUILDER_H
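
A sketch of the revised DIBuilder surface shown above (checksummed createFile, DIFile-based createCompileUnit, createBasicType without alignment, DIGlobalVariableExpression for globals, and the new expression helpers); the file names, producer string, and checksum value are placeholders.

#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Dwarf.h"

using namespace llvm;

static void emitDebugInfo(Module &M) {
  DIBuilder DIB(M);
  // createFile can now record a checksum alongside the path.
  DIFile *File = DIB.createFile("t.c", "/tmp", DIFile::CSK_MD5,
                                "aabbccddeeff00112233445566778899");
  // createCompileUnit takes the DIFile directly instead of name + directory.
  DICompileUnit *CU =
      DIB.createCompileUnit(dwarf::DW_LANG_C99, File, "example producer",
                            /*isOptimized=*/false, /*Flags=*/"", /*RV=*/0);
  // Basic types no longer carry an alignment parameter.
  DIBasicType *IntTy = DIB.createBasicType("int", 32, dwarf::DW_ATE_signed);
  // Globals are described by a DIGlobalVariableExpression rather than a
  // DIGlobalVariable that holds the llvm::Constant.
  DIB.createGlobalVariableExpression(CU, "g", /*LinkageName=*/"", File,
                                     /*LineNo=*/1, IntTy,
                                     /*isLocalToUnit=*/false);
  // Describe a variable occupying the upper half of a 64-bit location.
  DIB.createFragmentExpression(/*OffsetInBits=*/32, /*SizeInBits=*/32);
  // Describe a variable that has been folded away to the constant 42.
  DIB.createConstantValueExpression(42);
  DIB.finalize();
}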
diff --git a/include/llvm/IR/DataLayout.h b/include/llvm/IR/DataLayout.h
index 173121b72ffd..6f37669f9768 100644
--- a/include/llvm/IR/DataLayout.h
+++ b/include/llvm/IR/DataLayout.h
@@ -21,6 +21,7 @@
#define LLVM_IR_DATALAYOUT_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
@@ -144,6 +145,10 @@ private:
// The StructType -> StructLayout map.
mutable void *LayoutMap;
+ /// Pointers in these address spaces are non-integral, and don't have a
+ /// well-defined bitwise representation.
+ SmallVector<unsigned, 8> NonIntegralAddressSpaces;
+
void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width);
unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
@@ -199,6 +204,7 @@ public:
LegalIntWidths = DL.LegalIntWidths;
Alignments = DL.Alignments;
Pointers = DL.Pointers;
+ NonIntegralAddressSpaces = DL.NonIntegralAddressSpaces;
return *this;
}
@@ -255,7 +261,7 @@ public:
bool hasLinkerPrivateGlobalPrefix() const { return ManglingMode == MM_MachO; }
- const char *getLinkerPrivateGlobalPrefix() const {
+ StringRef getLinkerPrivateGlobalPrefix() const {
if (ManglingMode == MM_MachO)
return "l";
return "";
@@ -275,16 +281,16 @@ public:
llvm_unreachable("invalid mangling mode");
}
- const char *getPrivateGlobalPrefix() const {
+ StringRef getPrivateGlobalPrefix() const {
switch (ManglingMode) {
case MM_None:
return "";
case MM_ELF:
+ case MM_WinCOFF:
return ".L";
case MM_Mips:
return "$";
case MM_MachO:
- case MM_WinCOFF:
case MM_WinCOFFX86:
return "L";
}
@@ -320,6 +326,23 @@ public:
/// the backends/clients are updated.
unsigned getPointerSize(unsigned AS = 0) const;
+ /// Return the address spaces containing non-integral pointers. Pointers in
+ /// this address space don't have a well-defined bitwise representation.
+ ArrayRef<unsigned> getNonIntegralAddressSpaces() const {
+ return NonIntegralAddressSpaces;
+ }
+
+ bool isNonIntegralPointerType(PointerType *PT) const {
+ ArrayRef<unsigned> NonIntegralSpaces = getNonIntegralAddressSpaces();
+ return find(NonIntegralSpaces, PT->getAddressSpace()) !=
+ NonIntegralSpaces.end();
+ }
+
+ bool isNonIntegralPointerType(Type *Ty) const {
+ auto *PTy = dyn_cast<PointerType>(Ty);
+ return PTy && isNonIntegralPointerType(PTy);
+ }
+
/// Layout pointer size, in bits
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
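
A sketch of querying the new non-integral pointer support; it assumes the ni:<AS> data layout component that accompanies this interface, and the layout string and address space are illustrative.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static bool hasNonIntegralGCPointers(LLVMContext &Ctx) {
  // "ni:1" declares address space 1 as non-integral.
  DataLayout DL("e-m:e-i64:64-ni:1");
  PointerType *P1 = Type::getInt8PtrTy(Ctx, /*AddressSpace=*/1);
  PointerType *P0 = Type::getInt8PtrTy(Ctx, /*AddressSpace=*/0);
  // True for P1, false for P0: only AS 1 lacks a stable bitwise representation.
  return DL.isNonIntegralPointerType(P1) && !DL.isNonIntegralPointerType(P0);
}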
diff --git a/include/llvm/IR/DebugInfo.h b/include/llvm/IR/DebugInfo.h
index 972042432b7b..04f46197b1c3 100644
--- a/include/llvm/IR/DebugInfo.h
+++ b/include/llvm/IR/DebugInfo.h
@@ -44,6 +44,18 @@ DISubprogram *getDISubprogram(const MDNode *Scope);
bool StripDebugInfo(Module &M);
bool stripDebugInfo(Function &F);
+/// Downgrade the debug info in a module to contain only line table information.
+///
+/// In order to convert debug info to what -gline-tables-only would have
+/// created, this does the following:
+/// 1) Delete all debug intrinsics.
+/// 2) Delete all non-CU named metadata debug info nodes.
+/// 3) Create new DebugLocs for each instruction.
+/// 4) Create a new CU debug info, and similarly for every metadata node
+/// that's reachable from the CU debug info.
+/// All debug type metadata nodes are unreachable and garbage collected.
+bool stripNonLineTableDebugInfo(Module &M);
+
/// \brief Return Debug Info Metadata Version by checking module flags.
unsigned getDebugMetadataVersionFromModule(const Module &M);
@@ -77,7 +89,7 @@ private:
void processSubprogram(DISubprogram *SP);
void processScope(DIScope *Scope);
bool addCompileUnit(DICompileUnit *CU);
- bool addGlobalVariable(DIGlobalVariable *DIG);
+ bool addGlobalVariable(DIGlobalVariableExpression *DIG);
bool addSubprogram(DISubprogram *SP);
bool addType(DIType *DT);
bool addScope(DIScope *Scope);
@@ -86,8 +98,8 @@ public:
typedef SmallVectorImpl<DICompileUnit *>::const_iterator
compile_unit_iterator;
typedef SmallVectorImpl<DISubprogram *>::const_iterator subprogram_iterator;
- typedef SmallVectorImpl<DIGlobalVariable *>::const_iterator
- global_variable_iterator;
+ typedef SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator
+ global_variable_expression_iterator;
typedef SmallVectorImpl<DIType *>::const_iterator type_iterator;
typedef SmallVectorImpl<DIScope *>::const_iterator scope_iterator;
@@ -99,7 +111,7 @@ public:
return make_range(SPs.begin(), SPs.end());
}
- iterator_range<global_variable_iterator> global_variables() const {
+ iterator_range<global_variable_expression_iterator> global_variables() const {
return make_range(GVs.begin(), GVs.end());
}
@@ -120,7 +132,7 @@ public:
private:
SmallVector<DICompileUnit *, 8> CUs;
SmallVector<DISubprogram *, 8> SPs;
- SmallVector<DIGlobalVariable *, 8> GVs;
+ SmallVector<DIGlobalVariableExpression *, 8> GVs;
SmallVector<DIType *, 8> TYs;
SmallVector<DIScope *, 8> Scopes;
SmallPtrSet<const MDNode *, 32> NodesSeen;
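
A sketch of the updated DebugInfo.h surface: walking a module's globals, which are now DIGlobalVariableExpression nodes, and then reducing the module to line-table-only debug info; getVariable() is the accessor DIGlobalVariableExpression provides in DebugInfoMetadata.h.

#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static void inspectAndStrip(Module &M) {
  DebugInfoFinder Finder;
  Finder.processModule(M);
  // global_variables() now yields DIGlobalVariableExpression nodes, which
  // pair a DIGlobalVariable with the DIExpression locating it.
  for (DIGlobalVariableExpression *GVE : Finder.global_variables())
    (void)GVE->getVariable();
  // Downgrade everything else to what -gline-tables-only would have produced.
  stripNonLineTableDebugInfo(M);
}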
diff --git a/include/llvm/IR/DebugInfoFlags.def b/include/llvm/IR/DebugInfoFlags.def
index 26238c349e7e..87f3dc9dbdd3 100644
--- a/include/llvm/IR/DebugInfoFlags.def
+++ b/include/llvm/IR/DebugInfoFlags.def
@@ -16,6 +16,8 @@
#error "Missing macro definition of HANDLE_DI_FLAG"
#endif
+HANDLE_DI_FLAG(0, Zero) // Use it as the zero value.
+ // For example: void foo(DIFlags Flags = FlagZero).
HANDLE_DI_FLAG(1, Private)
HANDLE_DI_FLAG(2, Protected)
HANDLE_DI_FLAG(3, Public)
@@ -38,5 +40,19 @@ HANDLE_DI_FLAG((2 << 16), MultipleInheritance)
HANDLE_DI_FLAG((3 << 16), VirtualInheritance)
HANDLE_DI_FLAG((1 << 18), IntroducedVirtual)
HANDLE_DI_FLAG((1 << 19), BitField)
+HANDLE_DI_FLAG((1 << 20), NoReturn)
+HANDLE_DI_FLAG((1 << 21), MainSubprogram)
+
+// To avoid needing a dedicated value for IndirectVirtualBase, we use
+// the bitwise or of Virtual and FwdDecl, which does not otherwise
+// make sense for inheritance.
+HANDLE_DI_FLAG((1 << 2) | (1 << 5), IndirectVirtualBase)
+
+#ifdef DI_FLAG_LARGEST_NEEDED
+// Intended to be used with ADT/BitmaskEnum.h.
+// NOTE: This must always equal the largest flag; check this when adding a new flag.
+HANDLE_DI_FLAG((1 << 21), Largest)
+#undef DI_FLAG_LARGEST_NEEDED
+#endif
#undef HANDLE_DI_FLAG
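
A sketch of DINode::DIFlags as a typed bitmask enum; FlagArtificial is an existing flag not visible in this hunk and is used here purely for illustration.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

static DINode::DIFlags buildFlags() {
  // FlagZero is the new explicit "no flags" value.
  DINode::DIFlags F = DINode::FlagZero;
  // LLVM_MARK_AS_BITMASK_ENUM makes the bitwise operators type-safe.
  F |= DINode::FlagArtificial;
  F |= DINode::FlagPrivate;
  // splitFlags decomposes the value back into its individual components.
  SmallVector<DINode::DIFlags, 4> Split;
  DINode::DIFlags Remainder = DINode::splitFlags(F, Split);
  (void)Remainder; // Any bits that are not recognized flags.
  return F;
}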
diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h
index 853a94afd9d9..26f4626ead10 100644
--- a/include/llvm/IR/DebugInfoMetadata.h
+++ b/include/llvm/IR/DebugInfoMetadata.h
@@ -14,8 +14,21 @@
#ifndef LLVM_IR_DEBUGINFOMETADATA_H
#define LLVM_IR_DEBUGINFOMETADATA_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Dwarf.h"
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <type_traits>
+#include <vector>
// Helper macros for defining get() overrides.
#define DEFINE_MDNODE_GET_UNPACK_IMPL(...) __VA_ARGS__
@@ -108,16 +121,20 @@ public:
public:
iterator() = default;
explicit iterator(MDNode::op_iterator I) : I(I) {}
+
DITypeRef operator*() const { return DITypeRef(*I); }
+
iterator &operator++() {
++I;
return *this;
}
+
iterator operator++(int) {
iterator Temp(*this);
++I;
return Temp;
}
+
bool operator==(const iterator &X) const { return I == X.I; }
bool operator!=(const iterator &X) const { return I != X.I; }
};
@@ -127,7 +144,7 @@ public:
iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
};
-/// \brief Tagged DWARF-like metadata node.
+/// Tagged DWARF-like metadata node.
///
/// A metadata node with a DWARF tag (i.e., a constant named \c DW_TAG_*,
/// defined in llvm/Support/Dwarf.h). Called \a DINode because it's
@@ -167,27 +184,29 @@ protected:
public:
unsigned getTag() const { return SubclassData16; }
- /// \brief Debug info flags.
+ /// Debug info flags.
///
/// The three accessibility flags are mutually exclusive and rolled together
/// in the first two bits.
- enum DIFlags {
+ enum DIFlags : uint32_t {
#define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
+#define DI_FLAG_LARGEST_NEEDED
#include "llvm/IR/DebugInfoFlags.def"
FlagAccessibility = FlagPrivate | FlagProtected | FlagPublic,
FlagPtrToMemberRep = FlagSingleInheritance | FlagMultipleInheritance |
FlagVirtualInheritance,
+ LLVM_MARK_AS_BITMASK_ENUM(FlagLargest)
};
- static unsigned getFlag(StringRef Flag);
- static const char *getFlagString(unsigned Flag);
+ static DIFlags getFlag(StringRef Flag);
+ static StringRef getFlagString(DIFlags Flag);
- /// \brief Split up a flags bitfield.
+ /// Split up a flags bitfield.
///
/// Split \c Flags into \c SplitFlags, a vector of its components. Returns
/// any remaining (unrecognized) bits.
- static unsigned splitFlags(unsigned Flags,
- SmallVectorImpl<unsigned> &SplitFlags);
+ static DIFlags splitFlags(DIFlags Flags,
+ SmallVectorImpl<DIFlags> &SplitFlags);
static bool classof(const Metadata *MD) {
switch (MD->getMetadataID()) {
@@ -229,7 +248,7 @@ template <class T>
struct simplify_type<TypedDINodeRef<T>>
: simplify_type<const TypedDINodeRef<T>> {};
-/// \brief Generic tagged DWARF-like metadata node.
+/// Generic tagged DWARF-like metadata node.
///
/// An un-specialized DWARF-like metadata node. The first operand is a
/// (possibly empty) null-separated \a MDString header that contains arbitrary
@@ -277,7 +296,7 @@ public:
ArrayRef<Metadata *> DwarfOps),
(Tag, Header, DwarfOps))
- /// \brief Return a (temporary) clone of this.
+ /// Return a (temporary) clone of this.
TempGenericDINode clone() const { return cloneImpl(); }
unsigned getTag() const { return SubclassData16; }
@@ -303,7 +322,7 @@ public:
}
};
-/// \brief Array subrange.
+/// Array subrange.
///
/// TODO: Merge into node for DW_TAG_array_type, which should have a custom
/// type.
@@ -342,7 +361,7 @@ public:
}
};
-/// \brief Enumeration value.
+/// Enumeration value.
///
/// TODO: Add a pointer to the context (DW_TAG_enumeration_type) once that no
/// longer creates a type cycle.
@@ -390,7 +409,7 @@ public:
}
};
-/// \brief Base class for scope-like contexts.
+/// Base class for scope-like contexts.
///
/// Base class for lexical scopes and types (which are also declaration
/// contexts).
@@ -412,7 +431,7 @@ public:
StringRef getName() const;
DIScopeRef getScope() const;
- /// \brief Return the raw underlying file.
+ /// Return the raw underlying file.
///
/// An \a DIFile is an \a DIScope, but it doesn't point at a separate file
/// (it\em is the file). If \c this is an \a DIFile, we need to return \c
@@ -443,7 +462,7 @@ public:
}
};
-/// \brief File.
+/// File.
///
/// TODO: Merge with directory/file node (including users).
/// TODO: Canonicalize paths on creation.
@@ -451,38 +470,62 @@ class DIFile : public DIScope {
friend class LLVMContextImpl;
friend class MDNode;
- DIFile(LLVMContext &C, StorageType Storage, ArrayRef<Metadata *> Ops)
- : DIScope(C, DIFileKind, Storage, dwarf::DW_TAG_file_type, Ops) {}
+public:
+ enum ChecksumKind {
+ CSK_None,
+ CSK_MD5,
+ CSK_SHA1,
+ CSK_Last = CSK_SHA1 // Should be last enumeration.
+ };
+
+private:
+ ChecksumKind CSKind;
+
+ DIFile(LLVMContext &C, StorageType Storage, ChecksumKind CSK,
+ ArrayRef<Metadata *> Ops)
+ : DIScope(C, DIFileKind, Storage, dwarf::DW_TAG_file_type, Ops),
+ CSKind(CSK) {}
~DIFile() = default;
static DIFile *getImpl(LLVMContext &Context, StringRef Filename,
- StringRef Directory, StorageType Storage,
- bool ShouldCreate = true) {
+ StringRef Directory, ChecksumKind CSK, StringRef CS,
+ StorageType Storage, bool ShouldCreate = true) {
return getImpl(Context, getCanonicalMDString(Context, Filename),
- getCanonicalMDString(Context, Directory), Storage,
- ShouldCreate);
+ getCanonicalMDString(Context, Directory), CSK,
+ getCanonicalMDString(Context, CS), Storage, ShouldCreate);
}
static DIFile *getImpl(LLVMContext &Context, MDString *Filename,
- MDString *Directory, StorageType Storage,
- bool ShouldCreate = true);
+ MDString *Directory, ChecksumKind CSK, MDString *CS,
+ StorageType Storage, bool ShouldCreate = true);
TempDIFile cloneImpl() const {
- return getTemporary(getContext(), getFilename(), getDirectory());
+ return getTemporary(getContext(), getFilename(), getDirectory(),
+ getChecksumKind(), getChecksum());
}
public:
- DEFINE_MDNODE_GET(DIFile, (StringRef Filename, StringRef Directory),
- (Filename, Directory))
- DEFINE_MDNODE_GET(DIFile, (MDString * Filename, MDString *Directory),
- (Filename, Directory))
+ DEFINE_MDNODE_GET(DIFile, (StringRef Filename, StringRef Directory,
+ ChecksumKind CSK = CSK_None,
+ StringRef CS = StringRef()),
+ (Filename, Directory, CSK, CS))
+ DEFINE_MDNODE_GET(DIFile, (MDString *Filename, MDString *Directory,
+ ChecksumKind CSK = CSK_None,
+ MDString *CS = nullptr),
+ (Filename, Directory, CSK, CS))
TempDIFile clone() const { return cloneImpl(); }
StringRef getFilename() const { return getStringOperand(0); }
StringRef getDirectory() const { return getStringOperand(1); }
+ StringRef getChecksum() const { return getStringOperand(2); }
+ ChecksumKind getChecksumKind() const { return CSKind; }
+ StringRef getChecksumKindAsString() const;
MDString *getRawFilename() const { return getOperandAs<MDString>(0); }
MDString *getRawDirectory() const { return getOperandAs<MDString>(1); }
+ MDString *getRawChecksum() const { return getOperandAs<MDString>(2); }
+
+ static ChecksumKind getChecksumKind(StringRef CSKindStr);
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == DIFileKind;
@@ -501,29 +544,29 @@ StringRef DIScope::getDirectory() const {
return "";
}
-/// \brief Base class for types.
+/// Base class for types.
///
/// TODO: Remove the hardcoded name and context, since many types don't use
/// them.
/// TODO: Split up flags.
class DIType : public DIScope {
unsigned Line;
- unsigned Flags;
+ DIFlags Flags;
uint64_t SizeInBits;
- uint64_t AlignInBits;
uint64_t OffsetInBits;
+ uint32_t AlignInBits;
protected:
DIType(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
- unsigned Line, uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags, ArrayRef<Metadata *> Ops)
+ unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags, ArrayRef<Metadata *> Ops)
: DIScope(C, ID, Storage, Tag, Ops) {
init(Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
}
~DIType() = default;
- void init(unsigned Line, uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags) {
+ void init(unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags) {
this->Line = Line;
this->Flags = Flags;
this->SizeInBits = SizeInBits;
@@ -533,7 +576,7 @@ protected:
/// Change fields in place.
void mutate(unsigned Tag, unsigned Line, uint64_t SizeInBits,
- uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags) {
+ uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags) {
assert(isDistinct() && "Only distinct nodes can mutate");
setTag(Tag);
init(Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
@@ -546,9 +589,10 @@ public:
unsigned getLine() const { return Line; }
uint64_t getSizeInBits() const { return SizeInBits; }
- uint64_t getAlignInBits() const { return AlignInBits; }
+ uint32_t getAlignInBits() const { return AlignInBits; }
+ uint32_t getAlignInBytes() const { return getAlignInBits() / CHAR_BIT; }
uint64_t getOffsetInBits() const { return OffsetInBits; }
- unsigned getFlags() const { return Flags; }
+ DIFlags getFlags() const { return Flags; }
DIScopeRef getScope() const { return DIScopeRef(getRawScope()); }
StringRef getName() const { return getStringOperand(2); }
@@ -557,7 +601,7 @@ public:
Metadata *getRawScope() const { return getOperand(1); }
MDString *getRawName() const { return getOperandAs<MDString>(2); }
- void setFlags(unsigned NewFlags) {
+ void setFlags(DIFlags NewFlags) {
assert(!isUniqued() && "Cannot set flags on uniqued nodes");
Flags = NewFlags;
}
@@ -600,7 +644,7 @@ public:
}
};
-/// \brief Basic type, like 'int' or 'float'.
+/// Basic type, like 'int' or 'float'.
///
/// TODO: Split out DW_TAG_unspecified_type.
/// TODO: Drop unused accessors.
@@ -611,23 +655,23 @@ class DIBasicType : public DIType {
unsigned Encoding;
DIBasicType(LLVMContext &C, StorageType Storage, unsigned Tag,
- uint64_t SizeInBits, uint64_t AlignInBits, unsigned Encoding,
+ uint64_t SizeInBits, uint32_t AlignInBits, unsigned Encoding,
ArrayRef<Metadata *> Ops)
: DIType(C, DIBasicTypeKind, Storage, Tag, 0, SizeInBits, AlignInBits, 0,
- 0, Ops),
+ FlagZero, Ops),
Encoding(Encoding) {}
~DIBasicType() = default;
static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
StringRef Name, uint64_t SizeInBits,
- uint64_t AlignInBits, unsigned Encoding,
+ uint32_t AlignInBits, unsigned Encoding,
StorageType Storage, bool ShouldCreate = true) {
return getImpl(Context, Tag, getCanonicalMDString(Context, Name),
SizeInBits, AlignInBits, Encoding, Storage, ShouldCreate);
}
static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
MDString *Name, uint64_t SizeInBits,
- uint64_t AlignInBits, unsigned Encoding,
+ uint32_t AlignInBits, unsigned Encoding,
StorageType Storage, bool ShouldCreate = true);
TempDIBasicType cloneImpl() const {
@@ -640,11 +684,11 @@ public:
(Tag, Name, 0, 0, 0))
DEFINE_MDNODE_GET(DIBasicType,
(unsigned Tag, StringRef Name, uint64_t SizeInBits,
- uint64_t AlignInBits, unsigned Encoding),
+ uint32_t AlignInBits, unsigned Encoding),
(Tag, Name, SizeInBits, AlignInBits, Encoding))
DEFINE_MDNODE_GET(DIBasicType,
(unsigned Tag, MDString *Name, uint64_t SizeInBits,
- uint64_t AlignInBits, unsigned Encoding),
+ uint32_t AlignInBits, unsigned Encoding),
(Tag, Name, SizeInBits, AlignInBits, Encoding))
TempDIBasicType clone() const { return cloneImpl(); }
@@ -656,7 +700,7 @@ public:
}
};
-/// \brief Derived types.
+/// Derived types.
///
/// This includes qualified types, pointers, references, friends, typedefs, and
/// class members.
@@ -667,8 +711,8 @@ class DIDerivedType : public DIType {
friend class MDNode;
DIDerivedType(LLVMContext &C, StorageType Storage, unsigned Tag,
- unsigned Line, uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags, ArrayRef<Metadata *> Ops)
+ unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags, ArrayRef<Metadata *> Ops)
: DIType(C, DIDerivedTypeKind, Storage, Tag, Line, SizeInBits,
AlignInBits, OffsetInBits, Flags, Ops) {}
~DIDerivedType() = default;
@@ -676,8 +720,8 @@ class DIDerivedType : public DIType {
static DIDerivedType *getImpl(LLVMContext &Context, unsigned Tag,
StringRef Name, DIFile *File, unsigned Line,
DIScopeRef Scope, DITypeRef BaseType,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags,
Metadata *ExtraData, StorageType Storage,
bool ShouldCreate = true) {
return getImpl(Context, Tag, getCanonicalMDString(Context, Name), File,
@@ -687,8 +731,8 @@ class DIDerivedType : public DIType {
static DIDerivedType *getImpl(LLVMContext &Context, unsigned Tag,
MDString *Name, Metadata *File, unsigned Line,
Metadata *Scope, Metadata *BaseType,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags,
Metadata *ExtraData, StorageType Storage,
bool ShouldCreate = true);
@@ -703,16 +747,16 @@ public:
DEFINE_MDNODE_GET(DIDerivedType,
(unsigned Tag, MDString *Name, Metadata *File,
unsigned Line, Metadata *Scope, Metadata *BaseType,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags,
Metadata *ExtraData = nullptr),
(Tag, Name, File, Line, Scope, BaseType, SizeInBits,
AlignInBits, OffsetInBits, Flags, ExtraData))
DEFINE_MDNODE_GET(DIDerivedType,
(unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
DIScopeRef Scope, DITypeRef BaseType, uint64_t SizeInBits,
- uint64_t AlignInBits, uint64_t OffsetInBits,
- unsigned Flags, Metadata *ExtraData = nullptr),
+ uint32_t AlignInBits, uint64_t OffsetInBits,
+ DIFlags Flags, Metadata *ExtraData = nullptr),
(Tag, Name, File, Line, Scope, BaseType, SizeInBits,
AlignInBits, OffsetInBits, Flags, ExtraData))
@@ -722,7 +766,7 @@ public:
DITypeRef getBaseType() const { return DITypeRef(getRawBaseType()); }
Metadata *getRawBaseType() const { return getOperand(3); }
- /// \brief Get extra data associated with this derived type.
+ /// Get extra data associated with this derived type.
///
/// Class type for pointer-to-members, objective-c property node for ivars,
/// or global constant wrapper for static members.
@@ -732,7 +776,7 @@ public:
Metadata *getExtraData() const { return getRawExtraData(); }
Metadata *getRawExtraData() const { return getOperand(4); }
- /// \brief Get casted version of extra data.
+ /// Get casted version of extra data.
/// @{
DITypeRef getClassType() const {
assert(getTag() == dwarf::DW_TAG_ptr_to_member_type);
@@ -760,7 +804,7 @@ public:
}
};
-/// \brief Composite types.
+/// Composite types.
///
/// TODO: Detach from DerivedTypeBase (split out MDEnumType?).
/// TODO: Create a custom, unrelated node for DW_TAG_array_type.
@@ -772,7 +816,7 @@ class DICompositeType : public DIType {
DICompositeType(LLVMContext &C, StorageType Storage, unsigned Tag,
unsigned Line, unsigned RuntimeLang, uint64_t SizeInBits,
- uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
ArrayRef<Metadata *> Ops)
: DIType(C, DICompositeTypeKind, Storage, Tag, Line, SizeInBits,
AlignInBits, OffsetInBits, Flags, Ops),
@@ -781,8 +825,8 @@ class DICompositeType : public DIType {
/// Change fields in place.
void mutate(unsigned Tag, unsigned Line, unsigned RuntimeLang,
- uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
- unsigned Flags) {
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags) {
assert(isDistinct() && "Only distinct nodes can mutate");
assert(getRawIdentifier() && "Only ODR-uniqued nodes should mutate");
this->RuntimeLang = RuntimeLang;
@@ -792,8 +836,8 @@ class DICompositeType : public DIType {
static DICompositeType *
getImpl(LLVMContext &Context, unsigned Tag, StringRef Name, Metadata *File,
unsigned Line, DIScopeRef Scope, DITypeRef BaseType,
- uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
- uint64_t Flags, DINodeArray Elements, unsigned RuntimeLang,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ DIFlags Flags, DINodeArray Elements, unsigned RuntimeLang,
DITypeRef VTableHolder, DITemplateParameterArray TemplateParams,
StringRef Identifier, StorageType Storage, bool ShouldCreate = true) {
return getImpl(
@@ -805,8 +849,8 @@ class DICompositeType : public DIType {
static DICompositeType *
getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
unsigned Line, Metadata *Scope, Metadata *BaseType,
- uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
- unsigned Flags, Metadata *Elements, unsigned RuntimeLang,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ DIFlags Flags, Metadata *Elements, unsigned RuntimeLang,
Metadata *VTableHolder, Metadata *TemplateParams,
MDString *Identifier, StorageType Storage, bool ShouldCreate = true);
@@ -822,8 +866,8 @@ public:
DEFINE_MDNODE_GET(DICompositeType,
(unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
DIScopeRef Scope, DITypeRef BaseType, uint64_t SizeInBits,
- uint64_t AlignInBits, uint64_t OffsetInBits,
- unsigned Flags, DINodeArray Elements, unsigned RuntimeLang,
+ uint32_t AlignInBits, uint64_t OffsetInBits,
+ DIFlags Flags, DINodeArray Elements, unsigned RuntimeLang,
DITypeRef VTableHolder,
DITemplateParameterArray TemplateParams = nullptr,
StringRef Identifier = ""),
@@ -833,8 +877,8 @@ public:
DEFINE_MDNODE_GET(DICompositeType,
(unsigned Tag, MDString *Name, Metadata *File,
unsigned Line, Metadata *Scope, Metadata *BaseType,
- uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags, Metadata *Elements,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
unsigned RuntimeLang, Metadata *VTableHolder,
Metadata *TemplateParams = nullptr,
MDString *Identifier = nullptr),
@@ -854,8 +898,8 @@ public:
static DICompositeType *
getODRType(LLVMContext &Context, MDString &Identifier, unsigned Tag,
MDString *Name, Metadata *File, unsigned Line, Metadata *Scope,
- Metadata *BaseType, uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags, Metadata *Elements,
+ Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
unsigned RuntimeLang, Metadata *VTableHolder,
Metadata *TemplateParams);
static DICompositeType *getODRTypeIfExists(LLVMContext &Context,
@@ -873,8 +917,8 @@ public:
static DICompositeType *
buildODRType(LLVMContext &Context, MDString &Identifier, unsigned Tag,
MDString *Name, Metadata *File, unsigned Line, Metadata *Scope,
- Metadata *BaseType, uint64_t SizeInBits, uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags, Metadata *Elements,
+ Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
unsigned RuntimeLang, Metadata *VTableHolder,
Metadata *TemplateParams);
@@ -895,7 +939,7 @@ public:
Metadata *getRawTemplateParams() const { return getOperand(6); }
MDString *getRawIdentifier() const { return getOperandAs<MDString>(7); }
- /// \brief Replace operands.
+ /// Replace operands.
///
/// If this \a isUniqued() and not \a isResolved(), on a uniquing collision
/// this will be RAUW'ed and deleted. Use a \a TrackingMDRef to keep track
@@ -904,7 +948,7 @@ public:
void replaceElements(DINodeArray Elements) {
#ifndef NDEBUG
for (DINode *Op : getElements())
- assert(std::find(Elements->op_begin(), Elements->op_end(), Op) &&
+ assert(is_contained(Elements->operands(), Op) &&
"Lost a member during member list replacement");
#endif
replaceOperandWith(4, Elements.get());
@@ -922,7 +966,7 @@ public:
}
};
-/// \brief Type array for a subprogram.
+/// Type array for a subprogram.
///
/// TODO: Fold the array of types in directly as operands.
class DISubroutineType : public DIType {
@@ -933,20 +977,20 @@ class DISubroutineType : public DIType {
/// type dwarf::CallingConvention.
uint8_t CC;
- DISubroutineType(LLVMContext &C, StorageType Storage, unsigned Flags,
+ DISubroutineType(LLVMContext &C, StorageType Storage, DIFlags Flags,
uint8_t CC, ArrayRef<Metadata *> Ops)
: DIType(C, DISubroutineTypeKind, Storage, dwarf::DW_TAG_subroutine_type,
0, 0, 0, 0, Flags, Ops),
CC(CC) {}
~DISubroutineType() = default;
- static DISubroutineType *getImpl(LLVMContext &Context, unsigned Flags,
+ static DISubroutineType *getImpl(LLVMContext &Context, DIFlags Flags,
uint8_t CC, DITypeRefArray TypeArray,
StorageType Storage,
bool ShouldCreate = true) {
return getImpl(Context, Flags, CC, TypeArray.get(), Storage, ShouldCreate);
}
- static DISubroutineType *getImpl(LLVMContext &Context, unsigned Flags,
+ static DISubroutineType *getImpl(LLVMContext &Context, DIFlags Flags,
uint8_t CC, Metadata *TypeArray,
StorageType Storage,
bool ShouldCreate = true);
@@ -957,10 +1001,10 @@ class DISubroutineType : public DIType {
public:
DEFINE_MDNODE_GET(DISubroutineType,
- (unsigned Flags, uint8_t CC, DITypeRefArray TypeArray),
+ (DIFlags Flags, uint8_t CC, DITypeRefArray TypeArray),
(Flags, CC, TypeArray))
DEFINE_MDNODE_GET(DISubroutineType,
- (unsigned Flags, uint8_t CC, Metadata *TypeArray),
+ (DIFlags Flags, uint8_t CC, Metadata *TypeArray),
(Flags, CC, TypeArray))
TempDISubroutineType clone() const { return cloneImpl(); }
@@ -977,10 +1021,11 @@ public:
}
};
-/// \brief Compile unit.
+/// Compile unit.
class DICompileUnit : public DIScope {
friend class LLVMContextImpl;
friend class MDNode;
+
public:
enum DebugEmissionKind : unsigned {
NoDebug = 0,
@@ -988,6 +1033,7 @@ public:
LineTablesOnly,
LastEmissionKind = LineTablesOnly
};
+
static Optional<DebugEmissionKind> getEmissionKind(StringRef Str);
static const char *EmissionKindString(DebugEmissionKind EK);
@@ -997,14 +1043,16 @@ private:
unsigned RuntimeVersion;
unsigned EmissionKind;
uint64_t DWOId;
+ bool SplitDebugInlining;
DICompileUnit(LLVMContext &C, StorageType Storage, unsigned SourceLanguage,
bool IsOptimized, unsigned RuntimeVersion,
- unsigned EmissionKind, uint64_t DWOId, ArrayRef<Metadata *> Ops)
+ unsigned EmissionKind, uint64_t DWOId, bool SplitDebugInlining,
+ ArrayRef<Metadata *> Ops)
: DIScope(C, DICompileUnitKind, Storage, dwarf::DW_TAG_compile_unit, Ops),
SourceLanguage(SourceLanguage), IsOptimized(IsOptimized),
RuntimeVersion(RuntimeVersion), EmissionKind(EmissionKind),
- DWOId(DWOId) {
+ DWOId(DWOId), SplitDebugInlining(SplitDebugInlining) {
assert(Storage != Uniqued);
}
~DICompileUnit() = default;
@@ -1014,15 +1062,18 @@ private:
StringRef Producer, bool IsOptimized, StringRef Flags,
unsigned RuntimeVersion, StringRef SplitDebugFilename,
unsigned EmissionKind, DICompositeTypeArray EnumTypes,
- DIScopeArray RetainedTypes, DIGlobalVariableArray GlobalVariables,
+ DIScopeArray RetainedTypes,
+ DIGlobalVariableExpressionArray GlobalVariables,
DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
- uint64_t DWOId, StorageType Storage, bool ShouldCreate = true) {
- return getImpl(
- Context, SourceLanguage, File, getCanonicalMDString(Context, Producer),
- IsOptimized, getCanonicalMDString(Context, Flags), RuntimeVersion,
- getCanonicalMDString(Context, SplitDebugFilename), EmissionKind,
- EnumTypes.get(), RetainedTypes.get(), GlobalVariables.get(),
- ImportedEntities.get(), Macros.get(), DWOId, Storage, ShouldCreate);
+ uint64_t DWOId, bool SplitDebugInlining, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, SourceLanguage, File,
+ getCanonicalMDString(Context, Producer), IsOptimized,
+ getCanonicalMDString(Context, Flags), RuntimeVersion,
+ getCanonicalMDString(Context, SplitDebugFilename),
+ EmissionKind, EnumTypes.get(), RetainedTypes.get(),
+ GlobalVariables.get(), ImportedEntities.get(), Macros.get(),
+ DWOId, SplitDebugInlining, Storage, ShouldCreate);
}
static DICompileUnit *
getImpl(LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
@@ -1030,43 +1081,45 @@ private:
unsigned RuntimeVersion, MDString *SplitDebugFilename,
unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
Metadata *GlobalVariables, Metadata *ImportedEntities,
- Metadata *Macros, uint64_t DWOId, StorageType Storage,
- bool ShouldCreate = true);
+ Metadata *Macros, uint64_t DWOId, bool SplitDebugInlining,
+ StorageType Storage, bool ShouldCreate = true);
TempDICompileUnit cloneImpl() const {
- return getTemporary(
- getContext(), getSourceLanguage(), getFile(), getProducer(),
- isOptimized(), getFlags(), getRuntimeVersion(), getSplitDebugFilename(),
- getEmissionKind(), getEnumTypes(), getRetainedTypes(),
- getGlobalVariables(), getImportedEntities(), getMacros(), DWOId);
+ return getTemporary(getContext(), getSourceLanguage(), getFile(),
+ getProducer(), isOptimized(), getFlags(),
+ getRuntimeVersion(), getSplitDebugFilename(),
+ getEmissionKind(), getEnumTypes(), getRetainedTypes(),
+ getGlobalVariables(), getImportedEntities(),
+ getMacros(), DWOId, getSplitDebugInlining());
}
+public:
static void get() = delete;
static void getIfExists() = delete;
-public:
DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
DICompileUnit,
(unsigned SourceLanguage, DIFile *File, StringRef Producer,
bool IsOptimized, StringRef Flags, unsigned RuntimeVersion,
StringRef SplitDebugFilename, DebugEmissionKind EmissionKind,
DICompositeTypeArray EnumTypes, DIScopeArray RetainedTypes,
- DIGlobalVariableArray GlobalVariables,
+ DIGlobalVariableExpressionArray GlobalVariables,
DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
- uint64_t DWOId),
+ uint64_t DWOId, bool SplitDebugInlining),
(SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
- GlobalVariables, ImportedEntities, Macros, DWOId))
+ GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining))
DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
DICompileUnit,
(unsigned SourceLanguage, Metadata *File, MDString *Producer,
bool IsOptimized, MDString *Flags, unsigned RuntimeVersion,
MDString *SplitDebugFilename, unsigned EmissionKind, Metadata *EnumTypes,
Metadata *RetainedTypes, Metadata *GlobalVariables,
- Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId),
+ Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId,
+ bool SplitDebugInlining),
(SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
- GlobalVariables, ImportedEntities, Macros, DWOId))
+ GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining))
TempDICompileUnit clone() const { return cloneImpl(); }
@@ -1085,7 +1138,7 @@ public:
DIScopeArray getRetainedTypes() const {
return cast_or_null<MDTuple>(getRawRetainedTypes());
}
- DIGlobalVariableArray getGlobalVariables() const {
+ DIGlobalVariableExpressionArray getGlobalVariables() const {
return cast_or_null<MDTuple>(getRawGlobalVariables());
}
DIImportedEntityArray getImportedEntities() const {
@@ -1096,6 +1149,10 @@ public:
}
uint64_t getDWOId() const { return DWOId; }
void setDWOId(uint64_t DwoId) { DWOId = DwoId; }
+ bool getSplitDebugInlining() const { return SplitDebugInlining; }
+ void setSplitDebugInlining(bool SplitDebugInlining) {
+ this->SplitDebugInlining = SplitDebugInlining;
+ }
MDString *getRawProducer() const { return getOperandAs<MDString>(1); }
MDString *getRawFlags() const { return getOperandAs<MDString>(2); }
@@ -1108,7 +1165,7 @@ public:
Metadata *getRawImportedEntities() const { return getOperand(7); }
Metadata *getRawMacros() const { return getOperand(8); }
- /// \brief Replace arrays.
+ /// Replace arrays.
///
/// If this \a isUniqued() and not \a isResolved(), it will be RAUW'ed and
/// deleted on a uniquing collision. In practice, uniquing collisions on \a
@@ -1120,7 +1177,7 @@ public:
void replaceRetainedTypes(DITypeArray N) {
replaceOperandWith(5, N.get());
}
- void replaceGlobalVariables(DIGlobalVariableArray N) {
+ void replaceGlobalVariables(DIGlobalVariableExpressionArray N) {
replaceOperandWith(6, N.get());
}
void replaceImportedEntities(DIImportedEntityArray N) {
@@ -1134,7 +1191,7 @@ public:
}
};
-/// \brief A scope for locals.
+/// A scope for locals.
///
/// A legal scope for lexical blocks, local variables, and debug info
/// locations. Subclasses are \a DISubprogram, \a DILexicalBlock, and \a
@@ -1147,7 +1204,7 @@ protected:
~DILocalScope() = default;
public:
- /// \brief Get the subprogram for this scope.
+ /// Get the subprogram for this scope.
///
/// Return this if it's an \a DISubprogram; otherwise, look up the scope
/// chain.
@@ -1166,7 +1223,7 @@ public:
}
};
-/// \brief Debug location.
+/// Debug location.
///
/// A debug location in source code, used for debug info and otherwise.
class DILocation : public MDNode {
@@ -1196,10 +1253,10 @@ class DILocation : public MDNode {
getRawInlinedAt());
}
+public:
// Disallow replacing operands.
void replaceOperandWith(unsigned I, Metadata *New) = delete;
-public:
DEFINE_MDNODE_GET(DILocation,
(unsigned Line, unsigned Column, Metadata *Scope,
Metadata *InlinedAt = nullptr),
@@ -1209,7 +1266,7 @@ public:
DILocation *InlinedAt = nullptr),
(Line, Column, Scope, InlinedAt))
- /// \brief Return a (temporary) clone of this.
+ /// Return a (temporary) clone of this.
TempDILocation clone() const { return cloneImpl(); }
unsigned getLine() const { return SubclassData32; }
@@ -1223,7 +1280,7 @@ public:
StringRef getFilename() const { return getScope()->getFilename(); }
StringRef getDirectory() const { return getScope()->getDirectory(); }
- /// \brief Get the scope where this is inlined.
+ /// Get the scope where this is inlined.
///
/// Walk through \a getInlinedAt() and return \a getScope() from the deepest
/// location.
@@ -1233,7 +1290,7 @@ public:
return getScope();
}
- /// \brief Check whether this can be discriminated from another location.
+ /// Check whether this can be discriminated from another location.
///
/// Check \c this can be discriminated from \c RHS in a linetable entry.
/// Scope and inlined-at chains are not recorded in the linetable, so they
@@ -1250,12 +1307,33 @@ public:
return getFilename() != RHS.getFilename() || getLine() != RHS.getLine();
}
- /// \brief Get the DWARF discriminator.
+ /// Get the DWARF discriminator.
///
/// DWARF discriminators distinguish identical file locations between
/// instructions that are on different basic blocks.
inline unsigned getDiscriminator() const;
+ /// Returns a new DILocation with updated \p Discriminator.
+ inline DILocation *cloneWithDiscriminator(unsigned Discriminator) const;
+
+ /// When two instructions are combined into a single instruction we also
+ /// need to combine the original locations into a single location.
+ ///
+ /// When the locations are the same we can use either location. When they
+ /// differ, we need a third location which is distinct from either. If
+ /// they have the same file/line but have a different discriminator we
+ /// could create a location with a new discriminator. If they are from
+ /// different files/lines the location is ambiguous and can't be
+ /// represented in a single line entry. In this case, no location
+ /// should be set.
+ ///
+ /// Currently this function is simply a stub, and no location is returned
+ /// in any of these cases.
+ static DILocation *getMergedLocation(const DILocation *LocA,
+ const DILocation *LocB) {
+ return nullptr;
+ }
+
Metadata *getRawScope() const { return getOperand(0); }
Metadata *getRawInlinedAt() const {
if (getNumOperands() == 2)
@@ -1268,7 +1346,7 @@ public:
}
};
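As a rough sketch of how getMergedLocation() above is meant to be consulted when two instructions are folded into one (the helper name and instruction pointers are illustrative, not from this patch; with the current stub the merged result is always null):

  // Sketch only: assumes llvm/IR/Instruction.h is available.
  void transferMergedLoc(Instruction *NewI, const Instruction *I1,
                         const Instruction *I2) {
    const DILocation *LA = I1->getDebugLoc();
    const DILocation *LB = I2->getDebugLoc();
    if (LA == LB) {
      NewI->setDebugLoc(LA);  // Identical locations: reuse either one.
      return;
    }
    // With the stub above this is always nullptr, so the combined
    // instruction simply carries no debug location.
    NewI->setDebugLoc(DILocation::getMergedLocation(LA, LB));
  }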
-/// \brief Subprogram description.
+/// Subprogram description.
///
/// TODO: Remove DisplayName. It's always equal to Name.
/// TODO: Split up flags.
@@ -1289,8 +1367,6 @@ class DISubprogram : public DILocalScope {
// in 2 bits (none/pure/pure_virtual).
unsigned Virtuality : 2;
- unsigned Flags : 27;
-
// These are boolean flags so one bit is enough.
// MSVC starts a new container field every time the base
// type changes so we can't use 'bool' to ensure these bits
@@ -1299,19 +1375,22 @@ class DISubprogram : public DILocalScope {
unsigned IsDefinition : 1;
unsigned IsOptimized : 1;
+ unsigned Padding : 3;
+
+ DIFlags Flags;
+
DISubprogram(LLVMContext &C, StorageType Storage, unsigned Line,
unsigned ScopeLine, unsigned Virtuality, unsigned VirtualIndex,
- int ThisAdjustment, unsigned Flags, bool IsLocalToUnit,
+ int ThisAdjustment, DIFlags Flags, bool IsLocalToUnit,
bool IsDefinition, bool IsOptimized, ArrayRef<Metadata *> Ops)
: DILocalScope(C, DISubprogramKind, Storage, dwarf::DW_TAG_subprogram,
Ops),
Line(Line), ScopeLine(ScopeLine), VirtualIndex(VirtualIndex),
- ThisAdjustment(ThisAdjustment), Virtuality(Virtuality), Flags(Flags),
+ ThisAdjustment(ThisAdjustment), Virtuality(Virtuality),
IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition),
- IsOptimized(IsOptimized) {
+ IsOptimized(IsOptimized), Flags(Flags) {
static_assert(dwarf::DW_VIRTUALITY_max < 4, "Virtuality out of range");
assert(Virtuality < 4 && "Virtuality out of range");
- assert((Flags < (1 << 27)) && "Flags out of range");
}
~DISubprogram() = default;
@@ -1320,7 +1399,7 @@ class DISubprogram : public DILocalScope {
StringRef LinkageName, DIFile *File, unsigned Line,
DISubroutineType *Type, bool IsLocalToUnit, bool IsDefinition,
unsigned ScopeLine, DITypeRef ContainingType, unsigned Virtuality,
- unsigned VirtualIndex, int ThisAdjustment, unsigned Flags,
+ unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
bool IsOptimized, DICompileUnit *Unit,
DITemplateParameterArray TemplateParams, DISubprogram *Declaration,
DILocalVariableArray Variables, StorageType Storage,
@@ -1337,7 +1416,7 @@ class DISubprogram : public DILocalScope {
MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
- int ThisAdjustment, unsigned Flags, bool IsOptimized, Metadata *Unit,
+ int ThisAdjustment, DIFlags Flags, bool IsOptimized, Metadata *Unit,
Metadata *TemplateParams, Metadata *Declaration, Metadata *Variables,
StorageType Storage, bool ShouldCreate = true);
@@ -1356,7 +1435,7 @@ public:
DIFile *File, unsigned Line, DISubroutineType *Type,
bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
DITypeRef ContainingType, unsigned Virtuality,
- unsigned VirtualIndex, int ThisAdjustment, unsigned Flags,
+ unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
bool IsOptimized, DICompileUnit *Unit,
DITemplateParameterArray TemplateParams = nullptr,
DISubprogram *Declaration = nullptr,
@@ -1370,7 +1449,7 @@ public:
(Metadata * Scope, MDString *Name, MDString *LinkageName, Metadata *File,
unsigned Line, Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
unsigned ScopeLine, Metadata *ContainingType, unsigned Virtuality,
- unsigned VirtualIndex, int ThisAdjustment, unsigned Flags,
+ unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
bool IsOptimized, Metadata *Unit, Metadata *TemplateParams = nullptr,
Metadata *Declaration = nullptr, Metadata *Variables = nullptr),
(Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
@@ -1385,12 +1464,12 @@ public:
unsigned getVirtualIndex() const { return VirtualIndex; }
int getThisAdjustment() const { return ThisAdjustment; }
unsigned getScopeLine() const { return ScopeLine; }
- unsigned getFlags() const { return Flags; }
+ DIFlags getFlags() const { return Flags; }
bool isLocalToUnit() const { return IsLocalToUnit; }
bool isDefinition() const { return IsDefinition; }
bool isOptimized() const { return IsOptimized; }
- unsigned isArtificial() const { return getFlags() & FlagArtificial; }
+ bool isArtificial() const { return getFlags() & FlagArtificial; }
bool isPrivate() const {
return (getFlags() & FlagAccessibility) == FlagPrivate;
}
@@ -1402,22 +1481,24 @@ public:
}
bool isExplicit() const { return getFlags() & FlagExplicit; }
bool isPrototyped() const { return getFlags() & FlagPrototyped; }
+ bool isMainSubprogram() const { return getFlags() & FlagMainSubprogram; }
- /// \brief Check if this is reference-qualified.
+ /// Check if this is reference-qualified.
///
/// Return true if this subprogram is a C++11 reference-qualified non-static
/// member function (void foo() &).
- unsigned isLValueReference() const {
- return getFlags() & FlagLValueReference;
- }
+ bool isLValueReference() const { return getFlags() & FlagLValueReference; }
- /// \brief Check if this is rvalue-reference-qualified.
+ /// Check if this is rvalue-reference-qualified.
///
/// Return true if this subprogram is a C++11 rvalue-reference-qualified
/// non-static member function (void foo() &&).
- unsigned isRValueReference() const {
- return getFlags() & FlagRValueReference;
- }
+ bool isRValueReference() const { return getFlags() & FlagRValueReference; }
+
+ /// Check if this is marked as noreturn.
+ ///
+ /// Return true if this subprogram is marked C++11 noreturn or C11 _Noreturn.
+ bool isNoReturn() const { return getFlags() & FlagNoReturn; }
DIScopeRef getScope() const { return DIScopeRef(getRawScope()); }
@@ -1459,7 +1540,7 @@ public:
Metadata *getRawDeclaration() const { return getOperand(9); }
Metadata *getRawVariables() const { return getOperand(10); }
- /// \brief Check if this subprogram describes the given function.
+ /// Check if this subprogram describes the given function.
///
/// FIXME: Should this be looking through bitcasts?
bool describes(const Function *F) const;
@@ -1596,45 +1677,66 @@ unsigned DILocation::getDiscriminator() const {
return 0;
}
+DILocation *DILocation::cloneWithDiscriminator(unsigned Discriminator) const {
+ DIScope *Scope = getScope();
+ // Skip all parent DILexicalBlockFile that already have a discriminator
+ // assigned. We do not want to have nested DILexicalBlockFiles that have
+ // multiple discriminators because only the leaf DILexicalBlockFile's
+ // discriminator will be used.
+ for (auto *LBF = dyn_cast<DILexicalBlockFile>(Scope);
+ LBF && LBF->getDiscriminator() != 0;
+ LBF = dyn_cast<DILexicalBlockFile>(Scope))
+ Scope = LBF->getScope();
+ DILexicalBlockFile *NewScope =
+ DILexicalBlockFile::get(getContext(), Scope, getFile(), Discriminator);
+ return DILocation::get(getContext(), getLine(), getColumn(), NewScope,
+ getInlinedAt());
+}
+
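A small, hedged sketch of how the new cloneWithDiscriminator() hook is typically consumed, roughly what a discriminator-assignment pass would do (the helper name is made up for illustration):

  // Sketch: re-attach the instruction's location with a DWARF discriminator.
  void setDiscriminator(Instruction &I, unsigned D) {
    if (const DILocation *Loc = I.getDebugLoc())
      I.setDebugLoc(Loc->cloneWithDiscriminator(D));
  }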
class DINamespace : public DIScope {
friend class LLVMContextImpl;
friend class MDNode;
unsigned Line;
+ unsigned ExportSymbols : 1;
DINamespace(LLVMContext &Context, StorageType Storage, unsigned Line,
- ArrayRef<Metadata *> Ops)
+ bool ExportSymbols, ArrayRef<Metadata *> Ops)
: DIScope(Context, DINamespaceKind, Storage, dwarf::DW_TAG_namespace,
Ops),
- Line(Line) {}
+ Line(Line), ExportSymbols(ExportSymbols) {}
~DINamespace() = default;
static DINamespace *getImpl(LLVMContext &Context, DIScope *Scope,
DIFile *File, StringRef Name, unsigned Line,
- StorageType Storage, bool ShouldCreate = true) {
+ bool ExportSymbols, StorageType Storage,
+ bool ShouldCreate = true) {
return getImpl(Context, Scope, File, getCanonicalMDString(Context, Name),
- Line, Storage, ShouldCreate);
+ Line, ExportSymbols, Storage, ShouldCreate);
}
static DINamespace *getImpl(LLVMContext &Context, Metadata *Scope,
Metadata *File, MDString *Name, unsigned Line,
- StorageType Storage, bool ShouldCreate = true);
+ bool ExportSymbols, StorageType Storage,
+ bool ShouldCreate = true);
TempDINamespace cloneImpl() const {
return getTemporary(getContext(), getScope(), getFile(), getName(),
- getLine());
+ getLine(), getExportSymbols());
}
public:
DEFINE_MDNODE_GET(DINamespace, (DIScope * Scope, DIFile *File, StringRef Name,
- unsigned Line),
- (Scope, File, Name, Line))
- DEFINE_MDNODE_GET(DINamespace, (Metadata * Scope, Metadata *File,
- MDString *Name, unsigned Line),
- (Scope, File, Name, Line))
+ unsigned Line, bool ExportSymbols),
+ (Scope, File, Name, Line, ExportSymbols))
+ DEFINE_MDNODE_GET(DINamespace,
+ (Metadata * Scope, Metadata *File, MDString *Name,
+ unsigned Line, bool ExportSymbols),
+ (Scope, File, Name, Line, ExportSymbols))
TempDINamespace clone() const { return cloneImpl(); }
unsigned getLine() const { return Line; }
+ bool getExportSymbols() const { return ExportSymbols; }
DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
StringRef getName() const { return getStringOperand(2); }
@@ -1646,7 +1748,7 @@ public:
}
};
-/// \brief A (clang) module that has been imported by the compile unit.
+/// A (clang) module that has been imported by the compile unit.
///
class DIModule : public DIScope {
friend class LLVMContextImpl;
@@ -1654,7 +1756,7 @@ class DIModule : public DIScope {
DIModule(LLVMContext &Context, StorageType Storage, ArrayRef<Metadata *> Ops)
: DIScope(Context, DIModuleKind, Storage, dwarf::DW_TAG_module, Ops) {}
- ~DIModule() {}
+ ~DIModule() = default;
static DIModule *getImpl(LLVMContext &Context, DIScope *Scope,
StringRef Name, StringRef ConfigurationMacros,
@@ -1706,7 +1808,7 @@ public:
}
};
-/// \brief Base class for template parameters.
+/// Base class for template parameters.
class DITemplateParameter : public DINode {
protected:
DITemplateParameter(LLVMContext &Context, unsigned ID, StorageType Storage,
@@ -1808,14 +1910,16 @@ public:
}
};
-/// \brief Base class for variables.
+/// Base class for variables.
class DIVariable : public DINode {
unsigned Line;
+ uint32_t AlignInBits;
protected:
DIVariable(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Line,
- ArrayRef<Metadata *> Ops)
- : DINode(C, ID, Storage, dwarf::DW_TAG_variable, Ops), Line(Line) {}
+ ArrayRef<Metadata *> Ops, uint32_t AlignInBits = 0)
+ : DINode(C, ID, Storage, dwarf::DW_TAG_variable, Ops), Line(Line),
+ AlignInBits(AlignInBits) {}
~DIVariable() = default;
public:
@@ -1824,6 +1928,8 @@ public:
StringRef getName() const { return getStringOperand(1); }
DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
DITypeRef getType() const { return DITypeRef(getRawType()); }
+ uint32_t getAlignInBits() const { return AlignInBits; }
+ uint32_t getAlignInBytes() const { return getAlignInBits() / CHAR_BIT; }
StringRef getFilename() const {
if (auto *F = getFile())
@@ -1847,173 +1953,11 @@ public:
}
};
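DIVariable now carries an explicit alignment, exposed both in bits and bytes; a minimal illustrative use (raw_ostream from llvm/Support/raw_ostream.h, function name hypothetical):

  void printAlignment(const DIVariable *V, raw_ostream &OS) {
    if (uint32_t Bits = V->getAlignInBits())
      OS << V->getName() << ": align " << Bits << " bits ("
         << V->getAlignInBytes() << " bytes)\n";
  }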
-/// \brief Global variables.
-///
-/// TODO: Remove DisplayName. It's always equal to Name.
-class DIGlobalVariable : public DIVariable {
- friend class LLVMContextImpl;
- friend class MDNode;
-
- bool IsLocalToUnit;
- bool IsDefinition;
-
- DIGlobalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
- bool IsLocalToUnit, bool IsDefinition,
- ArrayRef<Metadata *> Ops)
- : DIVariable(C, DIGlobalVariableKind, Storage, Line, Ops),
- IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition) {}
- ~DIGlobalVariable() = default;
-
- static DIGlobalVariable *
- getImpl(LLVMContext &Context, DIScope *Scope, StringRef Name,
- StringRef LinkageName, DIFile *File, unsigned Line, DITypeRef Type,
- bool IsLocalToUnit, bool IsDefinition, Constant *Variable,
- DIDerivedType *StaticDataMemberDeclaration, StorageType Storage,
- bool ShouldCreate = true) {
- return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
- getCanonicalMDString(Context, LinkageName), File, Line, Type,
- IsLocalToUnit, IsDefinition,
- Variable ? ConstantAsMetadata::get(Variable) : nullptr,
- StaticDataMemberDeclaration, Storage, ShouldCreate);
- }
- static DIGlobalVariable *
- getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
- MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
- bool IsLocalToUnit, bool IsDefinition, Metadata *Variable,
- Metadata *StaticDataMemberDeclaration, StorageType Storage,
- bool ShouldCreate = true);
-
- TempDIGlobalVariable cloneImpl() const {
- return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
- getFile(), getLine(), getType(), isLocalToUnit(),
- isDefinition(), getVariable(),
- getStaticDataMemberDeclaration());
- }
-
-public:
- DEFINE_MDNODE_GET(DIGlobalVariable,
- (DIScope * Scope, StringRef Name, StringRef LinkageName,
- DIFile *File, unsigned Line, DITypeRef Type,
- bool IsLocalToUnit, bool IsDefinition, Constant *Variable,
- DIDerivedType *StaticDataMemberDeclaration),
- (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
- IsDefinition, Variable, StaticDataMemberDeclaration))
- DEFINE_MDNODE_GET(DIGlobalVariable,
- (Metadata * Scope, MDString *Name, MDString *LinkageName,
- Metadata *File, unsigned Line, Metadata *Type,
- bool IsLocalToUnit, bool IsDefinition, Metadata *Variable,
- Metadata *StaticDataMemberDeclaration),
- (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
- IsDefinition, Variable, StaticDataMemberDeclaration))
-
- TempDIGlobalVariable clone() const { return cloneImpl(); }
-
- bool isLocalToUnit() const { return IsLocalToUnit; }
- bool isDefinition() const { return IsDefinition; }
- StringRef getDisplayName() const { return getStringOperand(4); }
- StringRef getLinkageName() const { return getStringOperand(5); }
- Constant *getVariable() const {
- if (auto *C = cast_or_null<ConstantAsMetadata>(getRawVariable()))
- return dyn_cast<Constant>(C->getValue());
- return nullptr;
- }
- DIDerivedType *getStaticDataMemberDeclaration() const {
- return cast_or_null<DIDerivedType>(getRawStaticDataMemberDeclaration());
- }
-
- MDString *getRawLinkageName() const { return getOperandAs<MDString>(5); }
- Metadata *getRawVariable() const { return getOperand(6); }
- Metadata *getRawStaticDataMemberDeclaration() const { return getOperand(7); }
-
- static bool classof(const Metadata *MD) {
- return MD->getMetadataID() == DIGlobalVariableKind;
- }
-};
-
-/// \brief Local variable.
-///
-/// TODO: Split up flags.
-class DILocalVariable : public DIVariable {
- friend class LLVMContextImpl;
- friend class MDNode;
-
- unsigned Arg : 16;
- unsigned Flags : 16;
-
- DILocalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
- unsigned Arg, unsigned Flags, ArrayRef<Metadata *> Ops)
- : DIVariable(C, DILocalVariableKind, Storage, Line, Ops), Arg(Arg),
- Flags(Flags) {
- assert(Flags < (1 << 16) && "DILocalVariable: Flags out of range");
- assert(Arg < (1 << 16) && "DILocalVariable: Arg out of range");
- }
- ~DILocalVariable() = default;
-
- static DILocalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
- StringRef Name, DIFile *File, unsigned Line,
- DITypeRef Type, unsigned Arg, unsigned Flags,
- StorageType Storage,
- bool ShouldCreate = true) {
- return getImpl(Context, Scope, getCanonicalMDString(Context, Name), File,
- Line, Type, Arg, Flags, Storage, ShouldCreate);
- }
- static DILocalVariable *getImpl(LLVMContext &Context, Metadata *Scope,
- MDString *Name, Metadata *File, unsigned Line,
- Metadata *Type, unsigned Arg, unsigned Flags,
- StorageType Storage,
- bool ShouldCreate = true);
-
- TempDILocalVariable cloneImpl() const {
- return getTemporary(getContext(), getScope(), getName(), getFile(),
- getLine(), getType(), getArg(), getFlags());
- }
-
-public:
- DEFINE_MDNODE_GET(DILocalVariable,
- (DILocalScope * Scope, StringRef Name, DIFile *File,
- unsigned Line, DITypeRef Type, unsigned Arg,
- unsigned Flags),
- (Scope, Name, File, Line, Type, Arg, Flags))
- DEFINE_MDNODE_GET(DILocalVariable,
- (Metadata * Scope, MDString *Name, Metadata *File,
- unsigned Line, Metadata *Type, unsigned Arg,
- unsigned Flags),
- (Scope, Name, File, Line, Type, Arg, Flags))
-
- TempDILocalVariable clone() const { return cloneImpl(); }
-
- /// \brief Get the local scope for this variable.
- ///
- /// Variables must be defined in a local scope.
- DILocalScope *getScope() const {
- return cast<DILocalScope>(DIVariable::getScope());
- }
-
- bool isParameter() const { return Arg; }
- unsigned getArg() const { return Arg; }
- unsigned getFlags() const { return Flags; }
-
- bool isArtificial() const { return getFlags() & FlagArtificial; }
- bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
-
- /// \brief Check that a location is valid for this variable.
- ///
- /// Check that \c DL exists, is in the same subprogram, and has the same
- /// inlined-at location as \c this. (Otherwise, it's not a valid attachment
- /// to a \a DbgInfoIntrinsic.)
- bool isValidLocationForIntrinsic(const DILocation *DL) const {
- return DL && getScope()->getSubprogram() == DL->getScope()->getSubprogram();
- }
-
- static bool classof(const Metadata *MD) {
- return MD->getMetadataID() == DILocalVariableKind;
- }
-};
-
-/// \brief DWARF expression.
+/// DWARF expression.
///
/// This is (almost) a DWARF expression that modifies the location of a
-/// variable or (or the location of a single piece of a variable).
+/// variable, or the location of a single piece of a variable, or (when using
+/// DW_OP_stack_value) is the constant variable value.
///
/// FIXME: Instead of DW_OP_plus taking an argument, this should use DW_OP_const
/// and have DW_OP_plus consume the topmost elements on the stack.
@@ -2053,53 +1997,49 @@ public:
return Elements[I];
}
- /// \brief Return whether this is a piece of an aggregate variable.
- bool isBitPiece() const;
-
- /// \brief Return the offset of this piece in bits.
- uint64_t getBitPieceOffset() const;
-
- /// \brief Return the size of this piece in bits.
- uint64_t getBitPieceSize() const;
+ /// Determine whether this represents a standalone constant value.
+ bool isConstant() const;
typedef ArrayRef<uint64_t>::iterator element_iterator;
element_iterator elements_begin() const { return getElements().begin(); }
element_iterator elements_end() const { return getElements().end(); }
- /// \brief A lightweight wrapper around an expression operand.
+ /// A lightweight wrapper around an expression operand.
///
/// TODO: Store arguments directly and change \a DIExpression to store a
/// range of these.
class ExprOperand {
- const uint64_t *Op;
+ const uint64_t *Op = nullptr;
public:
+ ExprOperand() = default;
explicit ExprOperand(const uint64_t *Op) : Op(Op) {}
const uint64_t *get() const { return Op; }
- /// \brief Get the operand code.
+ /// Get the operand code.
uint64_t getOp() const { return *Op; }
- /// \brief Get an argument to the operand.
+ /// Get an argument to the operand.
///
/// Never returns the operand itself.
uint64_t getArg(unsigned I) const { return Op[I + 1]; }
unsigned getNumArgs() const { return getSize() - 1; }
- /// \brief Return the size of the operand.
+ /// Return the size of the operand.
///
/// Return the number of elements in the operand (1 + args).
unsigned getSize() const;
};
- /// \brief An iterator for expression operands.
+ /// An iterator for expression operands.
class expr_op_iterator
: public std::iterator<std::input_iterator_tag, ExprOperand> {
ExprOperand Op;
public:
+ expr_op_iterator() = default;
explicit expr_op_iterator(element_iterator I) : Op(I) {}
element_iterator getBase() const { return Op.get(); }
@@ -2116,7 +2056,7 @@ public:
return T;
}
- /// \brief Get the next iterator.
+ /// Get the next iterator.
///
/// \a std::next() doesn't work because this is technically an
/// input_iterator, but it's a perfectly valid operation. This is an
@@ -2134,7 +2074,7 @@ public:
void increment() { Op = ExprOperand(getBase() + Op.getSize()); }
};
- /// \brief Visit the elements via ExprOperand wrappers.
+ /// Visit the elements via ExprOperand wrappers.
///
/// These range iterators visit elements through \a ExprOperand wrappers.
/// This is not guaranteed to be a valid range unless \a isValid() gives \c
@@ -2155,6 +2095,189 @@ public:
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == DIExpressionKind;
}
+
+ /// Is the first element a DW_OP_deref?
+ bool startsWithDeref() const {
+ return getNumElements() > 0 && getElement(0) == dwarf::DW_OP_deref;
+ }
+
+ /// Holds the characteristics of one fragment of a larger variable.
+ struct FragmentInfo {
+ uint64_t SizeInBits;
+ uint64_t OffsetInBits;
+ };
+
+ /// Retrieve the details of this fragment expression.
+ static Optional<FragmentInfo> getFragmentInfo(expr_op_iterator Start,
+ expr_op_iterator End);
+
+ /// Retrieve the details of this fragment expression.
+ Optional<FragmentInfo> getFragmentInfo() const {
+ return getFragmentInfo(expr_op_begin(), expr_op_end());
+ }
+
+ /// Return whether this is a piece of an aggregate variable.
+ bool isFragment() const { return getFragmentInfo().hasValue(); }
+};
+
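With the bit-piece accessors replaced by fragment queries, a consumer might classify an expression with the new helpers roughly as follows (a hedged sketch; the function name is illustrative):

  void classifyExpr(const DIExpression *Expr, raw_ostream &OS) {
    if (Expr->startsWithDeref())
      OS << "indirect value\n";
    if (Expr->isConstant())
      OS << "stack-value constant\n";
    if (auto Frag = Expr->getFragmentInfo())
      OS << "fragment: " << Frag->SizeInBits << " bits at bit offset "
         << Frag->OffsetInBits << "\n";
  }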
+/// Global variables.
+///
+/// TODO: Remove DisplayName. It's always equal to Name.
+class DIGlobalVariable : public DIVariable {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ bool IsLocalToUnit;
+ bool IsDefinition;
+
+ DIGlobalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
+ bool IsLocalToUnit, bool IsDefinition, uint32_t AlignInBits,
+ ArrayRef<Metadata *> Ops)
+ : DIVariable(C, DIGlobalVariableKind, Storage, Line, Ops, AlignInBits),
+ IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition) {}
+ ~DIGlobalVariable() = default;
+
+ static DIGlobalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
+ StringRef Name, StringRef LinkageName,
+ DIFile *File, unsigned Line, DITypeRef Type,
+ bool IsLocalToUnit, bool IsDefinition,
+ DIDerivedType *StaticDataMemberDeclaration,
+ uint32_t AlignInBits, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+ getCanonicalMDString(Context, LinkageName), File, Line, Type,
+ IsLocalToUnit, IsDefinition, StaticDataMemberDeclaration,
+ AlignInBits, Storage, ShouldCreate);
+ }
+ static DIGlobalVariable *
+ getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+ MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition,
+ Metadata *StaticDataMemberDeclaration, uint32_t AlignInBits,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDIGlobalVariable cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
+ getFile(), getLine(), getType(), isLocalToUnit(),
+ isDefinition(), getStaticDataMemberDeclaration(),
+ getAlignInBits());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIGlobalVariable,
+ (DIScope * Scope, StringRef Name, StringRef LinkageName,
+ DIFile *File, unsigned Line, DITypeRef Type,
+ bool IsLocalToUnit, bool IsDefinition,
+ DIDerivedType *StaticDataMemberDeclaration,
+ uint32_t AlignInBits),
+ (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+ IsDefinition, StaticDataMemberDeclaration, AlignInBits))
+ DEFINE_MDNODE_GET(DIGlobalVariable,
+ (Metadata * Scope, MDString *Name, MDString *LinkageName,
+ Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition,
+ Metadata *StaticDataMemberDeclaration,
+ uint32_t AlignInBits),
+ (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+ IsDefinition, StaticDataMemberDeclaration, AlignInBits))
+
+ TempDIGlobalVariable clone() const { return cloneImpl(); }
+
+ bool isLocalToUnit() const { return IsLocalToUnit; }
+ bool isDefinition() const { return IsDefinition; }
+ StringRef getDisplayName() const { return getStringOperand(4); }
+ StringRef getLinkageName() const { return getStringOperand(5); }
+ DIDerivedType *getStaticDataMemberDeclaration() const {
+ return cast_or_null<DIDerivedType>(getRawStaticDataMemberDeclaration());
+ }
+
+ MDString *getRawLinkageName() const { return getOperandAs<MDString>(5); }
+ Metadata *getRawStaticDataMemberDeclaration() const { return getOperand(6); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIGlobalVariableKind;
+ }
+};
+
+/// Local variable.
+///
+/// TODO: Split up flags.
+class DILocalVariable : public DIVariable {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Arg : 16;
+ DIFlags Flags;
+
+ DILocalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned Arg, DIFlags Flags, uint32_t AlignInBits,
+ ArrayRef<Metadata *> Ops)
+ : DIVariable(C, DILocalVariableKind, Storage, Line, Ops, AlignInBits),
+ Arg(Arg), Flags(Flags) {
+ assert(Arg < (1 << 16) && "DILocalVariable: Arg out of range");
+ }
+ ~DILocalVariable() = default;
+
+ static DILocalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
+ StringRef Name, DIFile *File, unsigned Line,
+ DITypeRef Type, unsigned Arg, DIFlags Flags,
+ uint32_t AlignInBits, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Scope, getCanonicalMDString(Context, Name), File,
+ Line, Type, Arg, Flags, AlignInBits, Storage, ShouldCreate);
+ }
+ static DILocalVariable *getImpl(LLVMContext &Context, Metadata *Scope,
+ MDString *Name, Metadata *File, unsigned Line,
+ Metadata *Type, unsigned Arg, DIFlags Flags,
+ uint32_t AlignInBits, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDILocalVariable cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getName(), getFile(),
+ getLine(), getType(), getArg(), getFlags(),
+ getAlignInBits());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DILocalVariable,
+ (DILocalScope * Scope, StringRef Name, DIFile *File,
+ unsigned Line, DITypeRef Type, unsigned Arg,
+ DIFlags Flags, uint32_t AlignInBits),
+ (Scope, Name, File, Line, Type, Arg, Flags, AlignInBits))
+ DEFINE_MDNODE_GET(DILocalVariable,
+ (Metadata * Scope, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Type, unsigned Arg,
+ DIFlags Flags, uint32_t AlignInBits),
+ (Scope, Name, File, Line, Type, Arg, Flags, AlignInBits))
+
+ TempDILocalVariable clone() const { return cloneImpl(); }
+
+ /// Get the local scope for this variable.
+ ///
+ /// Variables must be defined in a local scope.
+ DILocalScope *getScope() const {
+ return cast<DILocalScope>(DIVariable::getScope());
+ }
+
+ bool isParameter() const { return Arg; }
+ unsigned getArg() const { return Arg; }
+ DIFlags getFlags() const { return Flags; }
+
+ bool isArtificial() const { return getFlags() & FlagArtificial; }
+ bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
+
+ /// Check that a location is valid for this variable.
+ ///
+ /// Check that \c DL exists, is in the same subprogram, and has the same
+ /// inlined-at location as \c this. (Otherwise, it's not a valid attachment
+ /// to a \a DbgInfoIntrinsic.)
+ bool isValidLocationForIntrinsic(const DILocation *DL) const {
+ return DL && getScope()->getSubprogram() == DL->getScope()->getSubprogram();
+ }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DILocalVariableKind;
+ }
};
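The DEFINE_MDNODE_GET signatures above now take DIFlags and a trailing AlignInBits; a rough sketch of direct construction under those assumptions (in practice this normally goes through DIBuilder, and the names and values below are placeholders, with FlagArtificial used only as an example flag):

  DILocalVariable *makeParam(LLVMContext &Ctx, DILocalScope *Scope,
                             DIFile *File, DITypeRef Ty) {
    return DILocalVariable::get(Ctx, Scope, "argc", File, /*Line=*/1, Ty,
                                /*Arg=*/1, DINode::FlagArtificial,
                                /*AlignInBits=*/32);
  }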
class DIObjCProperty : public DINode {
@@ -2238,7 +2361,7 @@ public:
}
};
-/// \brief An imported module (C++ using directive or similar).
+/// An imported module (C++ using directive or similar).
class DIImportedEntity : public DINode {
friend class LLVMContextImpl;
friend class MDNode;
@@ -2295,7 +2418,46 @@ public:
}
};
-/// \brief Macro Info DWARF-like metadata node.
+/// A pair of DIGlobalVariable and DIExpression.
+class DIGlobalVariableExpression : public MDNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DIGlobalVariableExpression(LLVMContext &C, StorageType Storage,
+ ArrayRef<Metadata *> Ops)
+ : MDNode(C, DIGlobalVariableExpressionKind, Storage, Ops) {}
+ ~DIGlobalVariableExpression() = default;
+
+ static DIGlobalVariableExpression *
+ getImpl(LLVMContext &Context, Metadata *Variable, Metadata *Expression,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDIGlobalVariableExpression cloneImpl() const {
+ return getTemporary(getContext(), getVariable(), getExpression());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIGlobalVariableExpression,
+ (Metadata * Variable, Metadata *Expression),
+ (Variable, Expression))
+
+ TempDIGlobalVariableExpression clone() const { return cloneImpl(); }
+
+ Metadata *getRawVariable() const { return getOperand(0); }
+ DIGlobalVariable *getVariable() const {
+ return cast_or_null<DIGlobalVariable>(getRawVariable());
+ }
+ Metadata *getRawExpression() const { return getOperand(1); }
+ DIExpression *getExpression() const {
+ return cast_or_null<DIExpression>(getRawExpression());
+ }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIGlobalVariableExpressionKind;
+ }
+};
+
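Since a compile unit now lists DIGlobalVariableExpression pairs rather than bare DIGlobalVariables, consumers unwrap the pair before touching the variable; a hedged sketch (function name illustrative, null checks kept minimal):

  void visitGlobals(const DICompileUnit *CU, raw_ostream &OS) {
    for (auto *GVE : CU->getGlobalVariables()) {
      if (!GVE)
        continue;
      if (DIGlobalVariable *Var = GVE->getVariable())
        OS << Var->getName() << "\n";
      if (DIExpression *Expr = GVE->getExpression())
        if (Expr->isConstant())
          OS << "  (constant-valued)\n";
    }
  }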
+/// Macro Info DWARF-like metadata node.
///
/// A metadata node with a DWARF macro info (i.e., a constant named
/// \c DW_MACINFO_*, defined in llvm/Support/Dwarf.h). Called \a DIMacroNode
@@ -2433,7 +2595,7 @@ public:
void replaceElements(DIMacroNodeArray Elements) {
#ifndef NDEBUG
for (DIMacroNode *Op : getElements())
- assert(std::find(Elements->op_begin(), Elements->op_end(), Op) &&
+ assert(is_contained(Elements->operands(), Op) &&
"Lost a macro node during macro node list replacement");
#endif
replaceOperandWith(1, Elements.get());
@@ -2460,4 +2622,4 @@ public:
#undef DEFINE_MDNODE_GET_UNPACK
#undef DEFINE_MDNODE_GET
-#endif
+#endif // LLVM_IR_DEBUGINFOMETADATA_H
diff --git a/include/llvm/IR/DebugLoc.h b/include/llvm/IR/DebugLoc.h
index 8ea5875e1f85..202be3da14da 100644
--- a/include/llvm/IR/DebugLoc.h
+++ b/include/llvm/IR/DebugLoc.h
@@ -35,17 +35,7 @@ namespace llvm {
TrackingMDNodeRef Loc;
public:
- DebugLoc() {}
- DebugLoc(DebugLoc &&X) : Loc(std::move(X.Loc)) {}
- DebugLoc(const DebugLoc &X) : Loc(X.Loc) {}
- DebugLoc &operator=(DebugLoc &&X) {
- Loc = std::move(X.Loc);
- return *this;
- }
- DebugLoc &operator=(const DebugLoc &X) {
- Loc = X.Loc;
- return *this;
- }
+ DebugLoc() = default;
/// \brief Construct from an \a DILocation.
DebugLoc(const DILocation *L);
diff --git a/include/llvm/IR/DerivedTypes.h b/include/llvm/IR/DerivedTypes.h
index efd0d07366ee..05e99157b8dc 100644
--- a/include/llvm/IR/DerivedTypes.h
+++ b/include/llvm/IR/DerivedTypes.h
@@ -18,17 +18,19 @@
#ifndef LLVM_IR_DERIVEDTYPES_H
#define LLVM_IR_DERIVEDTYPES_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstdint>
namespace llvm {
class Value;
class APInt;
class LLVMContext;
-template<typename T> class ArrayRef;
-class StringRef;
/// Class to represent integer types. Note that this class is also used to
/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and
@@ -46,9 +48,10 @@ public:
/// This enum is just used to hold constants we need for IntegerType.
enum {
MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified
- MAX_INT_BITS = (1<<23)-1 ///< Maximum number of bits that can be specified
+ MAX_INT_BITS = (1<<24)-1 ///< Maximum number of bits that can be specified
///< Note that bit width is stored in the Type class's SubclassData field
- ///< which has 23 bits. This yields a maximum bit width of 8,388,607 bits.
+ ///< which has 24 bits. This yields a maximum bit width of 16,777,215
+ ///< bits.
};
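As a quick check of the widened limit: (1 << 24) - 1 = 16,777,215, so the largest integer type the 24-bit field can encode would be constructed like this (purely illustrative):

  // Widest representable integer type under the new limit: i16777215.
  IntegerType *widestIntType(LLVMContext &Ctx) {
    return IntegerType::get(Ctx, (1u << 24) - 1);
  }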
/// This static method is the primary way of constructing an IntegerType.
@@ -97,11 +100,12 @@ unsigned Type::getIntegerBitWidth() const {
/// Class to represent function types
///
class FunctionType : public Type {
- FunctionType(const FunctionType &) = delete;
- const FunctionType &operator=(const FunctionType &) = delete;
FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
public:
+ FunctionType(const FunctionType &) = delete;
+ FunctionType &operator=(const FunctionType &) = delete;
+
/// This static method is the primary way of constructing a FunctionType.
static FunctionType *get(Type *Result,
ArrayRef<Type*> Params, bool isVarArg);
@@ -137,7 +141,7 @@ public:
return T->getTypeID() == FunctionTyID;
}
};
-static_assert(AlignOf<FunctionType>::Alignment >= AlignOf<Type *>::Alignment,
+static_assert(alignof(FunctionType) >= alignof(Type *),
"Alignment sufficient for objects appended to FunctionType");
bool Type::isFunctionVarArg() const {
@@ -152,7 +156,7 @@ unsigned Type::getFunctionNumParams() const {
return cast<FunctionType>(this)->getNumParams();
}
-/// Common super class of ArrayType, StructType, PointerType and VectorType.
+/// Common super class of ArrayType, StructType and VectorType.
class CompositeType : public Type {
protected:
explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) {}
@@ -168,7 +172,6 @@ public:
static inline bool classof(const Type *T) {
return T->getTypeID() == ArrayTyID ||
T->getTypeID() == StructTyID ||
- T->getTypeID() == PointerTyID ||
T->getTypeID() == VectorTyID;
}
};
@@ -194,10 +197,9 @@ public:
/// generator for a target expects).
///
class StructType : public CompositeType {
- StructType(const StructType &) = delete;
- const StructType &operator=(const StructType &) = delete;
StructType(LLVMContext &C)
: CompositeType(C, StructTyID), SymbolTableEntry(nullptr) {}
+
enum {
/// This is the contents of the SubClassData field.
SCDB_HasBody = 1,
@@ -213,6 +215,9 @@ class StructType : public CompositeType {
void *SymbolTableEntry;
public:
+ StructType(const StructType &) = delete;
+ StructType &operator=(const StructType &) = delete;
+
/// This creates an identified struct.
static StructType *create(LLVMContext &Context, StringRef Name);
static StructType *create(LLVMContext &Context);
@@ -305,52 +310,51 @@ Type *Type::getStructElementType(unsigned N) const {
return cast<StructType>(this)->getElementType(N);
}
-/// This is the superclass of the array, pointer and vector type classes.
-/// All of these represent "arrays" in memory. The array type represents a
-/// specifically sized array, pointer types are unsized/unknown size arrays,
-/// vector types represent specifically sized arrays that allow for use of SIMD
-/// instructions. SequentialType holds the common features of all, which stem
-/// from the fact that all three lay their components out in memory identically.
+/// This is the superclass of the array and vector type classes. Both of these
+/// represent "arrays" in memory. The array type represents a specifically sized
+/// array, and the vector type represents a specifically sized array that allows
+/// for use of SIMD instructions. SequentialType holds the common features of
+/// both, which stem from the fact that both lay their components out in memory
+/// identically.
class SequentialType : public CompositeType {
Type *ContainedType; ///< Storage for the single contained type.
- SequentialType(const SequentialType &) = delete;
- const SequentialType &operator=(const SequentialType &) = delete;
+ uint64_t NumElements;
protected:
- SequentialType(TypeID TID, Type *ElType)
- : CompositeType(ElType->getContext(), TID), ContainedType(ElType) {
+ SequentialType(TypeID TID, Type *ElType, uint64_t NumElements)
+ : CompositeType(ElType->getContext(), TID), ContainedType(ElType),
+ NumElements(NumElements) {
ContainedTys = &ContainedType;
NumContainedTys = 1;
}
public:
- Type *getElementType() const { return getSequentialElementType(); }
+ SequentialType(const SequentialType &) = delete;
+ SequentialType &operator=(const SequentialType &) = delete;
+
+ uint64_t getNumElements() const { return NumElements; }
+ Type *getElementType() const { return ContainedType; }
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const Type *T) {
- return T->getTypeID() == ArrayTyID ||
- T->getTypeID() == PointerTyID ||
- T->getTypeID() == VectorTyID;
+ return T->getTypeID() == ArrayTyID || T->getTypeID() == VectorTyID;
}
};
/// Class to represent array types.
class ArrayType : public SequentialType {
- uint64_t NumElements;
-
- ArrayType(const ArrayType &) = delete;
- const ArrayType &operator=(const ArrayType &) = delete;
ArrayType(Type *ElType, uint64_t NumEl);
public:
+ ArrayType(const ArrayType &) = delete;
+ ArrayType &operator=(const ArrayType &) = delete;
+
/// This static method is the primary way to construct an ArrayType
static ArrayType *get(Type *ElementType, uint64_t NumElements);
/// Return true if the specified type is valid as an element type.
static bool isValidElementType(Type *ElemTy);
- uint64_t getNumElements() const { return NumElements; }
-
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const Type *T) {
return T->getTypeID() == ArrayTyID;
@@ -363,13 +367,12 @@ uint64_t Type::getArrayNumElements() const {
/// Class to represent vector types.
class VectorType : public SequentialType {
- unsigned NumElements;
-
- VectorType(const VectorType &) = delete;
- const VectorType &operator=(const VectorType &) = delete;
VectorType(Type *ElType, unsigned NumEl);
public:
+ VectorType(const VectorType &) = delete;
+ VectorType &operator=(const VectorType &) = delete;
+
/// This static method is the primary way to construct a VectorType.
static VectorType *get(Type *ElementType, unsigned NumElements);
@@ -420,13 +423,10 @@ public:
/// Return true if the specified type is valid as an element type.
static bool isValidElementType(Type *ElemTy);
- /// Return the number of elements in the Vector type.
- unsigned getNumElements() const { return NumElements; }
-
/// Return the number of bits in the Vector type.
/// Returns zero when the vector is a vector of pointers.
unsigned getBitWidth() const {
- return NumElements * getElementType()->getPrimitiveSizeInBits();
+ return getNumElements() * getElementType()->getPrimitiveSizeInBits();
}
/// Methods for support type inquiry through isa, cast, and dyn_cast.
@@ -440,12 +440,15 @@ unsigned Type::getVectorNumElements() const {
}
/// Class to represent pointers.
-class PointerType : public SequentialType {
- PointerType(const PointerType &) = delete;
- const PointerType &operator=(const PointerType &) = delete;
+class PointerType : public Type {
explicit PointerType(Type *ElType, unsigned AddrSpace);
+ Type *PointeeTy;
+
public:
+ PointerType(const PointerType &) = delete;
+ PointerType &operator=(const PointerType &) = delete;
+
/// This constructs a pointer to an object of the specified type in a numbered
/// address space.
static PointerType *get(Type *ElementType, unsigned AddressSpace);
@@ -456,6 +459,8 @@ public:
return PointerType::get(ElementType, 0);
}
+ Type *getElementType() const { return PointeeTy; }
+
/// Return true if the specified type is valid as an element type.
static bool isValidElementType(Type *ElemTy);
@@ -475,6 +480,6 @@ unsigned Type::getPointerAddressSpace() const {
return cast<PointerType>(getScalarType())->getAddressSpace();
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_DERIVEDTYPES_H
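With PointerType detached from SequentialType, code that previously handled arrays, vectors, and pointers through one SequentialType cast now needs two paths; a sketch of the adjusted pattern (the helper name is made up):

  Type *getContainedElementType(Type *Ty) {
    // Arrays and vectors still share SequentialType.
    if (auto *Seq = dyn_cast<SequentialType>(Ty))
      return Seq->getElementType();
    // Pointers now expose their pointee directly on PointerType.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return PTy->getElementType();
    return nullptr;
  }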
diff --git a/include/llvm/IR/DiagnosticInfo.h b/include/llvm/IR/DiagnosticInfo.h
index 1c78684da64d..a93c180df1b5 100644
--- a/include/llvm/IR/DiagnosticInfo.h
+++ b/include/llvm/IR/DiagnosticInfo.h
@@ -15,13 +15,19 @@
#ifndef LLVM_IR_DIAGNOSTICINFO_H
#define LLVM_IR_DIAGNOSTICINFO_H
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/YAMLTraits.h"
#include "llvm-c/Types.h"
#include <functional>
+#include <algorithm>
+#include <cstdint>
+#include <iterator>
#include <string>
namespace llvm {
@@ -47,13 +53,13 @@ enum DiagnosticSeverity : char {
/// \brief Defines the different supported kinds of diagnostics.
/// This enum should be extended with a new ID for each added concrete subclass.
enum DiagnosticKind {
- DK_Bitcode,
DK_InlineAsm,
DK_ResourceLimit,
DK_StackSize,
DK_Linker,
DK_DebugMetadataVersion,
DK_DebugMetadataInvalid,
+ DK_ISelFallback,
DK_SampleProfile,
DK_OptimizationRemark,
DK_OptimizationRemarkMissed,
@@ -95,7 +101,7 @@ public:
DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity)
: Kind(Kind), Severity(Severity) {}
- virtual ~DiagnosticInfo() {}
+ virtual ~DiagnosticInfo() = default;
/* DiagnosticKind */ int getKind() const { return Kind; }
DiagnosticSeverity getSeverity() const { return Severity; }
@@ -272,7 +278,6 @@ public:
}
};
-
/// Diagnostic information for the sample profiler.
class DiagnosticInfoSampleProfile : public DiagnosticInfo {
public:
@@ -374,6 +379,65 @@ private:
/// Common features for diagnostics dealing with optimization remarks.
class DiagnosticInfoOptimizationBase : public DiagnosticInfoWithDebugLocBase {
public:
+ /// \brief Used to set IsVerbose via the stream interface.
+ struct setIsVerbose {};
+
+ /// \brief When an instance of this is inserted into the stream, the arguments
+ /// following will not appear in the remark printed in the compiler output
+ /// (-Rpass) but only in the optimization record file
+ /// (-fsave-optimization-record).
+ struct setExtraArgs {};
+
+ /// \brief Used in the streaming interface as the general argument type. It
+ /// internally converts everything into a key-value pair.
+ struct Argument {
+ StringRef Key;
+ std::string Val;
+ // If set, the debug location corresponding to the value.
+ DebugLoc DLoc;
+
+ explicit Argument(StringRef Str = "") : Key("String"), Val(Str) {}
+ Argument(StringRef Key, Value *V);
+ Argument(StringRef Key, Type *T);
+ Argument(StringRef Key, int N);
+ Argument(StringRef Key, unsigned N);
+ Argument(StringRef Key, bool B) : Key(Key), Val(B ? "true" : "false") {}
+ };
+
+ /// \p PassName is the name of the pass emitting this diagnostic. \p
+ /// RemarkName is a textual identifier for the remark. \p Fn is the function
+ /// where the diagnostic is being emitted. \p DLoc is the location information
+ /// to use in the diagnostic. If line table information is available, the
+ /// diagnostic will include the source code location. \p CodeRegion is the IR
+ /// value (currently a basic block) that the optimization operates on. This is
+ /// currently used to provide run-time hotness information with PGO.
+ DiagnosticInfoOptimizationBase(enum DiagnosticKind Kind,
+ enum DiagnosticSeverity Severity,
+ const char *PassName, StringRef RemarkName,
+ const Function &Fn, const DebugLoc &DLoc,
+ Value *CodeRegion = nullptr)
+ : DiagnosticInfoWithDebugLocBase(Kind, Severity, Fn, DLoc),
+ PassName(PassName), RemarkName(RemarkName), CodeRegion(CodeRegion) {}
+
+ /// \brief This ctor variant allows a pass to build an optimization remark
+ /// from an existing remark.
+ ///
+ /// This is useful when a transformation pass (e.g. LV) wants to emit a remark
+ /// (\p Orig) generated by one of its analyses (e.g. LAA) as its own analysis
+ /// remark. The string \p Prepend will be emitted before the original
+ /// message.
+ DiagnosticInfoOptimizationBase(const char *PassName, StringRef Prepend,
+ const DiagnosticInfoOptimizationBase &Orig)
+ : DiagnosticInfoWithDebugLocBase((DiagnosticKind)Orig.getKind(),
+ Orig.getSeverity(), Orig.getFunction(),
+ Orig.getDebugLoc()),
+ PassName(PassName), RemarkName(Orig.RemarkName),
+ CodeRegion(Orig.getCodeRegion()) {
+ *this << Prepend;
+ std::copy(Orig.Args.begin(), Orig.Args.end(), std::back_inserter(Args));
+ }
+
+ /// Legacy interface.
/// \p PassName is the name of the pass emitting this diagnostic.
/// \p Fn is the function where the diagnostic is being emitted. \p DLoc is
/// the location information to use in the diagnostic. If line table
@@ -387,7 +451,14 @@ public:
const DebugLoc &DLoc, const Twine &Msg,
Optional<uint64_t> Hotness = None)
: DiagnosticInfoWithDebugLocBase(Kind, Severity, Fn, DLoc),
- PassName(PassName), Msg(Msg), Hotness(Hotness) {}
+ PassName(PassName), Hotness(Hotness) {
+ Args.push_back(Argument(Msg.str()));
+ }
+
+ DiagnosticInfoOptimizationBase &operator<<(StringRef S);
+ DiagnosticInfoOptimizationBase &operator<<(Argument A);
+ DiagnosticInfoOptimizationBase &operator<<(setIsVerbose V);
+ DiagnosticInfoOptimizationBase &operator<<(setExtraArgs EA);
/// \see DiagnosticInfo::print.
void print(DiagnosticPrinter &DP) const override;
@@ -399,8 +470,14 @@ public:
/// in BackendConsumer::OptimizationRemarkHandler).
virtual bool isEnabled() const = 0;
- const char *getPassName() const { return PassName; }
- const Twine &getMsg() const { return Msg; }
+ StringRef getPassName() const { return PassName; }
+ std::string getMsg() const;
+ Optional<uint64_t> getHotness() const { return Hotness; }
+ void setHotness(Optional<uint64_t> H) { Hotness = H; }
+
+ Value *getCodeRegion() const { return CodeRegion; }
+
+ bool isVerbose() const { return IsVerbose; }
static bool classof(const DiagnosticInfo *DI) {
return DI->getKind() >= DK_FirstRemark &&
@@ -413,16 +490,34 @@ private:
/// be emitted.
const char *PassName;
- /// Message to report.
- const Twine &Msg;
+ /// Textual identifier for the remark. Can be used by external tools reading
+ /// the YAML output file for optimization remarks to identify the remark.
+ StringRef RemarkName;
/// If profile information is available, this is the number of times the
/// corresponding code was executed in a profile instrumentation run.
Optional<uint64_t> Hotness;
+
+ /// The IR value (currently basic block) that the optimization operates on.
+ /// This is currently used to provide run-time hotness information with PGO.
+ Value *CodeRegion;
+
+ /// Arguments collected via the streaming interface.
+ SmallVector<Argument, 4> Args;
+
+ /// The remark is expected to be noisy.
+ bool IsVerbose = false;
+
+ /// \brief If positive, the index of the first argument that only appears in
+ /// the optimization records and not in the remark printed in the compiler
+ /// output.
+ int FirstExtraArgIndex = -1;
+
+ friend struct yaml::MappingTraits<DiagnosticInfoOptimizationBase *>;
};
/// Diagnostic information for applied optimization remarks.
-class DiagnosticInfoOptimizationRemark : public DiagnosticInfoOptimizationBase {
+class OptimizationRemark : public DiagnosticInfoOptimizationBase {
public:
/// \p PassName is the name of the pass emitting this diagnostic. If
/// this name matches the regular expression given in -Rpass=, then the
@@ -432,10 +527,24 @@ public:
/// will include the source code location. \p Msg is the message to show.
/// Note that this class does not copy this message, so this reference
/// must be valid for the whole lifetime of the diagnostic.
- DiagnosticInfoOptimizationRemark(const char *PassName, const Function &Fn,
- const DebugLoc &DLoc, const Twine &Msg)
+ OptimizationRemark(const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg,
+ Optional<uint64_t> Hotness = None)
: DiagnosticInfoOptimizationBase(DK_OptimizationRemark, DS_Remark,
- PassName, Fn, DLoc, Msg) {}
+ PassName, Fn, DLoc, Msg, Hotness) {}
+
+ /// \p PassName is the name of the pass emitting this diagnostic. If this name
+ /// matches the regular expression given in -Rpass=, then the diagnostic will
+ /// be emitted. \p RemarkName is a textual identifier for the remark. \p
+ /// DLoc is the debug location and \p CodeRegion is the region that the
+ /// optimization operates on (currently only a block is supported).
+ OptimizationRemark(const char *PassName, StringRef RemarkName,
+ const DebugLoc &DLoc, Value *CodeRegion);
+
+ /// Same as above but the debug location and code region are derived from \p
+ /// Inst.
+ OptimizationRemark(const char *PassName, StringRef RemarkName,
+ Instruction *Inst);
static bool classof(const DiagnosticInfo *DI) {
return DI->getKind() == DK_OptimizationRemark;
@@ -446,8 +555,7 @@ public:
};
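// Illustrative usage sketch (assumes llvm/IR/DiagnosticInfo.h and
// llvm/IR/LLVMContext.h are included): a pass builds a remark with the new
// (PassName, RemarkName, Inst) constructor and appends key/value arguments via
// the streaming operators declared on the base class. The pass name "my-pass",
// the remark id "Widened", and the direct LLVMContext::diagnose() call are
// assumptions of this example, not APIs introduced by the patch.
static void emitWidenRemarkSketch(Function &F, Instruction *I) {
  OptimizationRemark R("my-pass", "Widened", I);
  R << "widened operation to width "
    << OptimizationRemark::Argument("VectorWidth", 4u) // key/value pair
    << OptimizationRemark::setExtraArgs()              // later args go only to the YAML record
    << OptimizationRemark::Argument("Cost", 7);
  F.getContext().diagnose(R); // hand the remark to the context's diagnostic handler
}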
/// Diagnostic information for missed-optimization remarks.
-class DiagnosticInfoOptimizationRemarkMissed
- : public DiagnosticInfoOptimizationBase {
+class OptimizationRemarkMissed : public DiagnosticInfoOptimizationBase {
public:
/// \p PassName is the name of the pass emitting this diagnostic. If
/// this name matches the regular expression given in -Rpass-missed=, then the
@@ -457,13 +565,25 @@ public:
/// will include the source code location. \p Msg is the message to show.
/// Note that this class does not copy this message, so this reference
/// must be valid for the whole lifetime of the diagnostic.
- DiagnosticInfoOptimizationRemarkMissed(const char *PassName,
- const Function &Fn,
- const DebugLoc &DLoc, const Twine &Msg,
- Optional<uint64_t> Hotness = None)
+ OptimizationRemarkMissed(const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg,
+ Optional<uint64_t> Hotness = None)
: DiagnosticInfoOptimizationBase(DK_OptimizationRemarkMissed, DS_Remark,
PassName, Fn, DLoc, Msg, Hotness) {}
+ /// \p PassName is the name of the pass emitting this diagnostic. If this name
+ /// matches the regular expression given in -Rpass-missed=, then the
+ /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+ /// remark. \p DLoc is the debug location and \p CodeRegion is the region
+ /// that the optimization operates on (currently only a block is supported).
+ OptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
+ const DebugLoc &DLoc, Value *CodeRegion);
+
+ /// \brief Same as above but \p Inst is used to derive code region and debug
+ /// location.
+ OptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
+ Instruction *Inst);
+
static bool classof(const DiagnosticInfo *DI) {
return DI->getKind() == DK_OptimizationRemarkMissed;
}
@@ -473,8 +593,7 @@ public:
};
/// Diagnostic information for optimization analysis remarks.
-class DiagnosticInfoOptimizationRemarkAnalysis
- : public DiagnosticInfoOptimizationBase {
+class OptimizationRemarkAnalysis : public DiagnosticInfoOptimizationBase {
public:
/// \p PassName is the name of the pass emitting this diagnostic. If
/// this name matches the regular expression given in -Rpass-analysis=, then
@@ -484,12 +603,35 @@ public:
/// include the source code location. \p Msg is the message to show. Note that
/// this class does not copy this message, so this reference must be valid for
/// the whole lifetime of the diagnostic.
- DiagnosticInfoOptimizationRemarkAnalysis(const char *PassName,
- const Function &Fn,
- const DebugLoc &DLoc,
- const Twine &Msg)
+ OptimizationRemarkAnalysis(const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg,
+ Optional<uint64_t> Hotness = None)
: DiagnosticInfoOptimizationBase(DK_OptimizationRemarkAnalysis, DS_Remark,
- PassName, Fn, DLoc, Msg) {}
+ PassName, Fn, DLoc, Msg, Hotness) {}
+
+ /// \p PassName is the name of the pass emitting this diagnostic. If this name
+ /// matches the regular expression given in -Rpass-analysis=, then the
+ /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+ /// remark. \p DLoc is the debug location and \p CodeRegion is the region
+ /// that the optimization operates on (currently only a block is supported).
+ OptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
+ const DebugLoc &DLoc, Value *CodeRegion);
+
+ /// \brief This ctor variant allows a pass to build an optimization remark
+ /// from an existing remark.
+ ///
+ /// This is useful when a transformation pass (e.g. LV) wants to emit a remark
+ /// (\p Orig) generated by one of its analyses (e.g. LAA) as its own analysis
+ /// remark. The string \p Prepend will be emitted before the original
+ /// message.
+ OptimizationRemarkAnalysis(const char *PassName, StringRef Prepend,
+ const OptimizationRemarkAnalysis &Orig)
+ : DiagnosticInfoOptimizationBase(PassName, Prepend, Orig) {}
+
+ /// \brief Same as above but \p Inst is used to derive code region and debug
+ /// location.
+ OptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
+ Instruction *Inst);
static bool classof(const DiagnosticInfo *DI) {
return DI->getKind() == DK_OptimizationRemarkAnalysis;
@@ -503,19 +645,20 @@ public:
bool shouldAlwaysPrint() const { return getPassName() == AlwaysPrint; }
protected:
- DiagnosticInfoOptimizationRemarkAnalysis(enum DiagnosticKind Kind,
- const char *PassName,
- const Function &Fn,
- const DebugLoc &DLoc,
- const Twine &Msg)
- : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, Fn, DLoc,
- Msg) {}
+ OptimizationRemarkAnalysis(enum DiagnosticKind Kind, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg, Optional<uint64_t> Hotness)
+ : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, Fn, DLoc, Msg,
+ Hotness) {}
+
+ OptimizationRemarkAnalysis(enum DiagnosticKind Kind, const char *PassName,
+ StringRef RemarkName, const DebugLoc &DLoc,
+ Value *CodeRegion);
};
/// Diagnostic information for optimization analysis remarks related to
/// floating-point non-commutativity.
-class DiagnosticInfoOptimizationRemarkAnalysisFPCommute
- : public DiagnosticInfoOptimizationRemarkAnalysis {
+class OptimizationRemarkAnalysisFPCommute : public OptimizationRemarkAnalysis {
public:
/// \p PassName is the name of the pass emitting this diagnostic. If
/// this name matches the regular expression given in -Rpass-analysis=, then
@@ -527,12 +670,24 @@ public:
/// floating-point non-commutativity. Note that this class does not copy this
/// message, so this reference must be valid for the whole lifetime of the
/// diagnostic.
- DiagnosticInfoOptimizationRemarkAnalysisFPCommute(const char *PassName,
- const Function &Fn,
- const DebugLoc &DLoc,
- const Twine &Msg)
- : DiagnosticInfoOptimizationRemarkAnalysis(
- DK_OptimizationRemarkAnalysisFPCommute, PassName, Fn, DLoc, Msg) {}
+ OptimizationRemarkAnalysisFPCommute(const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg,
+ Optional<uint64_t> Hotness = None)
+ : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisFPCommute,
+ PassName, Fn, DLoc, Msg, Hotness) {}
+
+ /// \p PassName is the name of the pass emitting this diagnostic. If this name
+ /// matches the regular expression given in -Rpass-analysis=, then the
+ /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+ /// remark. \p DLoc is the debug location and \p CodeRegion is the region
+ /// that the optimization operates on (currently only a block is supported). The
+ /// front-end will append its own message related to options that address
+ /// floating-point non-commutativity.
+ OptimizationRemarkAnalysisFPCommute(const char *PassName,
+ StringRef RemarkName,
+ const DebugLoc &DLoc, Value *CodeRegion)
+ : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisFPCommute,
+ PassName, RemarkName, DLoc, CodeRegion) {}
static bool classof(const DiagnosticInfo *DI) {
return DI->getKind() == DK_OptimizationRemarkAnalysisFPCommute;
@@ -541,8 +696,7 @@ public:
/// Diagnostic information for optimization analysis remarks related to
/// pointer aliasing.
-class DiagnosticInfoOptimizationRemarkAnalysisAliasing
- : public DiagnosticInfoOptimizationRemarkAnalysis {
+class OptimizationRemarkAnalysisAliasing : public OptimizationRemarkAnalysis {
public:
/// \p PassName is the name of the pass emitting this diagnostic. If
/// this name matches the regular expression given in -Rpass-analysis=, then
@@ -554,12 +708,23 @@ public:
/// pointer aliasing legality. Note that this class does not copy this
/// message, so this reference must be valid for the whole lifetime of the
/// diagnostic.
- DiagnosticInfoOptimizationRemarkAnalysisAliasing(const char *PassName,
- const Function &Fn,
- const DebugLoc &DLoc,
- const Twine &Msg)
- : DiagnosticInfoOptimizationRemarkAnalysis(
- DK_OptimizationRemarkAnalysisAliasing, PassName, Fn, DLoc, Msg) {}
+ OptimizationRemarkAnalysisAliasing(const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg,
+ Optional<uint64_t> Hotness = None)
+ : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisAliasing,
+ PassName, Fn, DLoc, Msg, Hotness) {}
+
+ /// \p PassName is the name of the pass emitting this diagnostic. If this name
+ /// matches the regular expression given in -Rpass-analysis=, then the
+ /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
+ /// remark. \p DLoc is the debug location and \p CodeRegion is the region
+ /// that the optimization operates on (currently only a block is supported). The
+ /// front-end will append its own message related to options that address
+ /// pointer aliasing legality.
+ OptimizationRemarkAnalysisAliasing(const char *PassName, StringRef RemarkName,
+ const DebugLoc &DLoc, Value *CodeRegion)
+ : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisAliasing,
+ PassName, RemarkName, DLoc, CodeRegion) {}
static bool classof(const DiagnosticInfo *DI) {
return DI->getKind() == DK_OptimizationRemarkAnalysisAliasing;
@@ -584,6 +749,25 @@ public:
}
};
+/// Diagnostic information for ISel fallback path.
+class DiagnosticInfoISelFallback : public DiagnosticInfo {
+ /// The function to which this diagnostic applies.
+ const Function &Fn;
+
+public:
+ DiagnosticInfoISelFallback(const Function &Fn,
+ DiagnosticSeverity Severity = DS_Warning)
+ : DiagnosticInfo(DK_ISelFallback, Severity), Fn(Fn) {}
+
+ const Function &getFunction() const { return Fn; }
+
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_ISelFallback;
+ }
+};
+
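// Illustrative usage sketch: an instruction selector that has to bail out on a
// function could report it through the context; the helper below is an assumed
// example, not an API added by this patch.
static void reportISelFallbackSketch(Function &F) {
  F.getContext().diagnose(DiagnosticInfoISelFallback(F)); // severity defaults to DS_Warning
}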
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DiagnosticInfo, LLVMDiagnosticInfoRef)
diff --git a/include/llvm/IR/DiagnosticPrinter.h b/include/llvm/IR/DiagnosticPrinter.h
index 1bcd73738b66..59c83291affa 100644
--- a/include/llvm/IR/DiagnosticPrinter.h
+++ b/include/llvm/IR/DiagnosticPrinter.h
@@ -19,6 +19,7 @@
#include <string>
namespace llvm {
+
// Forward declarations.
class Module;
class raw_ostream;
@@ -30,7 +31,7 @@ class Value;
/// \brief Interface for custom diagnostic printing.
class DiagnosticPrinter {
public:
- virtual ~DiagnosticPrinter() {}
+ virtual ~DiagnosticPrinter() = default;
// Simple types.
virtual DiagnosticPrinter &operator<<(char C) = 0;
@@ -89,6 +90,7 @@ public:
// Other types.
DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) override;
};
-} // End namespace llvm
-#endif
+} // end namespace llvm
+
+#endif // LLVM_IR_DIAGNOSTICPRINTER_H
diff --git a/include/llvm/IR/Dominators.h b/include/llvm/IR/Dominators.h
index f445a49b67b8..7c733bac8da0 100644
--- a/include/llvm/IR/Dominators.h
+++ b/include/llvm/IR/Dominators.h
@@ -33,9 +33,9 @@ extern template class DomTreeNodeBase<BasicBlock>;
extern template class DominatorTreeBase<BasicBlock>;
extern template void Calculate<Function, BasicBlock *>(
- DominatorTreeBase<GraphTraits<BasicBlock *>::NodeType> &DT, Function &F);
+ DominatorTreeBaseByGraphTraits<GraphTraits<BasicBlock *>> &DT, Function &F);
extern template void Calculate<Function, Inverse<BasicBlock *>>(
- DominatorTreeBase<GraphTraits<Inverse<BasicBlock *>>::NodeType> &DT,
+ DominatorTreeBaseByGraphTraits<GraphTraits<Inverse<BasicBlock *>>> &DT,
Function &F);
typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
@@ -102,13 +102,6 @@ public:
recalculate(F);
}
- DominatorTree(DominatorTree &&Arg)
- : Base(std::move(static_cast<Base &>(Arg))) {}
- DominatorTree &operator=(DominatorTree &&RHS) {
- Base::operator=(std::move(static_cast<Base &>(RHS)));
- return *this;
- }
-
/// \brief Returns *false* if the other dominator tree matches this dominator
/// tree.
inline bool compare(const DominatorTree &Other) const {
@@ -155,23 +148,19 @@ public:
// iterable by generic graph iterators.
template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
- typedef Node NodeType;
+ typedef Node *NodeRef;
typedef ChildIterator ChildIteratorType;
- typedef df_iterator<Node *, SmallPtrSet<NodeType *, 8>> nodes_iterator;
+ typedef df_iterator<Node *, df_iterator_default_set<Node*>> nodes_iterator;
- static NodeType *getEntryNode(NodeType *N) { return N; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) { return N->end(); }
+ static NodeRef getEntryNode(NodeRef N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->end(); }
- static nodes_iterator nodes_begin(NodeType *N) {
+ static nodes_iterator nodes_begin(NodeRef N) {
return df_begin(getEntryNode(N));
}
- static nodes_iterator nodes_end(NodeType *N) {
- return df_end(getEntryNode(N));
- }
+ static nodes_iterator nodes_end(NodeRef N) { return df_end(getEntryNode(N)); }
};
template <>
@@ -185,9 +174,7 @@ struct GraphTraits<const DomTreeNode *>
template <> struct GraphTraits<DominatorTree*>
: public GraphTraits<DomTreeNode*> {
- static NodeType *getEntryNode(DominatorTree *DT) {
- return DT->getRootNode();
- }
+ static NodeRef getEntryNode(DominatorTree *DT) { return DT->getRootNode(); }
static nodes_iterator nodes_begin(DominatorTree *N) {
return df_begin(getEntryNode(N));
@@ -201,14 +188,14 @@ template <> struct GraphTraits<DominatorTree*>
/// \brief Analysis pass which computes a \c DominatorTree.
class DominatorTreeAnalysis : public AnalysisInfoMixin<DominatorTreeAnalysis> {
friend AnalysisInfoMixin<DominatorTreeAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
typedef DominatorTree Result;
/// \brief Run the analysis pass over a function and produce a dominator tree.
- DominatorTree run(Function &F, AnalysisManager<Function> &);
+ DominatorTree run(Function &F, FunctionAnalysisManager &);
};
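// Illustrative usage sketch, assuming a FunctionAnalysisManager is available:
// the analysis is now keyed by an AnalysisKey and takes a
// FunctionAnalysisManager in run(). Registering the pass by hand as below is an
// assumption of this example; in-tree clients normally let PassBuilder register
// all analyses.
static bool entryDominatesSketch(Function &F, BasicBlock *BB) {
  FunctionAnalysisManager FAM;
  FAM.registerPass([] { return DominatorTreeAnalysis(); });
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  return DT.dominates(&F.getEntryBlock(), BB); // query the freshly computed tree
}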
/// \brief Printer pass for the \c DominatorTree.
@@ -218,12 +205,12 @@ class DominatorTreePrinterPass
public:
explicit DominatorTreePrinterPass(raw_ostream &OS);
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Verifier pass for the \c DominatorTree.
struct DominatorTreeVerifierPass : PassInfoMixin<DominatorTreeVerifierPass> {
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes a \c DominatorTree.
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index d7d27e7585c1..1854d413c627 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -18,26 +18,33 @@
#ifndef LLVM_IR_FUNCTION_H
#define LLVM_IR_FUNCTION_H
+#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
namespace llvm {
template <typename T> class Optional;
+class AssemblyAnnotationWriter;
class FunctionType;
class LLVMContext;
class DISubprogram;
-template <>
-struct SymbolTableListSentinelTraits<Argument>
- : public ilist_half_embedded_sentinel_traits<Argument> {};
-
class Function : public GlobalObject, public ilist_node<Function> {
public:
typedef SymbolTableList<Argument> ArgumentListType;
@@ -54,7 +61,8 @@ private:
// Important things that make up a function!
BasicBlockListType BasicBlocks; ///< The basic blocks
mutable ArgumentListType ArgumentList; ///< The formal arguments
- ValueSymbolTable *SymTab; ///< Symbol table of args/instructions
+ std::unique_ptr<ValueSymbolTable>
+ SymTab; ///< Symbol table of args/instructions
AttributeSet AttributeSets; ///< Parameter attributes
/*
@@ -77,8 +85,6 @@ private:
friend class SymbolTableListTraits<Function>;
- void setParent(Module *parent);
-
/// hasLazyArguments/CheckLazyArguments - The argument list of a function is
/// built on demand, so that the list isn't allocated until the first client
/// needs it. The hasLazyArguments predicate returns true if the arg list
@@ -93,10 +99,8 @@ private:
if (hasLazyArguments())
BuildLazyArguments();
}
- void BuildLazyArguments() const;
- Function(const Function&) = delete;
- void operator=(const Function&) = delete;
+ void BuildLazyArguments() const;
/// Function ctor - If the (optional) Module argument is specified, the
/// function is automatically inserted into the end of the function list for
@@ -106,18 +110,21 @@ private:
const Twine &N = "", Module *M = nullptr);
public:
+ Function(const Function&) = delete;
+ void operator=(const Function&) = delete;
+ ~Function() override;
+
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N = "", Module *M = nullptr) {
return new Function(Ty, Linkage, N, M);
}
- ~Function() override;
-
- /// \brief Provide fast operand accessors
+ // Provide fast operand accessors.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
-
- Type *getReturnType() const; // Return the type of the ret val
- FunctionType *getFunctionType() const; // Return the FunctionType for me
+ /// Returns the type of the ret val.
+ Type *getReturnType() const;
+ /// Returns the FunctionType for me.
+ FunctionType *getFunctionType() const;
/// getContext - Return a reference to the LLVMContext associated with this
/// function.
@@ -137,7 +144,13 @@ public:
/// The particular intrinsic functions which correspond to this value are
/// defined in llvm/Intrinsics.h.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY { return IntID; }
- bool isIntrinsic() const { return getName().startswith("llvm."); }
+
+ /// isIntrinsic - Returns true if the function's name starts with "llvm.".
+ /// It's possible for this function to return true while getIntrinsicID()
+ /// returns Intrinsic::not_intrinsic!
+ bool isIntrinsic() const { return HasLLVMReservedName; }
+
+ static Intrinsic::ID lookupIntrinsicID(StringRef Name);
/// \brief Recalculate the ID for this function if it is an Intrinsic defined
/// in llvm/Intrinsics.h. Sets the intrinsic ID to Intrinsic::not_intrinsic
@@ -166,41 +179,55 @@ public:
void setAttributes(AttributeSet Attrs) { AttributeSets = Attrs; }
/// @brief Add function attributes to this function.
- void addFnAttr(Attribute::AttrKind N) {
- setAttributes(AttributeSets.addAttribute(getContext(),
- AttributeSet::FunctionIndex, N));
+ void addFnAttr(Attribute::AttrKind Kind) {
+ addAttribute(AttributeSet::FunctionIndex, Kind);
+ }
+
+ /// @brief Add function attributes to this function.
+ void addFnAttr(StringRef Kind, StringRef Val = StringRef()) {
+ addAttribute(AttributeSet::FunctionIndex,
+ Attribute::get(getContext(), Kind, Val));
+ }
+
+ void addFnAttr(Attribute Attr) {
+ addAttribute(AttributeSet::FunctionIndex, Attr);
}
/// @brief Remove function attributes from this function.
void removeFnAttr(Attribute::AttrKind Kind) {
- setAttributes(AttributeSets.removeAttribute(
- getContext(), AttributeSet::FunctionIndex, Kind));
+ removeAttribute(AttributeSet::FunctionIndex, Kind);
}
- /// @brief Add function attributes to this function.
- void addFnAttr(StringRef Kind) {
- setAttributes(
- AttributeSets.addAttribute(getContext(),
- AttributeSet::FunctionIndex, Kind));
- }
- void addFnAttr(StringRef Kind, StringRef Value) {
- setAttributes(
- AttributeSets.addAttribute(getContext(),
- AttributeSet::FunctionIndex, Kind, Value));
+ /// @brief Remove function attribute from this function.
+ void removeFnAttr(StringRef Kind) {
+ setAttributes(AttributeSets.removeAttribute(
+ getContext(), AttributeSet::FunctionIndex, Kind));
}
- /// Set the entry count for this function.
+ /// \brief Set the entry count for this function.
+ ///
+ /// Entry count is the number of times this function was executed based on
+ /// PGO data.
void setEntryCount(uint64_t Count);
- /// Get the entry count for this function.
+ /// \brief Get the entry count for this function.
+ ///
+ /// Entry count is the number of times the function was executed based on
+ /// PGO data.
Optional<uint64_t> getEntryCount() const;
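// Illustrative usage sketch: the entry count is optional, so callers unwrap it
// only when profile data was attached; the zero fallback is an assumption of
// the example.
static uint64_t entryCountOrZeroSketch(const Function &F) {
  if (Optional<uint64_t> Count = F.getEntryCount())
    return *Count; // times the function was entered according to the profile
  return 0;        // no PGO data attached to this function
}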
+ /// Set the section prefix for this function.
+ void setSectionPrefix(StringRef Prefix);
+
+ /// Get the section prefix for this function.
+ Optional<StringRef> getSectionPrefix() const;
+
/// @brief Return true if the function has the attribute.
bool hasFnAttribute(Attribute::AttrKind Kind) const {
return AttributeSets.hasFnAttribute(Kind);
}
bool hasFnAttribute(StringRef Kind) const {
- return AttributeSets.hasAttribute(AttributeSet::FunctionIndex, Kind);
+ return AttributeSets.hasFnAttribute(Kind);
}
/// @brief Return the attribute for the given attribute kind.
@@ -323,7 +350,7 @@ public:
}
/// @brief Determine if the function may only access memory that is
- // either inaccessible from the IR or pointed to by its arguments.
+ /// either inaccessible from the IR or pointed to by its arguments.
bool onlyAccessesInaccessibleMemOrArgMem() const {
return hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly);
}
@@ -431,7 +458,7 @@ public:
}
/// Optimize this function for minimum size (-Oz).
- bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); };
+ bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); }
/// Optimize this function for size (-Os) or minimum size (-Oz).
bool optForSize() const {
@@ -477,12 +504,14 @@ public:
CheckLazyArguments();
return ArgumentList;
}
+
static ArgumentListType Function::*getSublistAccess(Argument*) {
return &Function::ArgumentList;
}
const BasicBlockListType &getBasicBlockList() const { return BasicBlocks; }
BasicBlockListType &getBasicBlockList() { return BasicBlocks; }
+
static BasicBlockListType Function::*getSublistAccess(BasicBlock*) {
return &Function::BasicBlocks;
}
@@ -493,10 +522,12 @@ public:
//===--------------------------------------------------------------------===//
// Symbol Table Accessing functions...
- /// getSymbolTable() - Return the symbol table...
+ /// getValueSymbolTable() - Return the symbol table if any, otherwise nullptr.
///
- inline ValueSymbolTable &getValueSymbolTable() { return *SymTab; }
- inline const ValueSymbolTable &getValueSymbolTable() const { return *SymTab; }
+ inline ValueSymbolTable *getValueSymbolTable() { return SymTab.get(); }
+ inline const ValueSymbolTable *getValueSymbolTable() const {
+ return SymTab.get();
+ }
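// Illustrative usage sketch: with the table owned by a unique_ptr the accessor
// may return null, so callers test the pointer first. The symbol name "retval"
// is an assumption of the example.
static Value *lookupLocalSketch(Function &F) {
  if (ValueSymbolTable *VST = F.getValueSymbolTable())
    return VST->lookup("retval"); // nullptr when the name is absent
  return nullptr;                 // function has no symbol table at all
}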
//===--------------------------------------------------------------------===//
// BasicBlock iterator forwarding functions
@@ -524,6 +555,7 @@ public:
CheckLazyArguments();
return ArgumentList.begin();
}
+
arg_iterator arg_end() {
CheckLazyArguments();
return ArgumentList.end();
@@ -536,7 +568,6 @@ public:
iterator_range<arg_iterator> args() {
return make_range(arg_begin(), arg_end());
}
-
iterator_range<const_arg_iterator> args() const {
return make_range(arg_begin(), arg_end());
}
@@ -644,8 +675,8 @@ private:
void allocHungoffUselist();
template<int Idx> void setHungoffOperand(Constant *C);
- // Shadow Value::setValueSubclassData with a private forwarding method so that
- // subclasses cannot accidentally use it.
+ /// Shadow Value::setValueSubclassData with a private forwarding method so
+ /// that subclasses cannot accidentally use it.
void setValueSubclassData(unsigned short D) {
Value::setValueSubclassData(D);
}
@@ -657,6 +688,6 @@ struct OperandTraits<Function> : public HungoffOperandTraits<3> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(Function, Value)
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_FUNCTION_H
diff --git a/include/llvm/IR/GVMaterializer.h b/include/llvm/IR/GVMaterializer.h
index 9e47722c892b..675abeb6ec3a 100644
--- a/include/llvm/IR/GVMaterializer.h
+++ b/include/llvm/IR/GVMaterializer.h
@@ -18,36 +18,35 @@
#ifndef LLVM_IR_GVMATERIALIZER_H
#define LLVM_IR_GVMATERIALIZER_H
-#include <system_error>
#include <vector>
namespace llvm {
-class Function;
+
+class Error;
class GlobalValue;
-class Module;
class StructType;
class GVMaterializer {
protected:
- GVMaterializer() {}
+ GVMaterializer() = default;
public:
virtual ~GVMaterializer();
/// Make sure the given GlobalValue is fully read.
///
- virtual std::error_code materialize(GlobalValue *GV) = 0;
+ virtual Error materialize(GlobalValue *GV) = 0;
/// Make sure the entire Module has been completely read.
///
- virtual std::error_code materializeModule() = 0;
+ virtual Error materializeModule() = 0;
- virtual std::error_code materializeMetadata() = 0;
+ virtual Error materializeMetadata() = 0;
virtual void setStripDebugInfo() = 0;
virtual std::vector<StructType *> getIdentifiedStructTypes() const = 0;
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_GVMATERIALIZER_H
diff --git a/include/llvm/IR/GetElementPtrTypeIterator.h b/include/llvm/IR/GetElementPtrTypeIterator.h
index 4953aebbe8aa..490bff29cf38 100644
--- a/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -15,12 +15,17 @@
#ifndef LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Casting.h"
+#include <cstddef>
+#include <iterator>
namespace llvm {
+
template<typename ItTy = User::const_op_iterator>
class generic_gep_type_iterator
: public std::iterator<std::forward_iterator_tag, Type *, ptrdiff_t> {
@@ -28,20 +33,19 @@ namespace llvm {
Type *, ptrdiff_t> super;
ItTy OpIt;
- PointerIntPair<Type *, 1> CurTy;
- unsigned AddrSpace;
- generic_gep_type_iterator() {}
- public:
+ PointerUnion<StructType *, Type *> CurTy;
+ enum : uint64_t { Unbounded = -1ull };
+ uint64_t NumElements = Unbounded;
+ generic_gep_type_iterator() = default;
- static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
- ItTy It) {
+ public:
+ static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
generic_gep_type_iterator I;
- I.CurTy.setPointer(Ty);
- I.CurTy.setInt(true);
- I.AddrSpace = AddrSpace;
+ I.CurTy = Ty;
I.OpIt = It;
return I;
}
+
static generic_gep_type_iterator end(ItTy It) {
generic_gep_type_iterator I;
I.OpIt = It;
@@ -51,38 +55,30 @@ namespace llvm {
bool operator==(const generic_gep_type_iterator& x) const {
return OpIt == x.OpIt;
}
+
bool operator!=(const generic_gep_type_iterator& x) const {
return !operator==(x);
}
- Type *operator*() const {
- if (CurTy.getInt())
- return CurTy.getPointer()->getPointerTo(AddrSpace);
- return CurTy.getPointer();
- }
-
+ // FIXME: Make this the iterator's operator*() after the 4.0 release.
+ // operator*() had a different meaning in earlier releases, so we're
+ // temporarily not giving this iterator an operator*() to avoid a subtle
+ // semantics break.
Type *getIndexedType() const {
- if (CurTy.getInt())
- return CurTy.getPointer();
- CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
- return CT->getTypeAtIndex(getOperand());
+ if (auto *T = CurTy.dyn_cast<Type *>())
+ return T;
+ return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
}
- // This is a non-standard operator->. It allows you to call methods on the
- // current type directly.
- Type *operator->() const { return operator*(); }
-
Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
generic_gep_type_iterator& operator++() { // Preincrement
- if (CurTy.getInt()) {
- CurTy.setInt(false);
- } else if (CompositeType *CT =
- dyn_cast<CompositeType>(CurTy.getPointer())) {
- CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
- } else {
- CurTy.setPointer(nullptr);
- }
+ Type *Ty = getIndexedType();
+ if (auto *STy = dyn_cast<SequentialType>(Ty)) {
+ CurTy = STy->getElementType();
+ NumElements = STy->getNumElements();
+ } else
+ CurTy = dyn_cast<StructType>(Ty);
++OpIt;
return *this;
}
@@ -90,6 +86,39 @@ namespace llvm {
generic_gep_type_iterator operator++(int) { // Postincrement
generic_gep_type_iterator tmp = *this; ++*this; return tmp;
}
+
+ // All of the below API is for querying properties of the "outer type", i.e.
+ // the type that contains the indexed type. Most of the time this is just
+ // the type that was visited immediately prior to the indexed type, but for
+ // the first element this is an unbounded array of the GEP's source element
+ // type, for which there is no clearly corresponding IR type (we've
+ // historically used a pointer type as the outer type in this case, but
+ // pointers will soon lose their element type).
+ //
+ // FIXME: Most current users of this class are just interested in byte
+ // offsets (a few need to know whether the outer type is a struct because
+ // they are trying to replace a constant with a variable, which is only
+ // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
+ // we should provide a more minimal API here that exposes not much more than
+ // that.
+
+ bool isStruct() const { return CurTy.is<StructType *>(); }
+ bool isSequential() const { return CurTy.is<Type *>(); }
+
+ StructType *getStructType() const { return CurTy.get<StructType *>(); }
+
+ StructType *getStructTypeOrNull() const {
+ return CurTy.dyn_cast<StructType *>();
+ }
+
+ bool isBoundedSequential() const {
+ return isSequential() && NumElements != Unbounded;
+ }
+
+ uint64_t getSequentialNumElements() const {
+ assert(isBoundedSequential());
+ return NumElements;
+ }
};
typedef generic_gep_type_iterator<> gep_type_iterator;
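// Illustrative usage sketch (assumes llvm/IR/DataLayout.h is included): walking
// a GEP with the reworked iterator. Struct steps are distinguished with
// getStructTypeOrNull(); sequential steps scale the index by the element size.
// Handling only constant indices is a simplifying assumption of this example.
static uint64_t constantGEPOffsetSketch(const DataLayout &DL, const User *GEP) {
  uint64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    ConstantInt *Idx = cast<ConstantInt>(GTI.getOperand()); // sketch: constants only
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Struct member: add the field offset from the struct layout.
      Offset += DL.getStructLayout(STy)->getElementOffset(Idx->getZExtValue());
    } else {
      // Array/vector step: scale the index by the indexed element's size.
      Offset += Idx->getSExtValue() * DL.getTypeAllocSize(GTI.getIndexedType());
    }
  }
  return Offset;
}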
@@ -98,36 +127,36 @@ namespace llvm {
auto *GEPOp = cast<GEPOperator>(GEP);
return gep_type_iterator::begin(
GEPOp->getSourceElementType(),
- cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
- ->getAddressSpace(),
GEP->op_begin() + 1);
}
+
inline gep_type_iterator gep_type_end(const User *GEP) {
return gep_type_iterator::end(GEP->op_end());
}
+
inline gep_type_iterator gep_type_begin(const User &GEP) {
auto &GEPOp = cast<GEPOperator>(GEP);
return gep_type_iterator::begin(
GEPOp.getSourceElementType(),
- cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
- ->getAddressSpace(),
GEP.op_begin() + 1);
}
+
inline gep_type_iterator gep_type_end(const User &GEP) {
return gep_type_iterator::end(GEP.op_end());
}
template<typename T>
inline generic_gep_type_iterator<const T *>
- gep_type_begin(Type *Op0, unsigned AS, ArrayRef<T> A) {
- return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin());
+ gep_type_begin(Type *Op0, ArrayRef<T> A) {
+ return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
}
template<typename T>
inline generic_gep_type_iterator<const T *>
- gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<T> A) {
+ gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
return generic_gep_type_iterator<const T *>::end(A.end());
}
+
} // end namespace llvm
-#endif
+#endif // LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
diff --git a/include/llvm/IR/GlobalAlias.h b/include/llvm/IR/GlobalAlias.h
index 3ae3e4a001e1..37a291dfeb7a 100644
--- a/include/llvm/IR/GlobalAlias.h
+++ b/include/llvm/IR/GlobalAlias.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/GlobalIndirectSymbol.h"
+#include "llvm/IR/Value.h"
namespace llvm {
@@ -27,15 +28,14 @@ template <typename ValueSubClass> class SymbolTableListTraits;
class GlobalAlias : public GlobalIndirectSymbol,
public ilist_node<GlobalAlias> {
friend class SymbolTableListTraits<GlobalAlias>;
- void operator=(const GlobalAlias &) = delete;
- GlobalAlias(const GlobalAlias &) = delete;
-
- void setParent(Module *parent);
GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
const Twine &Name, Constant *Aliasee, Module *Parent);
public:
+ GlobalAlias(const GlobalAlias &) = delete;
+ GlobalAlias &operator=(const GlobalAlias &) = delete;
+
/// If a parent module is specified, the alias is automatically inserted into
/// the end of the specified module's alias list.
static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
@@ -89,6 +89,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_GLOBALALIAS_H
diff --git a/include/llvm/IR/GlobalIFunc.h b/include/llvm/IR/GlobalIFunc.h
index 0cbe882c58d8..bfaa9960cb13 100644
--- a/include/llvm/IR/GlobalIFunc.h
+++ b/include/llvm/IR/GlobalIFunc.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/GlobalIndirectSymbol.h"
+#include "llvm/IR/Value.h"
namespace llvm {
@@ -32,15 +33,14 @@ template <typename ValueSubClass> class SymbolTableListTraits;
class GlobalIFunc final : public GlobalIndirectSymbol,
public ilist_node<GlobalIFunc> {
friend class SymbolTableListTraits<GlobalIFunc>;
- void operator=(const GlobalIFunc &) = delete;
- GlobalIFunc(const GlobalIFunc &) = delete;
-
- void setParent(Module *parent);
GlobalIFunc(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
const Twine &Name, Constant *Resolver, Module *Parent);
public:
+ GlobalIFunc(const GlobalIFunc &) = delete;
+ GlobalIFunc &operator=(const GlobalIFunc &) = delete;
+
/// If a parent module is specified, the ifunc is automatically inserted into
/// the end of the specified module's ifunc list.
static GlobalIFunc *create(Type *Ty, unsigned AddressSpace,
@@ -71,6 +71,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_GLOBALIFUNC_H
diff --git a/include/llvm/IR/GlobalIndirectSymbol.h b/include/llvm/IR/GlobalIndirectSymbol.h
index 8edb3d1dbf4b..671309e85d19 100644
--- a/include/llvm/IR/GlobalIndirectSymbol.h
+++ b/include/llvm/IR/GlobalIndirectSymbol.h
@@ -16,20 +16,25 @@
#ifndef LLVM_IR_GLOBALINDIRECTSYMBOL_H
#define LLVM_IR_GLOBALINDIRECTSYMBOL_H
+#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cstddef>
namespace llvm {
class GlobalIndirectSymbol : public GlobalValue {
- void operator=(const GlobalIndirectSymbol &) = delete;
- GlobalIndirectSymbol(const GlobalIndirectSymbol &) = delete;
-
protected:
GlobalIndirectSymbol(Type *Ty, ValueTy VTy, unsigned AddressSpace,
LinkageTypes Linkage, const Twine &Name, Constant *Symbol);
public:
+ GlobalIndirectSymbol(const GlobalIndirectSymbol &) = delete;
+ GlobalIndirectSymbol &operator=(const GlobalIndirectSymbol &) = delete;
+
// allocate space for exactly one operand
void *operator new(size_t s) {
return User::operator new(s, 1);
@@ -79,6 +84,6 @@ struct OperandTraits<GlobalIndirectSymbol> :
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalIndirectSymbol, Constant)
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_GLOBALINDIRECTSYMBOL_H
diff --git a/include/llvm/IR/GlobalObject.h b/include/llvm/IR/GlobalObject.h
index 04737a045ae5..11eb713a4e1d 100644
--- a/include/llvm/IR/GlobalObject.h
+++ b/include/llvm/IR/GlobalObject.h
@@ -15,18 +15,19 @@
#ifndef LLVM_IR_GLOBALOBJECT_H
#define LLVM_IR_GLOBALOBJECT_H
-#include "llvm/IR/DerivedTypes.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Value.h"
+#include <string>
+#include <utility>
namespace llvm {
+
class Comdat;
class MDNode;
class Metadata;
-class Module;
class GlobalObject : public GlobalValue {
- GlobalObject(const GlobalObject &) = delete;
-
protected:
GlobalObject(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
LinkageTypes Linkage, const Twine &Name,
@@ -53,6 +54,8 @@ private:
static const unsigned GlobalObjectMask = (1 << GlobalObjectBits) - 1;
public:
+ GlobalObject(const GlobalObject &) = delete;
+
unsigned getAlignment() const {
unsigned Data = getGlobalValueSubClassData();
unsigned AlignmentData = Data & AlignmentMask;
@@ -141,6 +144,6 @@ private:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_GLOBALOBJECT_H
diff --git a/include/llvm/IR/GlobalValue.h b/include/llvm/IR/GlobalValue.h
index 09682f7aa349..c6398aaa4847 100644
--- a/include/llvm/IR/GlobalValue.h
+++ b/include/llvm/IR/GlobalValue.h
@@ -18,23 +18,31 @@
#ifndef LLVM_IR_GLOBALVALUE_H
#define LLVM_IR_GLOBALVALUE_H
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/MD5.h"
-#include <system_error>
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
namespace llvm {
class Comdat;
-class PointerType;
+class ConstantRange;
+class Error;
+class GlobalObject;
class Module;
namespace Intrinsic {
enum ID : unsigned;
-}
+} // end namespace Intrinsic
class GlobalValue : public Constant {
- GlobalValue(const GlobalValue &) = delete;
public:
/// @brief An enumeration for the kinds of linkage for global values.
enum LinkageTypes {
@@ -72,11 +80,14 @@ protected:
ValueType(Ty), Linkage(Linkage), Visibility(DefaultVisibility),
UnnamedAddrVal(unsigned(UnnamedAddr::None)),
DllStorageClass(DefaultStorageClass), ThreadLocal(NotThreadLocal),
- IntID((Intrinsic::ID)0U), Parent(nullptr) {
+ HasLLVMReservedName(false), IntID((Intrinsic::ID)0U), Parent(nullptr) {
setName(Name);
}
Type *ValueType;
+
+ static const unsigned GlobalValueSubClassDataBits = 18;
+
// All bitfields use unsigned as the underlying type so that MSVC will pack
// them.
unsigned Linkage : 4; // The linkage of this global
@@ -86,14 +97,19 @@ protected:
unsigned ThreadLocal : 3; // Is this symbol "Thread Local", if so, what is
// the desired model?
- static const unsigned GlobalValueSubClassDataBits = 19;
+
+ /// True if the function's name starts with "llvm.". This corresponds to the
+ /// value of Function::isIntrinsic(), which may be true even if
+ /// Function::getIntrinsicID() returns Intrinsic::not_intrinsic.
+ unsigned HasLLVMReservedName : 1;
private:
+ friend class Constant;
+
// Give subclasses access to what otherwise would be wasted padding.
- // (19 + 4 + 2 + 2 + 2 + 3) == 32.
+ // (18 + 4 + 2 + 2 + 2 + 3 + 1) == 32.
unsigned SubClassData : GlobalValueSubClassDataBits;
- friend class Constant;
void destroyConstantImpl();
Value *handleOperandChangeImpl(Value *From, Value *To);
@@ -139,6 +155,12 @@ protected:
}
Module *Parent; // The containing module.
+
+ // Used by SymbolTableListTraits.
+ void setParent(Module *parent) {
+ Parent = parent;
+ }
+
public:
enum ThreadLocalMode {
NotThreadLocal = 0,
@@ -148,6 +170,8 @@ public:
LocalExecTLSModel
};
+ GlobalValue(const GlobalValue &) = delete;
+
~GlobalValue() override {
removeDeadConstantUsers(); // remove any dead constants using this.
}
@@ -460,10 +484,8 @@ public:
/// function has been read in yet or not.
bool isMaterializable() const;
- /// Make sure this GlobalValue is fully read. If the module is corrupt, this
- /// returns true and fills in the optional string with information about the
- /// problem. If successful, this returns false.
- std::error_code materialize();
+ /// Make sure this GlobalValue is fully read.
+ Error materialize();
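// Illustrative usage sketch: materialize() now reports failure as llvm::Error,
// which must be consumed; the banner string and the errs() destination are
// assumptions of the example.
static bool materializeOrReportSketch(GlobalValue &GV) {
  if (Error Err = GV.materialize()) {
    logAllUnhandledErrors(std::move(Err), errs(), "cannot materialize: ");
    return false;
  }
  return true; // the body (if any) is now fully read
}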
/// @}
@@ -492,6 +514,18 @@ public:
// increased.
bool canIncreaseAlignment() const;
+ const GlobalObject *getBaseObject() const {
+ return const_cast<GlobalValue *>(this)->getBaseObject();
+ }
+ GlobalObject *getBaseObject();
+
+ /// Returns whether this is a reference to an absolute symbol.
+ bool isAbsoluteSymbolRef() const;
+
+ /// If this is an absolute symbol reference, returns the range of the symbol,
+ /// otherwise returns None.
+ Optional<ConstantRange> getAbsoluteSymbolRange() const;
+
/// This method unlinks 'this' from the containing module, but does not delete
/// it.
virtual void removeFromParent() = 0;
@@ -512,6 +546,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_GLOBALVALUE_H
diff --git a/include/llvm/IR/GlobalVariable.h b/include/llvm/IR/GlobalVariable.h
index ebeb635468d0..3b545d811d44 100644
--- a/include/llvm/IR/GlobalVariable.h
+++ b/include/llvm/IR/GlobalVariable.h
@@ -20,36 +20,34 @@
#ifndef LLVM_IR_GLOBALVARIABLE_H
#define LLVM_IR_GLOBALVARIABLE_H
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Value.h"
+#include <cassert>
+#include <cstddef>
namespace llvm {
-class Module;
class Constant;
+class Module;
+
template <typename ValueSubClass> class SymbolTableListTraits;
+class DIGlobalVariable;
+class DIGlobalVariableExpression;
class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
friend class SymbolTableListTraits<GlobalVariable>;
- void *operator new(size_t, unsigned) = delete;
- void operator=(const GlobalVariable &) = delete;
- GlobalVariable(const GlobalVariable &) = delete;
-
- void setParent(Module *parent);
bool isConstantGlobal : 1; // Is this a global constant?
bool isExternallyInitializedConstant : 1; // Is this a global whose value
// can change from its initial
// value before global
// initializers are run?
-public:
- // allocate space for exactly one operand
- void *operator new(size_t s) {
- return User::operator new(s, 1);
- }
+public:
/// GlobalVariable ctor - If a parent module is specified, the global is
/// automatically inserted into the end of the specified modules global list.
GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
@@ -63,6 +61,8 @@ public:
const Twine &Name = "", GlobalVariable *InsertBefore = nullptr,
ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
bool isExternallyInitialized = false);
+ GlobalVariable(const GlobalVariable &) = delete;
+ GlobalVariable &operator=(const GlobalVariable &) = delete;
~GlobalVariable() override {
dropAllReferences();
@@ -71,6 +71,13 @@ public:
setGlobalVariableNumOperands(1);
}
+ // allocate space for exactly one operand
+ void *operator new(size_t s) {
+ return User::operator new(s, 1);
+ }
+
+ void *operator new(size_t, unsigned) = delete;
+
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -165,6 +172,12 @@ public:
/// drops not only the reference to the initializer but also to any metadata.
void dropAllReferences();
+ /// Attach a DIGlobalVariableExpression.
+ void addDebugInfo(DIGlobalVariableExpression *GV);
+
+ /// Fill the vector with all debug info attachments.
+ void getDebugInfo(SmallVectorImpl<DIGlobalVariableExpression *> &GVs) const;
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Value *V) {
return V->getValueID() == Value::GlobalVariableVal;
@@ -178,6 +191,6 @@ struct OperandTraits<GlobalVariable> :
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_GLOBALVARIABLE_H
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index 016e9e1d2c50..1d9c16989de9 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -17,7 +17,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
@@ -45,6 +44,8 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <algorithm>
+#include <functional>
namespace llvm {
@@ -67,6 +68,23 @@ protected:
}
};
+/// Provides an 'InsertHelper' that calls a user-provided callback after
+/// performing the default insertion.
+class IRBuilderCallbackInserter : IRBuilderDefaultInserter {
+ std::function<void(Instruction *)> Callback;
+
+public:
+ IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
+ : Callback(Callback) {}
+
+protected:
+ void InsertHelper(Instruction *I, const Twine &Name,
+ BasicBlock *BB, BasicBlock::iterator InsertPt) const {
+ IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
+ Callback(I);
+ }
+};
+
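// Illustrative usage sketch, assuming llvm/ADT/SmallVector.h is included and the
// templated IRBuilder keeps its (Context, Folder, Inserter) constructor: every
// instruction the builder materializes is also pushed onto a local worklist via
// the callback.
static Value *buildWithTrackingSketch(BasicBlock *BB, Value *L, Value *R) {
  SmallVector<Instruction *, 8> Created;
  IRBuilder<ConstantFolder, IRBuilderCallbackInserter> Builder(
      BB->getContext(), ConstantFolder(),
      IRBuilderCallbackInserter([&](Instruction *I) { Created.push_back(I); }));
  Builder.SetInsertPoint(BB);
  return Builder.CreateAdd(L, R, "sum"); // inserted into BB and recorded in Created
}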
/// \brief Common base class shared among various IRBuilders.
class IRBuilderBase {
DebugLoc CurDbgLocation;
@@ -84,7 +102,7 @@ protected:
public:
IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = None)
- : Context(context), DefaultFPMathTag(FPMathTag), FMF(),
+ : Context(context), DefaultFPMathTag(FPMathTag),
DefaultOperandBundles(OpBundles) {
ClearInsertionPoint();
}
@@ -97,7 +115,7 @@ public:
/// inserted into a block.
void ClearInsertionPoint() {
BB = nullptr;
- InsertPt.reset(nullptr);
+ InsertPt = BasicBlock::iterator();
}
BasicBlock *GetInsertBlock() const { return BB; }
@@ -148,12 +166,12 @@ public:
/// InsertPoint - A saved insertion point.
class InsertPoint {
- BasicBlock *Block;
+ BasicBlock *Block = nullptr;
BasicBlock::iterator Point;
public:
/// \brief Creates a new insertion point which doesn't point to anything.
- InsertPoint() : Block(nullptr) {}
+ InsertPoint() = default;
/// \brief Creates a new insertion point at the given location.
InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
@@ -162,8 +180,8 @@ public:
/// \brief Returns true if this insert point is set.
bool isSet() const { return (Block != nullptr); }
- llvm::BasicBlock *getBlock() const { return Block; }
- llvm::BasicBlock::iterator getPoint() const { return Point; }
+ BasicBlock *getBlock() const { return Block; }
+ BasicBlock::iterator getPoint() const { return Point; }
};
/// \brief Returns the current insert point.
@@ -213,14 +231,14 @@ public:
BasicBlock::iterator Point;
DebugLoc DbgLoc;
- InsertPointGuard(const InsertPointGuard &) = delete;
- InsertPointGuard &operator=(const InsertPointGuard &) = delete;
-
public:
InsertPointGuard(IRBuilderBase &B)
: Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
DbgLoc(B.getCurrentDebugLocation()) {}
+ InsertPointGuard(const InsertPointGuard &) = delete;
+ InsertPointGuard &operator=(const InsertPointGuard &) = delete;
+
~InsertPointGuard() {
Builder.restoreIP(InsertPoint(Block, Point));
Builder.SetCurrentDebugLocation(DbgLoc);
@@ -234,14 +252,13 @@ public:
FastMathFlags FMF;
MDNode *FPMathTag;
- FastMathFlagGuard(const FastMathFlagGuard &) = delete;
- FastMathFlagGuard &operator=(
- const FastMathFlagGuard &) = delete;
-
public:
FastMathFlagGuard(IRBuilderBase &B)
: Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag) {}
+ FastMathFlagGuard(const FastMathFlagGuard &) = delete;
+ FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
+
~FastMathFlagGuard() {
Builder.FMF = FMF;
Builder.DefaultFPMathTag = FPMathTag;
@@ -446,6 +463,11 @@ public:
/// If the pointer isn't i8* it will be converted.
CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
+ /// Create a call to invariant.start intrinsic.
+ ///
+ /// If the pointer isn't i8* it will be converted.
+ CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
+
/// \brief Create a call to Masked Load intrinsic
CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
Value *PassThru = nullptr, const Twine &Name = "");
@@ -681,6 +703,19 @@ public:
BranchWeights, Unpredictable));
}
+ /// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
+ /// instruction. Copy branch metadata if available.
+ BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
+ Instruction *MDSrc) {
+ BranchInst *Br = BranchInst::Create(True, False, Cond);
+ if (MDSrc) {
+ unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
+ LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
+ Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
+ }
+ return Insert(Br);
+ }
+
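// Illustrative usage sketch: swapping in a new condition while preserving the
// old branch's prof/unpredictable/make.implicit/dbg metadata through the new
// overload. OldBr, NewCond, and erasing the old terminator afterwards are
// assumptions of the example.
static BranchInst *replaceCondBrSketch(IRBuilder<> &Builder, BranchInst *OldBr,
                                       Value *NewCond) {
  Builder.SetInsertPoint(OldBr);
  BranchInst *NewBr = Builder.CreateCondBr(
      NewCond, OldBr->getSuccessor(0), OldBr->getSuccessor(1), OldBr);
  OldBr->eraseFromParent(); // the old terminator has been superseded
  return NewBr;
}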
/// \brief Create a switch instruction with the specified value, default dest,
/// and with a hint for the number of cases that will be added (for efficient
/// allocation).
@@ -1000,7 +1035,7 @@ public:
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateBinOp(Opc, LC, RC), Name);
- llvm::Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
+ Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
if (isa<FPMathOperator>(BinOp))
BinOp = AddFPMathAttributes(BinOp, FPMathTag, FMF);
return Insert(BinOp, Name);
@@ -1410,12 +1445,6 @@ public:
return CreateBitCast(V, DestTy, Name);
}
-private:
- // \brief Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
- // compile time error, instead of converting the string to bool for the
- // isSigned parameter.
- Value *CreateIntCast(Value *, Type *, const char *) = delete;
-
public:
Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
if (V->getType() == DestTy)
@@ -1425,6 +1454,11 @@ public:
return Insert(CastInst::CreateFPCast(V, DestTy), Name);
}
+ // \brief Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
+ // compile time error, instead of converting the string to bool for the
+ // isSigned parameter.
+ Value *CreateIntCast(Value *, Type *, const char *) = delete;
+
//===--------------------------------------------------------------------===//
// Instruction creation methods: Compare Instructions
//===--------------------------------------------------------------------===//
@@ -1549,7 +1583,7 @@ public:
return CreateCall(FTy, Callee, Args, Name, FPMathTag);
}
- CallInst *CreateCall(llvm::FunctionType *FTy, Value *Callee,
+ CallInst *CreateCall(FunctionType *FTy, Value *Callee,
ArrayRef<Value *> Args, const Twine &Name = "",
MDNode *FPMathTag = nullptr) {
CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
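
The two IRBuilder additions above (CreateInvariantStart and the metadata-copying CreateCondBr overload) can be exercised roughly as follows. This is a minimal sketch, assuming OldBr is a conditional branch; the helper name replaceCondBrKeepingMetadata is hypothetical and not part of the patch.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include <cassert>

using namespace llvm;

static void replaceCondBrKeepingMetadata(BranchInst *OldBr,
                                         BasicBlock *NewTrue,
                                         BasicBlock *NewFalse) {
  assert(OldBr->isConditional() && "expected a conditional branch");
  IRBuilder<> Builder(OldBr);
  // Passing OldBr as MDSrc copies !prof, !unpredictable, !make.implicit and
  // !dbg onto the newly created branch.
  Builder.CreateCondBr(OldBr->getCondition(), NewTrue, NewFalse, OldBr);
  OldBr->eraseFromParent();
}
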
diff --git a/include/llvm/IR/IRPrintingPasses.h b/include/llvm/IR/IRPrintingPasses.h
index bc6de19a6c3a..0825e0696cac 100644
--- a/include/llvm/IR/IRPrintingPasses.h
+++ b/include/llvm/IR/IRPrintingPasses.h
@@ -30,7 +30,7 @@ class Module;
class ModulePass;
class PreservedAnalyses;
class raw_ostream;
-template <typename IRUnitT> class AnalysisManager;
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
/// \brief Create and return a pass that writes the module to the specified
/// \c raw_ostream.
diff --git a/include/llvm/IR/InlineAsm.h b/include/llvm/IR/InlineAsm.h
index 40ba830b8819..f95509b9b09a 100644
--- a/include/llvm/IR/InlineAsm.h
+++ b/include/llvm/IR/InlineAsm.h
@@ -18,15 +18,14 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Value.h"
+#include <cassert>
+#include <string>
#include <vector>
namespace llvm {
-class PointerType;
class FunctionType;
-class Module;
-
-struct InlineAsmKeyType;
+class PointerType;
template <class ConstantClass> class ConstantUniqueMap;
class InlineAsm : public Value {
@@ -40,9 +39,6 @@ private:
friend struct InlineAsmKeyType;
friend class ConstantUniqueMap<InlineAsm>;
- InlineAsm(const InlineAsm &) = delete;
- void operator=(const InlineAsm&) = delete;
-
std::string AsmString, Constraints;
FunctionType *FTy;
bool HasSideEffects;
@@ -59,6 +55,9 @@ private:
void destroyConstant();
public:
+ InlineAsm(const InlineAsm &) = delete;
+ InlineAsm &operator=(const InlineAsm &) = delete;
+
/// InlineAsm::get - Return the specified uniqued inline asm string.
///
static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
@@ -361,6 +360,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_INLINEASM_H
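
The InlineAsm hunks above follow the pattern applied throughout this patch: deleted copy operations move from the private section to public. A minimal standalone sketch of that idiom, with a hypothetical class name:

class NonCopyable {
public:
  NonCopyable() = default;
  // Deleted and public: misuse is diagnosed as a call to a deleted function
  // rather than as an access error.
  NonCopyable(const NonCopyable &) = delete;
  NonCopyable &operator=(const NonCopyable &) = delete;
};
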
diff --git a/include/llvm/IR/InstIterator.h b/include/llvm/IR/InstIterator.h
index 1baca21c73af..28fc473f1490 100644
--- a/include/llvm/IR/InstIterator.h
+++ b/include/llvm/IR/InstIterator.h
@@ -19,8 +19,11 @@
#ifndef LLVM_IR_INSTITERATOR_H
#define LLVM_IR_INSTITERATOR_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include <iterator>
namespace llvm {
@@ -35,6 +38,7 @@ template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
BB_t *BBs; // BasicBlocksType
BB_i_t BB; // BasicBlocksType::iterator
BI_t BI; // BasicBlock::iterator
+
public:
typedef std::bidirectional_iterator_tag iterator_category;
typedef IIty value_type;
@@ -43,7 +47,7 @@ public:
typedef IIty& reference;
// Default constructor
- InstIterator() {}
+ InstIterator() = default;
// Copy constructor...
template<typename A, typename B, typename C, typename D>
@@ -97,7 +101,7 @@ public:
--BI;
return *this;
}
- inline InstIterator operator--(int) {
+ inline InstIterator operator--(int) {
InstIterator tmp = *this; --*this; return tmp;
}
@@ -152,6 +156,6 @@ inline const_inst_range instructions(const Function &F) {
return const_inst_range(inst_begin(F), inst_end(F));
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_INSTITERATOR_H
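
For reference, the iterators this header defines are usually consumed through the instructions() range adapter shown at the end of the hunk above. A minimal sketch; the helper name countCalls is hypothetical.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static unsigned countCalls(const Function &F) {
  unsigned NumCalls = 0;
  // instructions(F) walks every instruction in every basic block of F.
  for (const Instruction &I : instructions(F))
    if (isa<CallInst>(I))
      ++NumCalls;
  return NumCalls;
}
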
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index 39514c5675a7..f2abbec64fe6 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -16,18 +16,32 @@
#ifndef LLVM_IR_INSTRTYPES_H
#define LLVM_IR_INSTRTYPES_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/User.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <string>
+#include <vector>
namespace llvm {
-class LLVMContext;
-
//===----------------------------------------------------------------------===//
// TerminatorInst Class
//===----------------------------------------------------------------------===//
@@ -249,8 +263,8 @@ public:
typedef SuccIterator<TerminatorInst *, BasicBlock> succ_iterator;
typedef SuccIterator<const TerminatorInst *, const BasicBlock>
succ_const_iterator;
- typedef llvm::iterator_range<succ_iterator> succ_range;
- typedef llvm::iterator_range<succ_const_iterator> succ_const_range;
+ typedef iterator_range<succ_iterator> succ_range;
+ typedef iterator_range<succ_const_iterator> succ_const_range;
private:
inline succ_iterator succ_begin() { return succ_iterator(this); }
@@ -276,8 +290,6 @@ public:
//===----------------------------------------------------------------------===//
class UnaryInstruction : public Instruction {
- void *operator new(size_t, unsigned) = delete;
-
protected:
UnaryInstruction(Type *Ty, unsigned iType, Value *V,
Instruction *IB = nullptr)
@@ -295,6 +307,8 @@ public:
return User::operator new(s, 1);
}
+ void *operator new(size_t, unsigned) = delete;
+
// Out of line virtual method, so the vtable, etc has a home.
~UnaryInstruction() override;
@@ -326,8 +340,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
//===----------------------------------------------------------------------===//
class BinaryOperator : public Instruction {
- void *operator new(size_t, unsigned) = delete;
-
protected:
void init(BinaryOps iType);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
@@ -345,6 +357,8 @@ public:
return User::operator new(s, 2);
}
+ void *operator new(size_t, unsigned) = delete;
+
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -899,10 +913,6 @@ public:
BAD_ICMP_PREDICATE = ICMP_SLE + 1
};
-private:
- void *operator new(size_t, unsigned) = delete;
- CmpInst() = delete;
-
protected:
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
Value *LHS, Value *RHS, const Twine &Name = "",
@@ -915,10 +925,15 @@ protected:
void anchor() override; // Out of line virtual method.
public:
+ CmpInst() = delete;
+
// allocate space for exactly two operands
void *operator new(size_t s) {
return User::operator new(s, 2);
}
+
+ void *operator new(size_t, unsigned) = delete;
+
/// Construct a compare instruction, given the opcode, the predicate and
/// the two operands. Optionally (if InstBefore is specified) insert the
/// instruction into a BasicBlock right before the specified instruction.
@@ -957,6 +972,8 @@ public:
return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
}
+ static StringRef getPredicateName(Predicate P);
+
bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
@@ -1189,7 +1206,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
struct OperandBundleUse {
ArrayRef<Use> Inputs;
- OperandBundleUse() {}
+ OperandBundleUse() = default;
explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
: Inputs(Inputs), Tag(Tag) {}
@@ -1202,7 +1219,7 @@ struct OperandBundleUse {
// Conservative answer: no operands have any attributes.
return false;
- };
+ }
/// \brief Return the tag of this operand bundle as a string.
StringRef getTagName() const {
@@ -1335,6 +1352,12 @@ public:
return bundle_op_info_end()[-1].End;
}
+ /// Return true if the operand at index \p Idx is a bundle operand.
+ bool isBundleOperand(unsigned Idx) const {
+ return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
+ Idx < getBundleOperandsEndIndex();
+ }
+
/// \brief Return the total number of operands (not operand bundles) used by
/// every operand bundle in this OperandBundleUser.
unsigned getNumTotalBundleOperands() const {
@@ -1471,14 +1494,14 @@ public:
return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
Other.bundle_op_info_begin());
- };
+ }
/// \brief Return true if this operand bundle user contains operand bundles
/// with tags other than those specified in \p IDs.
bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
uint32_t ID = getOperandBundleAt(i).getTagID();
- if (std::find(IDs.begin(), IDs.end(), ID) == IDs.end())
+ if (!is_contained(IDs, ID))
return true;
}
return false;
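
The hasOperandBundlesOtherThan() hunk above swaps an explicit std::find against begin()/end() for llvm::is_contained() from STLExtras. A minimal standalone sketch of the same pattern, with hypothetical names:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include <cstdint>

static bool hasTagOtherThan(llvm::ArrayRef<uint32_t> Tags,
                            llvm::ArrayRef<uint32_t> Allowed) {
  for (uint32_t Tag : Tags)
    if (!llvm::is_contained(Allowed, Tag)) // was: std::find(...) == end()
      return true;
  return false;
}
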
diff --git a/include/llvm/IR/Instruction.h b/include/llvm/IR/Instruction.h
index df4f8df78b12..fd7c54d69b63 100644
--- a/include/llvm/IR/Instruction.h
+++ b/include/llvm/IR/Instruction.h
@@ -17,27 +17,27 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <utility>
namespace llvm {
+class BasicBlock;
class FastMathFlags;
-class LLVMContext;
class MDNode;
-class BasicBlock;
struct AAMDNodes;
-template <>
-struct SymbolTableListSentinelTraits<Instruction>
- : public ilist_half_embedded_sentinel_traits<Instruction> {};
-
class Instruction : public User,
public ilist_node_with_parent<Instruction, BasicBlock> {
- void operator=(const Instruction &) = delete;
- Instruction(const Instruction &) = delete;
-
BasicBlock *Parent;
DebugLoc DbgLoc; // 'dbg' Metadata cache.
@@ -46,7 +46,11 @@ class Instruction : public User,
/// this instruction has metadata attached to it or not.
HasMetadataBit = 1 << 15
};
+
public:
+ Instruction(const Instruction &) = delete;
+ Instruction &operator=(const Instruction &) = delete;
+
// Out of line virtual method, so the vtable, etc has a home.
~Instruction() override;
@@ -94,6 +98,11 @@ public:
/// the basic block that MovePos lives in, right before MovePos.
void moveBefore(Instruction *MovePos);
+ /// Unlink this instruction and insert into BB before I.
+ ///
+ /// \pre I is a valid iterator into BB.
+ void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I);
+
//===--------------------------------------------------------------------===//
// Subclass classification.
//===--------------------------------------------------------------------===//
@@ -133,6 +142,11 @@ public:
return getOpcode() == AShr;
}
+ /// Return true if this is and/or/xor.
+ inline bool isBitwiseLogicOp() const {
+ return getOpcode() == And || getOpcode() == Or || getOpcode() == Xor;
+ }
+
/// Determine if the OpCode is one of the CastInst instructions.
static inline bool isCast(unsigned OpCode) {
return OpCode >= CastOpsBegin && OpCode < CastOpsEnd;
@@ -197,6 +211,17 @@ public:
void setMetadata(unsigned KindID, MDNode *Node);
void setMetadata(StringRef Kind, MDNode *Node);
+ /// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty,
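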
+ /// specifies the list of metadata that needs to be copied. If \p WL is
+ /// empty, all metadata will be copied.
+ void copyMetadata(const Instruction &SrcInst,
+ ArrayRef<unsigned> WL = ArrayRef<unsigned>());
+
+ /// If the instruction has "branch_weights" MD_prof metadata and the MDNode
+ /// has three operands (including name string), swap the order of the
+ /// metadata.
+ void swapProfMetadata();
+
/// Drop all unknown metadata except for debug locations.
/// @{
/// Passes are required to drop metadata they don't understand. This is a
@@ -220,12 +245,12 @@ public:
/// Retrieve the raw weight values of a conditional branch or select.
/// Returns true on success with profile weights filled in.
/// Returns false if no metadata or invalid metadata was found.
- bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal);
+ bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal) const;
/// Retrieve total raw weight values of a branch.
/// Returns true on success with profile total weights filled in.
/// Returns false if no metadata was found.
- bool extractProfTotalWeight(uint64_t &TotalVal);
+ bool extractProfTotalWeight(uint64_t &TotalVal) const;
/// Set the debug location information for this instruction.
void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }
@@ -335,12 +360,12 @@ private:
SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
/// Clear all hashtable-based metadata from this instruction.
void clearMetadataHashEntries();
+
public:
//===--------------------------------------------------------------------===//
// Predicates and helper methods.
//===--------------------------------------------------------------------===//
-
/// Return true if the instruction is associative:
///
/// Associative operators satisfy: x op (y op z) === (x op y) op z
@@ -450,7 +475,7 @@ public:
bool isIdenticalTo(const Instruction *I) const;
/// This is like isIdenticalTo, except that it ignores the
- /// SubclassOptionalData flags, which specify conditions under which the
+ /// SubclassOptionalData flags, which may specify conditions under which the
/// instruction's result is undefined.
bool isIdenticalToWhenDefined(const Instruction *I) const;
@@ -529,12 +554,16 @@ public:
#define LAST_OTHER_INST(N) OtherOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
+
private:
+ friend class SymbolTableListTraits<Instruction>;
+
// Shadow Value::setValueSubclassData with a private forwarding method so that
// subclasses cannot accidentally use it.
void setValueSubclassData(unsigned short D) {
Value::setValueSubclassData(D);
}
+
unsigned short getSubclassDataFromValue() const {
return Value::getSubclassDataFromValue();
}
@@ -544,8 +573,8 @@ private:
(V ? HasMetadataBit : 0));
}
- friend class SymbolTableListTraits<Instruction>;
void setParent(BasicBlock *P);
+
protected:
// Instruction subclasses can stick up to 15 bits of stuff into the
// SubclassData field of instruction with these members.
@@ -570,18 +599,6 @@ private:
Instruction *cloneImpl() const;
};
-// Instruction* is only 4-byte aligned.
-template<>
-class PointerLikeTypeTraits<Instruction*> {
- typedef Instruction* PT;
-public:
- static inline void *getAsVoidPointer(PT P) { return P; }
- static inline PT getFromVoidPointer(void *P) {
- return static_cast<PT>(P);
- }
- enum { NumLowBitsAvailable = 2 };
-};
-
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_INSTRUCTION_H
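
The new Instruction::copyMetadata() with a kind whitelist is what the IRBuilder::CreateCondBr overload earlier in this patch builds on. A minimal sketch, assuming both instructions live in the same LLVMContext; the helper name copyBranchMetadata is hypothetical.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static void copyBranchMetadata(Instruction &Dst, const Instruction &Src) {
  // Only the listed kinds are transferred; an empty list would copy all
  // metadata attached to Src.
  unsigned Kinds[] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
                      LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
  Dst.copyMetadata(Src, Kinds);
}
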
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index be077725f7bc..a5d78a08171a 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -17,23 +17,33 @@
#define LLVM_IR_INSTRUCTIONS_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
-#include <iterator>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
namespace llvm {
class APInt;
class ConstantInt;
-class ConstantRange;
class DataLayout;
class LLVMContext;
@@ -46,14 +56,14 @@ enum SynchronizationScope {
// AllocaInst Class
//===----------------------------------------------------------------------===//
-/// AllocaInst - an instruction to allocate memory on the stack
-///
+/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
Type *AllocatedType;
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
AllocaInst *cloneImpl() const;
public:
@@ -74,65 +84,56 @@ public:
// Out of line virtual method, so the vtable, etc. has a home.
~AllocaInst() override;
- /// isArrayAllocation - Return true if there is an allocation size parameter
- /// to the allocation instruction that is not 1.
- ///
+ /// Return true if there is an allocation size parameter to the allocation
+ /// instruction that is not 1.
bool isArrayAllocation() const;
- /// getArraySize - Get the number of elements allocated. For a simple
- /// allocation of a single element, this will return a constant 1 value.
- ///
+ /// Get the number of elements allocated. For a simple allocation of a single
+ /// element, this will return a constant 1 value.
const Value *getArraySize() const { return getOperand(0); }
Value *getArraySize() { return getOperand(0); }
- /// getType - Overload to return most specific pointer type
- ///
+ /// Overload to return most specific pointer type.
PointerType *getType() const {
return cast<PointerType>(Instruction::getType());
}
- /// getAllocatedType - Return the type that is being allocated by the
- /// instruction.
- ///
+ /// Return the type that is being allocated by the instruction.
Type *getAllocatedType() const { return AllocatedType; }
- /// \brief for use only in special circumstances that need to generically
+ /// For use only in special circumstances that need to generically
/// transform a whole instruction (eg: IR linking and vectorization).
void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
- /// getAlignment - Return the alignment of the memory that is being allocated
- /// by the instruction.
- ///
+ /// Return the alignment of the memory that is being allocated by the
+ /// instruction.
unsigned getAlignment() const {
return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
}
void setAlignment(unsigned Align);
- /// isStaticAlloca - Return true if this alloca is in the entry block of the
- /// function and is a constant size. If so, the code generator will fold it
- /// into the prolog/epilog code, so it is basically free.
+ /// Return true if this alloca is in the entry block of the function and is a
+ /// constant size. If so, the code generator will fold it into the
+ /// prolog/epilog code, so it is basically free.
bool isStaticAlloca() const;
- /// \brief Return true if this alloca is used as an inalloca argument to a
- /// call. Such allocas are never considered static even if they are in the
- /// entry block.
+ /// Return true if this alloca is used as an inalloca argument to a call. Such
+ /// allocas are never considered static even if they are in the entry block.
bool isUsedWithInAlloca() const {
return getSubclassDataFromInstruction() & 32;
}
- /// \brief Specify whether this alloca is used to represent the arguments to
- /// a call.
+ /// Specify whether this alloca is used to represent the arguments to a call.
void setUsedWithInAlloca(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
(V ? 32 : 0));
}
- /// \brief Return true if this alloca is used as a swifterror argument to a
- /// call.
+ /// Return true if this alloca is used as a swifterror argument to a call.
bool isSwiftError() const {
return getSubclassDataFromInstruction() & 64;
}
- /// \brief Specify whether this alloca is used to represent a swifterror.
+ /// Specify whether this alloca is used to represent a swifterror.
void setSwiftError(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
(V ? 64 : 0));
@@ -158,15 +159,15 @@ private:
// LoadInst Class
//===----------------------------------------------------------------------===//
-/// LoadInst - an instruction for reading from memory. This uses the
-/// SubclassData field in Value to store whether or not the load is volatile.
-///
+/// An instruction for reading from memory. This uses the SubclassData field in
+/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
void AssertOK();
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
LoadInst *cloneImpl() const;
public:
@@ -201,7 +202,6 @@ public:
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
-
LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
@@ -214,20 +214,16 @@ public:
LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
- /// isVolatile - Return true if this is a load from a volatile memory
- /// location.
- ///
+ /// Return true if this is a load from a volatile memory location.
bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
- /// setVolatile - Specify whether this is a volatile load or not.
- ///
+ /// Specify whether this is a volatile load or not.
void setVolatile(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
(V ? 1 : 0));
}
- /// getAlignment - Return the alignment of the access that is being performed
- ///
+ /// Return the alignment of the access that is being performed.
unsigned getAlignment() const {
return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
}
@@ -275,7 +271,7 @@ public:
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
- /// \brief Returns the address space of the pointer operand.
+ /// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
@@ -300,22 +296,17 @@ private:
// StoreInst Class
//===----------------------------------------------------------------------===//
-/// StoreInst - an instruction for storing to memory
-///
+/// An instruction for storing to memory.
class StoreInst : public Instruction {
- void *operator new(size_t, unsigned) = delete;
void AssertOK();
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
StoreInst *cloneImpl() const;
public:
- // allocate space for exactly two operands
- void *operator new(size_t s) {
- return User::operator new(s, 2);
- }
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
@@ -334,13 +325,17 @@ public:
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
- /// isVolatile - Return true if this is a store to a volatile memory
- /// location.
- ///
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+
+ void *operator new(size_t, unsigned) = delete;
+
+ /// Return true if this is a store to a volatile memory location.
bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
- /// setVolatile - Specify whether this is a volatile store or not.
- ///
+ /// Specify whether this is a volatile store or not.
void setVolatile(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
(V ? 1 : 0));
@@ -349,8 +344,7 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// getAlignment - Return the alignment of the access that is being performed
- ///
+ /// Return the alignment of the access that is being performed.
unsigned getAlignment() const {
return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
}
@@ -401,7 +395,7 @@ public:
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
- /// \brief Returns the address space of the pointer operand.
+ /// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
@@ -432,23 +426,17 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
// FenceInst Class
//===----------------------------------------------------------------------===//
-/// FenceInst - an instruction for ordering other memory operations
-///
+/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
- void *operator new(size_t, unsigned) = delete;
void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
FenceInst *cloneImpl() const;
public:
- // allocate space for exactly zero operands
- void *operator new(size_t s) {
- return User::operator new(s, 0);
- }
-
// Ordering may only be Acquire, Release, AcquireRelease, or
// SequentiallyConsistent.
FenceInst(LLVMContext &C, AtomicOrdering Ordering,
@@ -458,6 +446,13 @@ public:
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+
+ void *operator new(size_t, unsigned) = delete;
+
/// Returns the ordering effect of this fence.
AtomicOrdering getOrdering() const {
return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
@@ -502,12 +497,11 @@ private:
// AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//
-/// AtomicCmpXchgInst - an instruction that atomically checks whether a
+/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. Returns the value that was loaded.
///
class AtomicCmpXchgInst : public Instruction {
- void *operator new(size_t, unsigned) = delete;
void Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
@@ -515,13 +509,10 @@ class AtomicCmpXchgInst : public Instruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
AtomicCmpXchgInst *cloneImpl() const;
public:
- // allocate space for exactly three operands
- void *operator new(size_t s) {
- return User::operator new(s, 3);
- }
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
@@ -533,14 +524,21 @@ public:
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
- /// isVolatile - Return true if this is a cmpxchg from a volatile memory
+ // allocate space for exactly three operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 3);
+ }
+
+ void *operator new(size_t, unsigned) = delete;
+
+ /// Return true if this is a cmpxchg from a volatile memory
/// location.
///
bool isVolatile() const {
return getSubclassDataFromInstruction() & 1;
}
- /// setVolatile - Specify whether this is a volatile cmpxchg.
+ /// Specify whether this is a volatile cmpxchg.
///
void setVolatile(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
@@ -609,12 +607,12 @@ public:
Value *getNewValOperand() { return getOperand(2); }
const Value *getNewValOperand() const { return getOperand(2); }
- /// \brief Returns the address space of the pointer operand.
+ /// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
- /// \brief Returns the strongest permitted ordering on failure, given the
+ /// Returns the strongest permitted ordering on failure, given the
/// desired ordering on success.
///
/// If the comparison in a cmpxchg operation fails, there is no atomic store
@@ -664,16 +662,15 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
// AtomicRMWInst Class
//===----------------------------------------------------------------------===//
-/// AtomicRMWInst - an instruction that atomically reads a memory location,
+/// An instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
- void *operator new(size_t, unsigned) = delete;
-
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
AtomicRMWInst *cloneImpl() const;
public:
@@ -710,10 +707,6 @@ public:
BAD_BINOP
};
- // allocate space for exactly two operands
- void *operator new(size_t s) {
- return User::operator new(s, 2);
- }
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering, SynchronizationScope SynchScope,
Instruction *InsertBefore = nullptr);
@@ -721,6 +714,13 @@ public:
AtomicOrdering Ordering, SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+
+ void *operator new(size_t, unsigned) = delete;
+
BinOp getOperation() const {
return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
}
@@ -731,13 +731,13 @@ public:
(Operation << 5));
}
- /// isVolatile - Return true if this is a RMW on a volatile memory location.
+ /// Return true if this is a RMW on a volatile memory location.
///
bool isVolatile() const {
return getSubclassDataFromInstruction() & 1;
}
- /// setVolatile - Specify whether this is a volatile RMW or not.
+ /// Specify whether this is a volatile RMW or not.
///
void setVolatile(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
@@ -781,7 +781,7 @@ public:
Value *getValOperand() { return getOperand(1); }
const Value *getValOperand() const { return getOperand(1); }
- /// \brief Returns the address space of the pointer operand.
+ /// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
@@ -797,6 +797,7 @@ public:
private:
void Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering, SynchronizationScope SynchScope);
+
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
void setInstructionSubclassData(unsigned short D) {
@@ -823,7 +824,7 @@ inline Type *checkGEPType(Type *Ty) {
return Ty;
}
-/// GetElementPtrInst - an instruction for type-safe pointer arithmetic to
+/// An instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
@@ -849,6 +850,7 @@ class GetElementPtrInst : public Instruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
GetElementPtrInst *cloneImpl() const;
public:
@@ -867,6 +869,7 @@ public:
return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
NameStr, InsertBefore);
}
+
static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList,
const Twine &NameStr,
@@ -891,6 +894,7 @@ public:
Instruction *InsertBefore = nullptr){
return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
}
+
static GetElementPtrInst *
CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &NameStr = "",
@@ -900,12 +904,14 @@ public:
GEP->setIsInBounds(true);
return GEP;
}
+
static GetElementPtrInst *CreateInBounds(Value *Ptr,
ArrayRef<Value *> IdxList,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
}
+
static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList,
const Twine &NameStr,
@@ -919,11 +925,6 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- // getType - Overload to return most specific sequential type.
- SequentialType *getType() const {
- return cast<SequentialType>(Instruction::getType());
- }
-
Type *getSourceElementType() const { return SourceElementType; }
void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
@@ -935,14 +936,14 @@ public:
return ResultElementType;
}
- /// \brief Returns the address space of this instruction's pointer type.
+ /// Returns the address space of this instruction's pointer type.
unsigned getAddressSpace() const {
// Note that this is always the same as the pointer operand's address space
// and that is cheaper to compute, so cheat here.
return getPointerAddressSpace();
}
- /// getIndexedType - Returns the type of the element that would be loaded with
+ /// Returns the type of the element that would be loaded with
/// a load instruction with the specified parameters.
///
/// Null is returned if the indices are invalid for the specified
@@ -967,23 +968,23 @@ public:
return 0U; // get index for modifying correct operand.
}
- /// getPointerOperandType - Method to return the pointer operand as a
+ /// Method to return the pointer operand as a
/// PointerType.
Type *getPointerOperandType() const {
return getPointerOperand()->getType();
}
- /// \brief Returns the address space of the pointer operand.
+ /// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperandType()->getPointerAddressSpace();
}
- /// GetGEPReturnType - Returns the pointer type returned by the GEP
+ /// Returns the pointer type returned by the GEP
/// instruction, which may be a vector of pointers.
static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
return getGEPReturnType(
- cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
- Ptr, IdxList);
+ cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
+ Ptr, IdxList);
}
static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
ArrayRef<Value *> IdxList) {
@@ -1011,24 +1012,24 @@ public:
return getNumOperands() > 1;
}
- /// hasAllZeroIndices - Return true if all of the indices of this GEP are
+ /// Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool hasAllZeroIndices() const;
- /// hasAllConstantIndices - Return true if all of the indices of this GEP are
+ /// Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool hasAllConstantIndices() const;
- /// setIsInBounds - Set or clear the inbounds flag on this GEP instruction.
+ /// Set or clear the inbounds flag on this GEP instruction.
/// See LangRef.html for the meaning of inbounds on a getelementptr.
void setIsInBounds(bool b = true);
- /// isInBounds - Determine whether the GEP has the inbounds flag.
+ /// Determine whether the GEP has the inbounds flag.
bool isInBounds() const;
- /// \brief Accumulate the constant address offset of this GEP if possible.
+ /// Accumulate the constant address offset of this GEP if possible.
///
/// This routine accepts an APInt into which it will accumulate the constant
/// offset of this GEP if the GEP is in fact constant. If the GEP is not
@@ -1065,6 +1066,7 @@ GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
cast<PointerType>(getType()->getScalarType())->getElementType());
init(Ptr, IdxList, NameStr);
}
+
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList, unsigned Values,
const Twine &NameStr,
@@ -1088,7 +1090,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
-/// \brief Represent an integer comparison operator.
+/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
void anchor() override;
@@ -1107,11 +1109,12 @@ class ICmpInst: public CmpInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical ICmpInst
+
+ /// Clone an identical ICmpInst
ICmpInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics.
+ /// Constructor with insert-before-instruction semantics.
ICmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
@@ -1126,7 +1129,7 @@ public:
#endif
}
- /// \brief Constructor with insert-at-end semantics.
+ /// Constructor with insert-at-end semantics.
ICmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
@@ -1141,7 +1144,7 @@ public:
#endif
}
- /// \brief Constructor with no-insertion semantics
+ /// Constructor with no-insertion semantics
ICmpInst(
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
@@ -1157,64 +1160,60 @@ public:
/// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as signed.
- /// \brief Return the signed version of the predicate
+ /// Return the signed version of the predicate
Predicate getSignedPredicate() const {
return getSignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
- /// \brief Return the signed version of the predicate.
+ /// Return the signed version of the predicate.
static Predicate getSignedPredicate(Predicate pred);
/// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as unsigned.
- /// \brief Return the unsigned version of the predicate
+ /// Return the unsigned version of the predicate
Predicate getUnsignedPredicate() const {
return getUnsignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
- /// \brief Return the unsigned version of the predicate.
+ /// Return the unsigned version of the predicate.
static Predicate getUnsignedPredicate(Predicate pred);
- /// isEquality - Return true if this predicate is either EQ or NE. This also
+ /// Return true if this predicate is either EQ or NE. This also
/// tests for commutativity.
static bool isEquality(Predicate P) {
return P == ICMP_EQ || P == ICMP_NE;
}
- /// isEquality - Return true if this predicate is either EQ or NE. This also
+ /// Return true if this predicate is either EQ or NE. This also
/// tests for commutativity.
bool isEquality() const {
return isEquality(getPredicate());
}
/// @returns true if the predicate of this ICmpInst is commutative
- /// \brief Determine if this relation is commutative.
+ /// Determine if this relation is commutative.
bool isCommutative() const { return isEquality(); }
- /// isRelational - Return true if the predicate is relational (not EQ or NE).
+ /// Return true if the predicate is relational (not EQ or NE).
///
bool isRelational() const {
return !isEquality();
}
- /// isRelational - Return true if the predicate is relational (not EQ or NE).
+ /// Return true if the predicate is relational (not EQ or NE).
///
static bool isRelational(Predicate P) {
return !isEquality(P);
}
- /// Initialize a set of values that all satisfy the predicate with C.
- /// \brief Make a ConstantRange for a relation with a constant value.
- static ConstantRange makeConstantRange(Predicate pred, const APInt &C);
-
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
- /// \brief Swap operands and adjust predicate.
+ /// Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
@@ -1236,16 +1235,17 @@ public:
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
-/// \brief Represents a floating point comparison operator.
+/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical FCmpInst
+
+ /// Clone an identical FCmpInst
FCmpInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics.
+ /// Constructor with insert-before-instruction semantics.
FCmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
@@ -1264,7 +1264,7 @@ public:
"Invalid operand types for FCmp instruction");
}
- /// \brief Constructor with insert-at-end semantics.
+ /// Constructor with insert-at-end semantics.
FCmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
@@ -1283,7 +1283,7 @@ public:
"Invalid operand types for FCmp instruction");
}
- /// \brief Constructor with no-insertion semantics
+ /// Constructor with no-insertion semantics
FCmpInst(
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
@@ -1301,18 +1301,18 @@ public:
}
/// @returns true if the predicate of this instruction is EQ or NE.
- /// \brief Determine if this is an equality predicate.
+ /// Determine if this is an equality predicate.
static bool isEquality(Predicate Pred) {
return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
Pred == FCMP_UNE;
}
/// @returns true if the predicate of this instruction is EQ or NE.
- /// \brief Determine if this is an equality predicate.
+ /// Determine if this is an equality predicate.
bool isEquality() const { return isEquality(getPredicate()); }
/// @returns true if the predicate of this instruction is commutative.
- /// \brief Determine if this is a commutative predicate.
+ /// Determine if this is a commutative predicate.
bool isCommutative() const {
return isEquality() ||
getPredicate() == FCMP_FALSE ||
@@ -1322,20 +1322,20 @@ public:
}
/// @returns true if the predicate is relational (not EQ or NE).
- /// \brief Determine if this a relational predicate.
+ /// Determine if this a relational predicate.
bool isRelational() const { return !isEquality(); }
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
- /// \brief Swap operands and adjust predicate.
+ /// Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
}
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::FCmp;
}
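
A small sketch combining the new static CmpInst::getPredicateName() (declared in the InstrTypes.h hunks above) with existing ICmpInst queries; the helper name describeCompare is hypothetical.

#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void describeCompare(const ICmpInst &Cmp) {
  // Print the textual predicate name plus its equality/relational class.
  errs() << CmpInst::getPredicateName(Cmp.getPredicate())
         << (Cmp.isEquality() ? " (equality)" : " (relational)") << "\n";
}
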
@@ -1345,31 +1345,26 @@ public:
};
//===----------------------------------------------------------------------===//
-/// CallInst - This class represents a function call, abstracting a target
+/// This class represents a function call, abstracting a target
/// machine's calling convention. This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call. The rest of the bits
/// hold the calling convention of the call.
///
class CallInst : public Instruction,
public OperandBundleUser<CallInst, User::op_iterator> {
+ friend class OperandBundleUser<CallInst, User::op_iterator>;
+
AttributeSet AttributeList; ///< parameter attributes for call
FunctionType *FTy;
+
CallInst(const CallInst &CI);
- void init(Value *Func, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
- init(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, Args, Bundles, NameStr);
- }
- void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
- void init(Value *Func, const Twine &NameStr);
/// Construct a CallInst given a range of arguments.
- /// \brief Construct a CallInst from a range of arguments
+ /// Construct a CallInst from a range of arguments
inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
Instruction *InsertBefore);
+
inline CallInst(Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
Instruction *InsertBefore)
@@ -1382,24 +1377,37 @@ class CallInst : public Instruction,
: CallInst(Func, Args, None, NameStr, InsertBefore) {}
/// Construct a CallInst given a range of arguments.
- /// \brief Construct a CallInst from a range of arguments
+ /// Construct a CallInst from a range of arguments
inline CallInst(Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
BasicBlock *InsertAtEnd);
explicit CallInst(Value *F, const Twine &NameStr,
Instruction *InsertBefore);
+
CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
- friend class OperandBundleUser<CallInst, User::op_iterator>;
+ void init(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
+ init(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, Bundles, NameStr);
+ }
+ void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+ void init(Value *Func, const Twine &NameStr);
+
bool hasDescriptor() const { return HasDescriptor; }
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
CallInst *cloneImpl() const;
public:
+ ~CallInst() override;
+
static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = None,
const Twine &NameStr = "",
@@ -1408,6 +1416,7 @@ public:
cast<PointerType>(Func->getType())->getElementType()),
Func, Args, Bundles, NameStr, InsertBefore);
}
+
static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
@@ -1415,12 +1424,14 @@ public:
cast<PointerType>(Func->getType())->getElementType()),
Func, Args, None, NameStr, InsertBefore);
}
+
static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
return new (unsigned(Args.size() + 1))
CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
}
+
static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = None,
const Twine &NameStr = "",
@@ -1432,6 +1443,7 @@ public:
return new (TotalOps, DescriptorBytes)
CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
}
+
static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
@@ -1442,21 +1454,24 @@ public:
return new (TotalOps, DescriptorBytes)
CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
}
+
static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
return new (unsigned(Args.size() + 1))
CallInst(Func, Args, None, NameStr, InsertAtEnd);
}
+
static CallInst *Create(Value *F, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return new(1) CallInst(F, NameStr, InsertBefore);
}
+
static CallInst *Create(Value *F, const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new(1) CallInst(F, NameStr, InsertAtEnd);
}
- /// \brief Create a clone of \p CI with a different set of operand bundles and
+ /// Create a clone of \p CI with a different set of operand bundles and
/// insert it before \p InsertPt.
///
/// The returned call instruction is identical to \p CI in every way except that
@@ -1465,7 +1480,7 @@ public:
static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
Instruction *InsertPt = nullptr);
- /// CreateMalloc - Generate the IR for a call to malloc:
+ /// Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
/// possibly multiplied by the array size if the array size is not
/// constant 1.
@@ -1493,7 +1508,7 @@ public:
ArrayRef<OperandBundleDef> Bundles = None,
Function* MallocF = nullptr,
const Twine &Name = "");
- /// CreateFree - Generate the IR for a call to the builtin free function.
+ /// Generate the IR for a call to the builtin free function.
static Instruction *CreateFree(Value *Source,
Instruction *InsertBefore);
static Instruction *CreateFree(Value *Source,
@@ -1505,8 +1520,6 @@ public:
ArrayRef<OperandBundleDef> Bundles,
BasicBlock *InsertAtEnd);
- ~CallInst() override;
-
FunctionType *getFunctionType() const { return FTy; }
void mutateFunctionType(FunctionType *FTy) {
@@ -1520,20 +1533,25 @@ public:
TailCallKind getTailCallKind() const {
return TailCallKind(getSubclassDataFromInstruction() & 3);
}
+
bool isTailCall() const {
unsigned Kind = getSubclassDataFromInstruction() & 3;
return Kind == TCK_Tail || Kind == TCK_MustTail;
}
+
bool isMustTailCall() const {
return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
}
+
bool isNoTailCall() const {
return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
}
+
void setTailCall(bool isTC = true) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
unsigned(isTC ? TCK_Tail : TCK_None));
}
+
void setTailCallKind(TailCallKind TCK) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
unsigned(TCK));
@@ -1542,7 +1560,7 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// getNumArgOperands - Return the number of call arguments.
+ /// Return the number of call arguments.
///
unsigned getNumArgOperands() const {
return getNumOperands() - getNumTotalBundleOperands() - 1;
@@ -1559,35 +1577,35 @@ public:
setOperand(i, v);
}
- /// \brief Return the iterator pointing to the beginning of the argument list.
+ /// Return the iterator pointing to the beginning of the argument list.
op_iterator arg_begin() { return op_begin(); }
- /// \brief Return the iterator pointing to the end of the argument list.
+ /// Return the iterator pointing to the end of the argument list.
op_iterator arg_end() {
// [ call args ], [ operand bundles ], callee
return op_end() - getNumTotalBundleOperands() - 1;
- };
+ }
- /// \brief Iteration adapter for range-for loops.
+ /// Iteration adapter for range-for loops.
iterator_range<op_iterator> arg_operands() {
return make_range(arg_begin(), arg_end());
}
- /// \brief Return the iterator pointing to the beginning of the argument list.
+ /// Return the iterator pointing to the beginning of the argument list.
const_op_iterator arg_begin() const { return op_begin(); }
- /// \brief Return the iterator pointing to the end of the argument list.
+ /// Return the iterator pointing to the end of the argument list.
const_op_iterator arg_end() const {
// [ call args ], [ operand bundles ], callee
return op_end() - getNumTotalBundleOperands() - 1;
- };
+ }
- /// \brief Iteration adapter for range-for loops.
+ /// Iteration adapter for range-for loops.
iterator_range<const_op_iterator> arg_operands() const {
return make_range(arg_begin(), arg_end());
}
- /// \brief Wrappers for getting the \c Use of a call argument.
+ /// Wrappers for getting the \c Use of a call argument.
const Use &getArgOperandUse(unsigned i) const {
assert(i < getNumArgOperands() && "Out of bounds!");
return getOperandUse(i);
@@ -1613,61 +1631,59 @@ public:
(ID << 2));
}
- /// getAttributes - Return the parameter attributes for this call.
+ /// Return the parameter attributes for this call.
///
- const AttributeSet &getAttributes() const { return AttributeList; }
+ AttributeSet getAttributes() const { return AttributeList; }
- /// setAttributes - Set the parameter attributes for this call.
+ /// Set the parameter attributes for this call.
///
- void setAttributes(const AttributeSet &Attrs) { AttributeList = Attrs; }
+ void setAttributes(AttributeSet Attrs) { AttributeList = Attrs; }
- /// addAttribute - adds the attribute to the list of attributes.
+ /// Adds the attribute to the list of attributes.
void addAttribute(unsigned i, Attribute::AttrKind Kind);
- /// addAttribute - adds the attribute to the list of attributes.
- void addAttribute(unsigned i, StringRef Kind, StringRef Value);
-
- /// addAttribute - adds the attribute to the list of attributes.
+ /// Adds the attribute to the list of attributes.
void addAttribute(unsigned i, Attribute Attr);
- /// removeAttribute - removes the attribute from the list of attributes.
+ /// Removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attribute::AttrKind Kind);
- /// removeAttribute - removes the attribute from the list of attributes.
+ /// Removes the attribute from the list of attributes.
void removeAttribute(unsigned i, StringRef Kind);
- /// removeAttribute - removes the attribute from the list of attributes.
- void removeAttribute(unsigned i, Attribute Attr);
-
- /// \brief adds the dereferenceable attribute to the list of attributes.
+ /// Adds the dereferenceable attribute to the list of attributes.
void addDereferenceableAttr(unsigned i, uint64_t Bytes);
- /// \brief adds the dereferenceable_or_null attribute to the list of
+ /// Adds the dereferenceable_or_null attribute to the list of
/// attributes.
void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
- /// \brief Determine whether this call has the given attribute.
+ /// Determine whether this call has the given attribute.
bool hasFnAttr(Attribute::AttrKind Kind) const {
assert(Kind != Attribute::NoBuiltin &&
"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin");
return hasFnAttrImpl(Kind);
}
- /// \brief Determine whether this call has the given attribute.
+ /// Determine whether this call has the given attribute.
bool hasFnAttr(StringRef Kind) const {
return hasFnAttrImpl(Kind);
}
- /// \brief Determine whether the call or the callee has the given attributes.
+ /// Determine whether the call or the callee has the given attributes.
bool paramHasAttr(unsigned i, Attribute::AttrKind Kind) const;
- /// \brief Get the attribute of a given kind at a position.
- Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const;
+ /// Get the attribute of a given kind at a position.
+ Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
+ return getAttributes().getAttribute(i, Kind);
+ }
- /// \brief Get the attribute of a given kind at a position.
- Attribute getAttribute(unsigned i, StringRef Kind) const;
+ /// Get the attribute of a given kind at a position.
+ Attribute getAttribute(unsigned i, StringRef Kind) const {
+ return getAttributes().getAttribute(i, Kind);
+ }
- /// \brief Return true if the data operand at index \p i has the attribute \p
+ /// Return true if the data operand at index \p i has the attribute \p
/// A.
///
/// Data operands include call arguments and values used in operand bundles,
@@ -1682,18 +1698,18 @@ public:
/// (\p i - 1) in the operand list.
bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const;
- /// \brief Extract the alignment for a call or parameter (0=unknown).
+ /// Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
- /// \brief Extract the number of dereferenceable bytes for a call or
+ /// Extract the number of dereferenceable bytes for a call or
/// parameter (0=unknown).
uint64_t getDereferenceableBytes(unsigned i) const {
return AttributeList.getDereferenceableBytes(i);
}
- /// \brief Extract the number of dereferenceable_or_null bytes for a call or
+ /// Extract the number of dereferenceable_or_null bytes for a call or
/// parameter (0=unknown).
uint64_t getDereferenceableOrNullBytes(unsigned i) const {
return AttributeList.getDereferenceableOrNullBytes(i);
@@ -1706,20 +1722,20 @@ public:
return AttributeList.hasAttribute(n, Attribute::NoAlias);
}
- /// \brief Return true if the call should not be treated as a call to a
+ /// Return true if the call should not be treated as a call to a
/// builtin.
bool isNoBuiltin() const {
return hasFnAttrImpl(Attribute::NoBuiltin) &&
!hasFnAttrImpl(Attribute::Builtin);
}
- /// \brief Return true if the call should not be inlined.
+ /// Return true if the call should not be inlined.
bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
void setIsNoInline() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoInline);
}
- /// \brief Return true if the call can return twice
+ /// Return true if the call can return twice
bool canReturnTwice() const {
return hasFnAttr(Attribute::ReturnsTwice);
}
@@ -1727,7 +1743,7 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::ReturnsTwice);
}
- /// \brief Determine if the call does not access memory.
+ /// Determine if the call does not access memory.
bool doesNotAccessMemory() const {
return hasFnAttr(Attribute::ReadNone);
}
@@ -1735,7 +1751,7 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
}
- /// \brief Determine if the call does not access or only reads memory.
+ /// Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
}
@@ -1743,7 +1759,7 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
}
- /// \brief Determine if the call does not access or only writes memory.
+ /// Determine if the call does not access or only writes memory.
bool doesNotReadMemory() const {
return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
}
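// Minimal sketch (not from this change set): the memory predicates above
// compose, so a pass that only asks "may this call write memory?" can use a
// one-line wrapper.  The function name is hypothetical.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool callMayWriteMemory(const CallInst &CI) {
  // onlyReadsMemory() is already true for readnone call sites, so a single
  // check covers both readnone and readonly.
  return !CI.onlyReadsMemory();
}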
@@ -1760,35 +1776,34 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::ArgMemOnly);
}
- /// \brief Determine if the call cannot return.
+ /// Determine if the call cannot return.
bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
void setDoesNotReturn() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoReturn);
}
- /// \brief Determine if the call cannot unwind.
+ /// Determine if the call cannot unwind.
bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
void setDoesNotThrow() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
}
- /// \brief Determine if the call cannot be duplicated.
+ /// Determine if the call cannot be duplicated.
bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
void setCannotDuplicate() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoDuplicate);
}
- /// \brief Determine if the call is convergent
+ /// Determine if the call is convergent
bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
void setConvergent() {
addAttribute(AttributeSet::FunctionIndex, Attribute::Convergent);
}
void setNotConvergent() {
- removeAttribute(AttributeSet::FunctionIndex,
- Attribute::get(getContext(), Attribute::Convergent));
+ removeAttribute(AttributeSet::FunctionIndex, Attribute::Convergent);
}
- /// \brief Determine if the call returns a structure through first
+ /// Determine if the call returns a structure through the first

/// pointer argument.
bool hasStructRetAttr() const {
if (getNumArgOperands() == 0)
@@ -1798,24 +1813,24 @@ public:
return paramHasAttr(1, Attribute::StructRet);
}
- /// \brief Determine if any call argument is an aggregate passed by value.
+ /// Determine if any call argument is an aggregate passed by value.
bool hasByValArgument() const {
return AttributeList.hasAttrSomewhere(Attribute::ByVal);
}
- /// getCalledFunction - Return the function called, or null if this is an
+ /// Return the function called, or null if this is an
/// indirect function invocation.
///
Function *getCalledFunction() const {
return dyn_cast<Function>(Op<-1>());
}
- /// getCalledValue - Get a pointer to the function that is invoked by this
+ /// Get a pointer to the function that is invoked by this
/// instruction.
const Value *getCalledValue() const { return Op<-1>(); }
Value *getCalledValue() { return Op<-1>(); }
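// Sketch only: distinguishing direct from indirect calls with the accessors
// above.  getCalledFunction() performs a dyn_cast, so a call through a
// bitcast or any other non-Function callee is reported as indirect.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool isDirectCallTo(const CallInst &CI, StringRef Name) {
  if (const Function *F = CI.getCalledFunction())
    return F->getName() == Name;
  return false; // indirect call: only getCalledValue() is meaningful here
}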
- /// setCalledFunction - Set the function called.
+ /// Set the function called.
void setCalledFunction(Value* Fn) {
setCalledFunction(
cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
@@ -1828,7 +1843,7 @@ public:
Op<-1>() = Fn;
}
- /// isInlineAsm - Check if this call is an inline asm statement.
+ /// Check if this call is an inline asm statement.
bool isInlineAsm() const {
return isa<InlineAsm>(Op<-1>());
}
@@ -1842,17 +1857,17 @@ public:
}
private:
- template <typename AttrKind> bool hasFnAttrImpl(AttrKind A) const {
- if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A))
+ template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
+ if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, Kind))
return true;
// Operand bundles override attributes on the called function, but don't
// override attributes directly present on the call instruction.
- if (isFnAttrDisallowedByOpBundle(A))
+ if (isFnAttrDisallowedByOpBundle(Kind))
return false;
if (const Function *F = getCalledFunction())
- return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, A);
+ return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, Kind);
return false;
}
@@ -1899,16 +1914,9 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)
// SelectInst Class
//===----------------------------------------------------------------------===//
-/// SelectInst - This class represents the LLVM 'select' instruction.
+/// This class represents the LLVM 'select' instruction.
///
class SelectInst : public Instruction {
- void init(Value *C, Value *S1, Value *S2) {
- assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
- Op<0>() = C;
- Op<1>() = S1;
- Op<2>() = S2;
- }
-
SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
Instruction *InsertBefore)
: Instruction(S1->getType(), Instruction::Select,
@@ -1916,6 +1924,7 @@ class SelectInst : public Instruction {
init(C, S1, S2);
setName(NameStr);
}
+
SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: Instruction(S1->getType(), Instruction::Select,
@@ -1924,17 +1933,30 @@ class SelectInst : public Instruction {
setName(NameStr);
}
+ void init(Value *C, Value *S1, Value *S2) {
+ assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
+ Op<0>() = C;
+ Op<1>() = S1;
+ Op<2>() = S2;
+ }
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
SelectInst *cloneImpl() const;
public:
static SelectInst *Create(Value *C, Value *S1, Value *S2,
const Twine &NameStr = "",
- Instruction *InsertBefore = nullptr) {
- return new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
+ Instruction *InsertBefore = nullptr,
+ Instruction *MDFrom = nullptr) {
+ SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
+ if (MDFrom)
+ Sel->copyMetadata(*MDFrom);
+ return Sel;
}
+
static SelectInst *Create(Value *C, Value *S1, Value *S2,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
@@ -1952,7 +1974,7 @@ public:
void setTrueValue(Value *V) { Op<1>() = V; }
void setFalseValue(Value *V) { Op<2>() = V; }
- /// areInvalidOperands - Return a string if the specified operands are invalid
+ /// Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
@@ -1982,13 +2004,14 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
// VAArgInst Class
//===----------------------------------------------------------------------===//
-/// VAArgInst - This class represents the va_arg llvm instruction, which returns
+/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
VAArgInst *cloneImpl() const;
public:
@@ -1997,6 +2020,7 @@ public:
: UnaryInstruction(Ty, VAArg, List, InsertBefore) {
setName(NameStr);
}
+
VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
@@ -2020,7 +2044,7 @@ public:
// ExtractElementInst Class
//===----------------------------------------------------------------------===//
-/// ExtractElementInst - This instruction extracts a single (scalar)
+/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
@@ -2032,6 +2056,7 @@ class ExtractElementInst : public Instruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
ExtractElementInst *cloneImpl() const;
public:
@@ -2040,13 +2065,14 @@ public:
Instruction *InsertBefore = nullptr) {
return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
}
+
static ExtractElementInst *Create(Value *Vec, Value *Idx,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
}
- /// isValidOperands - Return true if an extractelement instruction can be
+ /// Return true if an extractelement instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *Vec, const Value *Idx);
@@ -2082,7 +2108,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
// InsertElementInst Class
//===----------------------------------------------------------------------===//
-/// InsertElementInst - This instruction inserts a single (scalar)
+/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
@@ -2095,6 +2121,7 @@ class InsertElementInst : public Instruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
InsertElementInst *cloneImpl() const;
public:
@@ -2103,18 +2130,19 @@ public:
Instruction *InsertBefore = nullptr) {
return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
}
+
static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
}
- /// isValidOperands - Return true if an insertelement instruction can be
+ /// Return true if an insertelement instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *Vec, const Value *NewElt,
const Value *Idx);
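// Illustrative only: isValidOperands() lets a front end verify operand types
// before constructing the instruction instead of tripping an assertion in
// the constructor.  The helper name below is hypothetical.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static Value *tryInsertElement(Value *Vec, Value *Elt, Value *Idx,
                               Instruction *InsertBefore) {
  if (!InsertElementInst::isValidOperands(Vec, Elt, Idx))
    return nullptr; // e.g. Elt does not match the vector's element type
  return InsertElementInst::Create(Vec, Elt, Idx, "ins", InsertBefore);
}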
- /// getType - Overload to return most specific vector type.
+ /// Overload to return most specific vector type.
///
VectorType *getType() const {
return cast<VectorType>(Instruction::getType());
@@ -2143,32 +2171,34 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
// ShuffleVectorInst Class
//===----------------------------------------------------------------------===//
-/// ShuffleVectorInst - This instruction constructs a fixed permutation of two
+/// This instruction constructs a fixed permutation of two
/// input vectors.
///
class ShuffleVectorInst : public Instruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
ShuffleVectorInst *cloneImpl() const;
public:
- // allocate space for exactly three operands
- void *operator new(size_t s) {
- return User::operator new(s, 3);
- }
ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &NameStr = "",
Instruction *InsertBefor = nullptr);
ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &NameStr, BasicBlock *InsertAtEnd);
- /// isValidOperands - Return true if a shufflevector instruction can be
+ // allocate space for exactly three operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 3);
+ }
+
+ /// Return true if a shufflevector instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *V1, const Value *V2,
const Value *Mask);
- /// getType - Overload to return most specific vector type.
+ /// Overload to return most specific vector type.
///
VectorType *getType() const {
return cast<VectorType>(Instruction::getType());
@@ -2181,19 +2211,22 @@ public:
return cast<Constant>(getOperand(2));
}
- /// getMaskValue - Return the index from the shuffle mask for the specified
- /// output result. This is either -1 if the element is undef or a number less
- /// than 2*numelements.
- static int getMaskValue(Constant *Mask, unsigned i);
+ /// Return the shuffle mask value for the specified element of the mask.
+ /// Return -1 if the element is undef.
+ static int getMaskValue(Constant *Mask, unsigned Elt);
- int getMaskValue(unsigned i) const {
- return getMaskValue(getMask(), i);
+ /// Return the shuffle mask value of this instruction for the given element
+ /// index. Return -1 if the element is undef.
+ int getMaskValue(unsigned Elt) const {
+ return getMaskValue(getMask(), Elt);
}
- /// getShuffleMask - Return the full mask for this instruction, where each
- /// element is the element number and undef's are returned as -1.
+ /// Convert the input shuffle mask operand to a vector of integers. Undefined
+ /// elements of the mask are returned as -1.
static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);
+ /// Return the mask for this instruction as a vector of integers. Undefined
+ /// elements of the mask are returned as -1.
void getShuffleMask(SmallVectorImpl<int> &Result) const {
return getShuffleMask(getMask(), Result);
}
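// Sketch (not part of the patch): reading the mask through the accessors
// above.  A mask value of -1 denotes an undef lane; anything else indexes
// into the concatenation of the two input vectors.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool shuffleHasUndefLane(const ShuffleVectorInst &SVI) {
  SmallVector<int, 16> Mask;
  SVI.getShuffleMask(Mask);
  for (int Elt : Mask)
    if (Elt == -1)
      return true;
  return false;
}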
@@ -2224,15 +2257,13 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
// ExtractValueInst Class
//===----------------------------------------------------------------------===//
-/// ExtractValueInst - This instruction extracts a struct member or array
+/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
SmallVector<unsigned, 4> Indices;
ExtractValueInst(const ExtractValueInst &EVI);
- void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
-
/// Constructors - Create a extractvalue instruction with a base aggregate
/// value and a list of indices. The first ctor can optionally insert before
/// an existing instruction, the second appends the new instruction to the
@@ -2248,9 +2279,12 @@ class ExtractValueInst : public UnaryInstruction {
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
+ void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
ExtractValueInst *cloneImpl() const;
public:
@@ -2261,6 +2295,7 @@ public:
return new
ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
}
+
static ExtractValueInst *Create(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
@@ -2268,7 +2303,7 @@ public:
return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
}
- /// getIndexedType - Returns the type of the element that would be extracted
+ /// Returns the type of the element that would be extracted
/// with an extractvalue instruction with the specified parameters.
///
/// Null is returned if the indices are invalid for the specified type.
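// Hedged example: because getIndexedType() returns null for out-of-range or
// mistyped indices, it works as a pre-flight check before building an
// extractvalue.  The helper name is invented.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static ExtractValueInst *tryExtractValue(Value *Agg, ArrayRef<unsigned> Idxs,
                                         Instruction *InsertBefore) {
  if (!ExtractValueInst::getIndexedType(Agg->getType(), Idxs))
    return nullptr; // the indices do not name a member of the aggregate
  return ExtractValueInst::Create(Agg, Idxs, "ev", InsertBefore);
}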
@@ -2320,6 +2355,7 @@ ExtractValueInst::ExtractValueInst(Value *Agg,
ExtractValue, Agg, InsertBefore) {
init(Idxs, NameStr);
}
+
ExtractValueInst::ExtractValueInst(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
@@ -2333,16 +2369,13 @@ ExtractValueInst::ExtractValueInst(Value *Agg,
// InsertValueInst Class
//===----------------------------------------------------------------------===//
-/// InsertValueInst - This instruction inserts a struct field of array element
+/// This instruction inserts a struct field or array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
SmallVector<unsigned, 4> Indices;
- void *operator new(size_t, unsigned) = delete;
InsertValueInst(const InsertValueInst &IVI);
- void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
- const Twine &NameStr);
/// Constructors - Create a insertvalue instruction with a base aggregate
/// value, a value to insert, and a list of indices. The first ctor can
@@ -2364,9 +2397,13 @@ class InsertValueInst : public Instruction {
InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
BasicBlock *InsertAtEnd);
+ void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+ const Twine &NameStr);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
InsertValueInst *cloneImpl() const;
public:
@@ -2375,12 +2412,15 @@ public:
return User::operator new(s, 2);
}
+ void *operator new(size_t, unsigned) = delete;
+
static InsertValueInst *Create(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
}
+
static InsertValueInst *Create(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
@@ -2454,6 +2494,7 @@ InsertValueInst::InsertValueInst(Value *Agg,
2, InsertBefore) {
init(Agg, Val, Idxs, NameStr);
}
+
InsertValueInst::InsertValueInst(Value *Agg,
Value *Val,
ArrayRef<unsigned> Idxs,
@@ -2476,17 +2517,13 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
// scientist's overactive imagination.
//
class PHINode : public Instruction {
- void anchor() override;
-
- void *operator new(size_t, unsigned) = delete;
- /// ReservedSpace - The number of operands actually allocated. NumOperands is
+ /// The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
+
PHINode(const PHINode &PN);
// allocate space for exactly zero operands
- void *operator new(size_t s) {
- return User::operator new(s);
- }
+
explicit PHINode(Type *Ty, unsigned NumReservedValues,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr)
@@ -2504,7 +2541,18 @@ class PHINode : public Instruction {
allocHungoffUses(ReservedSpace);
}
+ void *operator new(size_t s) {
+ return User::operator new(s);
+ }
+
+ void anchor() override;
+
protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+
+ PHINode *cloneImpl() const;
+
// allocHungoffUses - this is more complicated than the generic
// User::allocHungoffUses, because we have to allocate Uses for the incoming
// values and pointers to the incoming blocks, all in one allocation.
@@ -2512,11 +2560,9 @@ protected:
User::allocHungoffUses(N, /* IsPhi */ true);
}
- // Note: Instruction needs to be a friend here to call cloneImpl.
- friend class Instruction;
- PHINode *cloneImpl() const;
-
public:
+ void *operator new(size_t, unsigned) = delete;
+
/// Constructors - NumReservedValues is a hint for the number of incoming
/// edges that this phi node will have (use 0 if you really have no idea).
static PHINode *Create(Type *Ty, unsigned NumReservedValues,
@@ -2524,6 +2570,7 @@ public:
Instruction *InsertBefore = nullptr) {
return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
}
+
static PHINode *Create(Type *Ty, unsigned NumReservedValues,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
@@ -2570,11 +2617,11 @@ public:
const_op_range incoming_values() const { return operands(); }
- /// getNumIncomingValues - Return the number of incoming edges
+ /// Return the number of incoming edges
///
unsigned getNumIncomingValues() const { return getNumOperands(); }
- /// getIncomingValue - Return incoming value number x
+ /// Return incoming value number x
///
Value *getIncomingValue(unsigned i) const {
return getOperand(i);
@@ -2592,13 +2639,13 @@ public:
return i;
}
- /// getIncomingBlock - Return incoming basic block number @p i.
+ /// Return incoming basic block number @p i.
///
BasicBlock *getIncomingBlock(unsigned i) const {
return block_begin()[i];
}
- /// getIncomingBlock - Return incoming basic block corresponding
+ /// Return incoming basic block corresponding
/// to an operand of the PHI.
///
BasicBlock *getIncomingBlock(const Use &U) const {
@@ -2606,7 +2653,7 @@ public:
return getIncomingBlock(unsigned(&U - op_begin()));
}
- /// getIncomingBlock - Return incoming basic block corresponding
+ /// Return incoming basic block corresponding
/// to value use iterator.
///
BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
@@ -2618,7 +2665,7 @@ public:
block_begin()[i] = BB;
}
- /// addIncoming - Add an incoming value to the end of the PHI list
+ /// Add an incoming value to the end of the PHI list
///
void addIncoming(Value *V, BasicBlock *BB) {
if (getNumOperands() == ReservedSpace)
@@ -2629,7 +2676,7 @@ public:
setIncomingBlock(getNumOperands() - 1, BB);
}
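// A minimal sketch of the usual construction pattern: reserve the expected
// number of incoming edges up front, then add one (value, block) pair per
// predecessor.  Block and value names here are hypothetical.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static PHINode *mergeTwoValues(Type *Ty, Value *FromThen, BasicBlock *ThenBB,
                               Value *FromElse, BasicBlock *ElseBB,
                               BasicBlock *JoinBB) {
  // PHIs must sit at the top of the join block, so insert before its first
  // instruction (JoinBB is assumed to be non-empty).
  PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
                                &*JoinBB->begin());
  PN->addIncoming(FromThen, ThenBB);
  PN->addIncoming(FromElse, ElseBB);
  return PN;
}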
- /// removeIncomingValue - Remove an incoming value. This is useful if a
+ /// Remove an incoming value. This is useful if a
/// predecessor basic block is deleted. The value removed is returned.
///
/// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
@@ -2645,7 +2692,7 @@ public:
return removeIncomingValue(Idx, DeletePHIIfEmpty);
}
- /// getBasicBlockIndex - Return the first index of the specified basic
+ /// Return the first index of the specified basic
/// block in the value list for this PHI. Returns -1 if no instance.
///
int getBasicBlockIndex(const BasicBlock *BB) const {
@@ -2661,11 +2708,11 @@ public:
return getIncomingValue(Idx);
}
- /// hasConstantValue - If the specified PHI node always merges together the
+ /// If the specified PHI node always merges together the
/// same value, return the value, otherwise return null.
Value *hasConstantValue() const;
- /// hasConstantOrUndefValue - Whether the specified PHI node always merges
+ /// Whether the specified PHI node always merges
/// together the same value, assuming undefs are equal to a unique
/// non-undef value.
bool hasConstantOrUndefValue() const;
@@ -2693,7 +2740,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
//===----------------------------------------------------------------------===//
//===---------------------------------------------------------------------------
-/// LandingPadInst - The landingpad instruction holds all of the information
+/// The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
@@ -2701,34 +2748,38 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
/// cleanup.
///
class LandingPadInst : public Instruction {
- /// ReservedSpace - The number of operands actually allocated. NumOperands is
+ /// The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
+
LandingPadInst(const LandingPadInst &LP);
public:
enum ClauseType { Catch, Filter };
private:
- void *operator new(size_t, unsigned) = delete;
+ explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, Instruction *InsertBefore);
+ explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
// Allocate space for exactly zero operands.
void *operator new(size_t s) {
return User::operator new(s);
}
+
void growOperands(unsigned Size);
void init(unsigned NumReservedValues, const Twine &NameStr);
- explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
- const Twine &NameStr, Instruction *InsertBefore);
- explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
- const Twine &NameStr, BasicBlock *InsertAtEnd);
-
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
LandingPadInst *cloneImpl() const;
public:
+ void *operator new(size_t, unsigned) = delete;
+
/// Constructors - NumReservedClauses is a hint for the number of incoming
/// clauses that this landingpad will have (use 0 if you really have no idea).
static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
@@ -2740,12 +2791,12 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// isCleanup - Return 'true' if this landingpad instruction is a
+ /// Return 'true' if this landingpad instruction is a
/// cleanup. I.e., it should be run when unwinding even if its landing pad
/// doesn't catch the exception.
bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
- /// setCleanup - Indicate that this landingpad instruction is a cleanup.
+ /// Indicate that this landingpad instruction is a cleanup.
void setCleanup(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
(V ? 1 : 0));
@@ -2760,20 +2811,20 @@ public:
return cast<Constant>(getOperandList()[Idx]);
}
- /// isCatch - Return 'true' if the clause and index Idx is a catch clause.
+ /// Return 'true' if the clause and index Idx is a catch clause.
bool isCatch(unsigned Idx) const {
return !isa<ArrayType>(getOperandList()[Idx]->getType());
}
- /// isFilter - Return 'true' if the clause and index Idx is a filter clause.
+ /// Return 'true' if the clause and index Idx is a filter clause.
bool isFilter(unsigned Idx) const {
return isa<ArrayType>(getOperandList()[Idx]->getType());
}
- /// getNumClauses - Get the number of clauses for this landing pad.
+ /// Get the number of clauses for this landing pad.
unsigned getNumClauses() const { return getNumOperands(); }
- /// reserveClauses - Grow the size of the operand list to accommodate the new
+ /// Grow the size of the operand list to accommodate the new
/// number of clauses.
void reserveClauses(unsigned Size) { growOperands(Size); }
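// Sketch only (names invented, and the enclosing function is assumed to have
// a personality routine already set): a typical landingpad carries one catch
// clause per handled type plus an optional cleanup bit; the clause constants
// come from the front end's type-info globals.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static LandingPadInst *makeCatchLandingPad(Type *LPadTy, Constant *TypeInfo,
                                           BasicBlock *LPadBB) {
  LandingPadInst *LP = LandingPadInst::Create(LPadTy, /*NumReservedClauses=*/1,
                                              "lpad", LPadBB);
  LP->addClause(TypeInfo); // a catch clause; filters use array-typed constants
  LP->setCleanup(true);    // also run this pad when unwinding past the handlers
  return LP;
}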
@@ -2797,7 +2848,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
//===----------------------------------------------------------------------===//
//===---------------------------------------------------------------------------
-/// ReturnInst - Return a value (possibly void), from a function. Execution
+/// Return a value (possibly void), from a function. Execution
/// does not continue in this function any longer.
///
class ReturnInst : public TerminatorInst {
@@ -2823,21 +2874,25 @@ private:
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
ReturnInst *cloneImpl() const;
public:
+ ~ReturnInst() override;
+
static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
Instruction *InsertBefore = nullptr) {
return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
}
+
static ReturnInst* Create(LLVMContext &C, Value *retVal,
BasicBlock *InsertAtEnd) {
return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
}
+
static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
return new(0) ReturnInst(C, InsertAtEnd);
}
- ~ReturnInst() override;
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -2874,7 +2929,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
//===----------------------------------------------------------------------===//
//===---------------------------------------------------------------------------
-/// BranchInst - Conditional or Unconditional Branch instruction.
+/// Conditional or Unconditional Branch instruction.
///
class BranchInst : public TerminatorInst {
/// Ops list - Branches are strange. The operands are ordered:
@@ -2882,7 +2937,6 @@ class BranchInst : public TerminatorInst {
/// they don't have to check for cond/uncond branchness. These are mostly
/// accessed relative from op_end().
BranchInst(const BranchInst &BI);
- void AssertOK();
// BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
// BranchInst(BB *B) - 'br B'
// BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
@@ -2897,9 +2951,12 @@ class BranchInst : public TerminatorInst {
BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
BasicBlock *InsertAtEnd);
+ void AssertOK();
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
BranchInst *cloneImpl() const;
public:
@@ -2907,13 +2964,16 @@ public:
Instruction *InsertBefore = nullptr) {
return new(1) BranchInst(IfTrue, InsertBefore);
}
+
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
Value *Cond, Instruction *InsertBefore = nullptr) {
return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
}
+
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
return new(1) BranchInst(IfTrue, InsertAtEnd);
}
+
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
Value *Cond, BasicBlock *InsertAtEnd) {
return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
@@ -2947,7 +3007,7 @@ public:
*(&Op<-1>() - idx) = NewSucc;
}
- /// \brief Swap the successors of this branch instruction.
+ /// Swap the successors of this branch instruction.
///
/// Swaps the successors of the branch instruction. This also swaps any
/// branch weight metadata associated with the instruction so that it
@@ -2982,19 +3042,14 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
/// Multiway switch
///
class SwitchInst : public TerminatorInst {
- void *operator new(size_t, unsigned) = delete;
unsigned ReservedSpace;
+
// Operand[0] = Value to switch on
// Operand[1] = Default basic block destination
// Operand[2n ] = Value to match
// Operand[2n+1] = BasicBlock to go to on match
SwitchInst(const SwitchInst &SI);
- void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
- void growOperands();
- // allocate space for exactly zero operands
- void *operator new(size_t s) {
- return User::operator new(s);
- }
+
/// Create a new switch instruction, specifying a value to switch on and a
/// default destination. The number of additional cases can be specified here
/// to make memory allocation more efficient. This constructor can also
@@ -3009,12 +3064,23 @@ class SwitchInst : public TerminatorInst {
SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
BasicBlock *InsertAtEnd);
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s);
+ }
+
+ void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
+ void growOperands();
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
SwitchInst *cloneImpl() const;
public:
+ void *operator new(size_t, unsigned) = delete;
+
// -2
static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
@@ -3111,7 +3177,6 @@ public:
ConstCaseIt;
class CaseIt : public CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> {
-
typedef CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> ParentTy;
public:
@@ -3135,6 +3200,7 @@ public:
Instruction *InsertBefore = nullptr) {
return new SwitchInst(Value, Default, NumCases, InsertBefore);
}
+
static SwitchInst *Create(Value *Value, BasicBlock *Default,
unsigned NumCases, BasicBlock *InsertAtEnd) {
return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
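// Hedged sketch of building and walking a switch.  The case iterator of this
// tree is used directly (getCaseValue()/getCaseSuccessor() are members of
// CaseIt); everything else below is invented scaffolding.
#include <utility>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static SwitchInst *
buildSwitch(Value *Cond, BasicBlock *DefaultBB,
            ArrayRef<std::pair<ConstantInt *, BasicBlock *>> Cases,
            BasicBlock *InsertAtEnd) {
  SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, Cases.size(), InsertAtEnd);
  for (const auto &C : Cases)
    SI->addCase(C.first, C.second);
  return SI;
}

static unsigned countCasesTo(SwitchInst &SI, BasicBlock *Target) {
  unsigned N = 0;
  for (SwitchInst::CaseIt I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
    if (I.getCaseSuccessor() == Target)
      ++N;
  return N;
}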
@@ -3166,6 +3232,7 @@ public:
CaseIt case_begin() {
return CaseIt(this, 0);
}
+
/// Returns a read-only iterator that points to the first case in the
/// SwitchInst.
ConstCaseIt case_begin() const {
@@ -3177,6 +3244,7 @@ public:
CaseIt case_end() {
return CaseIt(this, getNumCases());
}
+
/// Returns a read-only iterator that points one past the last in the
/// SwitchInst.
ConstCaseIt case_end() const {
@@ -3286,44 +3354,49 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
//===----------------------------------------------------------------------===//
//===---------------------------------------------------------------------------
-/// IndirectBrInst - Indirect Branch Instruction.
+/// Indirect Branch Instruction.
///
class IndirectBrInst : public TerminatorInst {
- void *operator new(size_t, unsigned) = delete;
unsigned ReservedSpace;
- // Operand[0] = Value to switch on
- // Operand[1] = Default basic block destination
- // Operand[2n ] = Value to match
- // Operand[2n+1] = BasicBlock to go to on match
+
+ // Operand[0] = Address to jump to
+ // Operand[n+1] = n-th destination
IndirectBrInst(const IndirectBrInst &IBI);
- void init(Value *Address, unsigned NumDests);
- void growOperands();
- // allocate space for exactly zero operands
- void *operator new(size_t s) {
- return User::operator new(s);
- }
- /// IndirectBrInst ctor - Create a new indirectbr instruction, specifying an
+
+ /// Create a new indirectbr instruction, specifying an
/// Address to jump to. The number of expected destinations can be specified
/// here to make memory allocation more efficient. This constructor can also
/// autoinsert before another instruction.
IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
- /// IndirectBrInst ctor - Create a new indirectbr instruction, specifying an
+ /// Create a new indirectbr instruction, specifying an
/// Address to jump to. The number of expected destinations can be specified
/// here to make memory allocation more efficient. This constructor also
/// autoinserts at the end of the specified BasicBlock.
IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s);
+ }
+
+ void init(Value *Address, unsigned NumDests);
+ void growOperands();
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
IndirectBrInst *cloneImpl() const;
public:
+ void *operator new(size_t, unsigned) = delete;
+
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
Instruction *InsertBefore = nullptr) {
return new IndirectBrInst(Address, NumDests, InsertBefore);
}
+
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
BasicBlock *InsertAtEnd) {
return new IndirectBrInst(Address, NumDests, InsertAtEnd);
@@ -3337,19 +3410,19 @@ public:
const Value *getAddress() const { return getOperand(0); }
void setAddress(Value *V) { setOperand(0, V); }
- /// getNumDestinations - return the number of possible destinations in this
+ /// return the number of possible destinations in this
/// indirectbr instruction.
unsigned getNumDestinations() const { return getNumOperands()-1; }
- /// getDestination - Return the specified destination.
+ /// Return the specified destination.
BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
- /// addDestination - Add a destination.
+ /// Add a destination.
///
void addDestination(BasicBlock *Dest);
- /// removeDestination - This method removes the specified successor from the
+ /// This method removes the specified successor from the
/// indirectbr instruction.
void removeDestination(unsigned i);
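// Illustrative sketch: an indirectbr jumps to an address normally produced by
// blockaddress(), and every block it might reach must be listed as a
// destination.  Names are hypothetical.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static IndirectBrInst *buildIndirectBr(Value *Target,
                                       ArrayRef<BasicBlock *> PossibleDests,
                                       BasicBlock *InsertAtEnd) {
  IndirectBrInst *IBI =
      IndirectBrInst::Create(Target, PossibleDests.size(), InsertAtEnd);
  for (BasicBlock *Dest : PossibleDests)
    IBI->addDestination(Dest); // also records Dest as a successor
  return IBI;
}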
@@ -3385,28 +3458,21 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
// InvokeInst Class
//===----------------------------------------------------------------------===//
-/// InvokeInst - Invoke instruction. The SubclassData field is used to hold the
+/// Invoke instruction. The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public TerminatorInst,
public OperandBundleUser<InvokeInst, User::op_iterator> {
+ friend class OperandBundleUser<InvokeInst, User::op_iterator>;
+
AttributeSet AttributeList;
FunctionType *FTy;
+
InvokeInst(const InvokeInst &BI);
- void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
- ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
- const Twine &NameStr) {
- init(cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType()),
- Func, IfNormal, IfException, Args, Bundles, NameStr);
- }
- void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
- BasicBlock *IfException, ArrayRef<Value *> Args,
- ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
/// Construct an InvokeInst given a range of arguments.
///
- /// \brief Construct an InvokeInst from a range of arguments
+ /// Construct an InvokeInst from a range of arguments
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
unsigned Values, const Twine &NameStr,
@@ -3422,18 +3488,30 @@ class InvokeInst : public TerminatorInst,
const Twine &NameStr, Instruction *InsertBefore);
/// Construct an InvokeInst given a range of arguments.
///
- /// \brief Construct an InvokeInst from a range of arguments
+ /// Construct an InvokeInst from a range of arguments
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
unsigned Values, const Twine &NameStr,
BasicBlock *InsertAtEnd);
- friend class OperandBundleUser<InvokeInst, User::op_iterator>;
bool hasDescriptor() const { return HasDescriptor; }
+ void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
+ ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr) {
+ init(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, Bundles, NameStr);
+ }
+
+ void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
InvokeInst *cloneImpl() const;
public:
@@ -3446,6 +3524,7 @@ public:
Func, IfNormal, IfException, Args, None, NameStr,
InsertBefore);
}
+
static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = None,
@@ -3456,6 +3535,7 @@ public:
Func, IfNormal, IfException, Args, Bundles, NameStr,
InsertBefore);
}
+
static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
const Twine &NameStr,
@@ -3464,6 +3544,7 @@ public:
return new (Values) InvokeInst(Ty, Func, IfNormal, IfException, Args, None,
Values, NameStr, InsertBefore);
}
+
static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = None,
@@ -3476,6 +3557,7 @@ public:
InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, Values,
NameStr, InsertBefore);
}
+
static InvokeInst *Create(Value *Func,
BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, const Twine &NameStr,
@@ -3496,7 +3578,7 @@ public:
InsertAtEnd);
}
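// A sketch (not from the patch) of the most common factory: control reaches
// NormalDest on an ordinary return and UnwindDest, which must begin with an
// EH pad such as a landingpad, when an exception unwinds.  Names invented.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static InvokeInst *emitInvoke(Function *Callee, ArrayRef<Value *> Args,
                              BasicBlock *NormalDest, BasicBlock *UnwindDest,
                              BasicBlock *InsertAtEnd) {
  InvokeInst *II = InvokeInst::Create(Callee, NormalDest, UnwindDest, Args,
                                      /*NameStr=*/"", InsertAtEnd);
  II->setCallingConv(Callee->getCallingConv()); // match the callee's convention
  return II;
}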
- /// \brief Create a clone of \p II with a different set of operand bundles and
+ /// Create a clone of \p II with a different set of operand bundles and
/// insert it before \p InsertPt.
///
/// The returned invoke instruction is identical to \p II in every way except
@@ -3515,7 +3597,7 @@ public:
this->FTy = FTy;
}
- /// getNumArgOperands - Return the number of invoke arguments.
+ /// Return the number of invoke arguments.
///
unsigned getNumArgOperands() const {
return getNumOperands() - getNumTotalBundleOperands() - 3;
@@ -3532,35 +3614,35 @@ public:
setOperand(i, v);
}
- /// \brief Return the iterator pointing to the beginning of the argument list.
+ /// Return the iterator pointing to the beginning of the argument list.
op_iterator arg_begin() { return op_begin(); }
- /// \brief Return the iterator pointing to the end of the argument list.
+ /// Return the iterator pointing to the end of the argument list.
op_iterator arg_end() {
// [ invoke args ], [ operand bundles ], normal dest, unwind dest, callee
return op_end() - getNumTotalBundleOperands() - 3;
- };
+ }
- /// \brief Iteration adapter for range-for loops.
+ /// Iteration adapter for range-for loops.
iterator_range<op_iterator> arg_operands() {
return make_range(arg_begin(), arg_end());
}
- /// \brief Return the iterator pointing to the beginning of the argument list.
+ /// Return the iterator pointing to the beginning of the argument list.
const_op_iterator arg_begin() const { return op_begin(); }
- /// \brief Return the iterator pointing to the end of the argument list.
+ /// Return the iterator pointing to the end of the argument list.
const_op_iterator arg_end() const {
// [ invoke args ], [ operand bundles ], normal dest, unwind dest, callee
return op_end() - getNumTotalBundleOperands() - 3;
- };
+ }
- /// \brief Iteration adapter for range-for loops.
+ /// Iteration adapter for range-for loops.
iterator_range<const_op_iterator> arg_operands() const {
return make_range(arg_begin(), arg_end());
}
- /// \brief Wrappers for getting the \c Use of a invoke argument.
+ /// Wrappers for getting the \c Use of a invoke argument.
const Use &getArgOperandUse(unsigned i) const {
assert(i < getNumArgOperands() && "Out of bounds!");
return getOperandUse(i);
@@ -3585,58 +3667,59 @@ public:
setInstructionSubclassData(ID);
}
- /// getAttributes - Return the parameter attributes for this invoke.
+ /// Return the parameter attributes for this invoke.
///
- const AttributeSet &getAttributes() const { return AttributeList; }
+ AttributeSet getAttributes() const { return AttributeList; }
- /// setAttributes - Set the parameter attributes for this invoke.
+ /// Set the parameter attributes for this invoke.
///
- void setAttributes(const AttributeSet &Attrs) { AttributeList = Attrs; }
+ void setAttributes(AttributeSet Attrs) { AttributeList = Attrs; }
- /// addAttribute - adds the attribute to the list of attributes.
+ /// adds the attribute to the list of attributes.
void addAttribute(unsigned i, Attribute::AttrKind Kind);
- /// addAttribute - adds the attribute to the list of attributes.
+ /// adds the attribute to the list of attributes.
void addAttribute(unsigned i, Attribute Attr);
- /// removeAttribute - removes the attribute from the list of attributes.
+ /// removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attribute::AttrKind Kind);
- /// removeAttribute - removes the attribute from the list of attributes.
+ /// removes the attribute from the list of attributes.
void removeAttribute(unsigned i, StringRef Kind);
- /// removeAttribute - removes the attribute from the list of attributes.
- void removeAttribute(unsigned i, Attribute Attr);
-
- /// \brief adds the dereferenceable attribute to the list of attributes.
+ /// adds the dereferenceable attribute to the list of attributes.
void addDereferenceableAttr(unsigned i, uint64_t Bytes);
- /// \brief adds the dereferenceable_or_null attribute to the list of
+ /// adds the dereferenceable_or_null attribute to the list of
/// attributes.
void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
- /// \brief Determine whether this call has the given attribute.
+ /// Determine whether this call has the given attribute.
bool hasFnAttr(Attribute::AttrKind Kind) const {
assert(Kind != Attribute::NoBuiltin &&
"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin");
return hasFnAttrImpl(Kind);
}
- /// \brief Determine whether this call has the given attribute.
+ /// Determine whether this call has the given attribute.
bool hasFnAttr(StringRef Kind) const {
return hasFnAttrImpl(Kind);
}
- /// \brief Determine whether the call or the callee has the given attributes.
+ /// Determine whether the call or the callee has the given attributes.
bool paramHasAttr(unsigned i, Attribute::AttrKind Kind) const;
- /// \brief Get the attribute of a given kind at a position.
- Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const;
+ /// Get the attribute of a given kind at a position.
+ Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
+ return getAttributes().getAttribute(i, Kind);
+ }
- /// \brief Get the attribute of a given kind at a position.
- Attribute getAttribute(unsigned i, StringRef Kind) const;
+ /// Get the attribute of a given kind at a position.
+ Attribute getAttribute(unsigned i, StringRef Kind) const {
+ return getAttributes().getAttribute(i, Kind);
+ }
- /// \brief Return true if the data operand at index \p i has the attribute \p
+ /// Return true if the data operand at index \p i has the attribute \p
/// A.
///
/// Data operands include invoke arguments and values used in operand bundles,
@@ -3652,18 +3735,18 @@ public:
/// (\p i - 1) in the operand list.
bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const;
- /// \brief Extract the alignment for a call or parameter (0=unknown).
+ /// Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
- /// \brief Extract the number of dereferenceable bytes for a call or
+ /// Extract the number of dereferenceable bytes for a call or
/// parameter (0=unknown).
uint64_t getDereferenceableBytes(unsigned i) const {
return AttributeList.getDereferenceableBytes(i);
}
- /// \brief Extract the number of dereferenceable_or_null bytes for a call or
+ /// Extract the number of dereferenceable_or_null bytes for a call or
/// parameter (0=unknown).
uint64_t getDereferenceableOrNullBytes(unsigned i) const {
return AttributeList.getDereferenceableOrNullBytes(i);
@@ -3676,7 +3759,7 @@ public:
return AttributeList.hasAttribute(n, Attribute::NoAlias);
}
- /// \brief Return true if the call should not be treated as a call to a
+ /// Return true if the call should not be treated as a call to a
/// builtin.
bool isNoBuiltin() const {
// We assert in hasFnAttr if one passes in Attribute::NoBuiltin, so we have
@@ -3685,13 +3768,13 @@ public:
!hasFnAttrImpl(Attribute::Builtin);
}
- /// \brief Return true if the call should not be inlined.
+ /// Return true if the call should not be inlined.
bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
void setIsNoInline() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoInline);
}
- /// \brief Determine if the call does not access memory.
+ /// Determine if the call does not access memory.
bool doesNotAccessMemory() const {
return hasFnAttr(Attribute::ReadNone);
}
@@ -3699,7 +3782,7 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
}
- /// \brief Determine if the call does not access or only reads memory.
+ /// Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
}
@@ -3707,7 +3790,7 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
}
- /// \brief Determine if the call does not access or only writes memory.
+ /// Determine if the call does not access or only writes memory.
bool doesNotReadMemory() const {
return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
}
@@ -3724,35 +3807,34 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::ArgMemOnly);
}
- /// \brief Determine if the call cannot return.
+ /// Determine if the call cannot return.
bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
void setDoesNotReturn() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoReturn);
}
- /// \brief Determine if the call cannot unwind.
+ /// Determine if the call cannot unwind.
bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
void setDoesNotThrow() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
}
- /// \brief Determine if the invoke cannot be duplicated.
+ /// Determine if the invoke cannot be duplicated.
bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
void setCannotDuplicate() {
addAttribute(AttributeSet::FunctionIndex, Attribute::NoDuplicate);
}
- /// \brief Determine if the invoke is convergent
+ /// Determine if the invoke is convergent
bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
void setConvergent() {
addAttribute(AttributeSet::FunctionIndex, Attribute::Convergent);
}
void setNotConvergent() {
- removeAttribute(AttributeSet::FunctionIndex,
- Attribute::get(getContext(), Attribute::Convergent));
+ removeAttribute(AttributeSet::FunctionIndex, Attribute::Convergent);
}
- /// \brief Determine if the call returns a structure through first
+ /// Determine if the call returns a structure through the first
/// pointer argument.
bool hasStructRetAttr() const {
if (getNumArgOperands() == 0)
@@ -3762,24 +3844,24 @@ public:
return paramHasAttr(1, Attribute::StructRet);
}
- /// \brief Determine if any call argument is an aggregate passed by value.
+ /// Determine if any call argument is an aggregate passed by value.
bool hasByValArgument() const {
return AttributeList.hasAttrSomewhere(Attribute::ByVal);
}
- /// getCalledFunction - Return the function called, or null if this is an
+ /// Return the function called, or null if this is an
/// indirect function invocation.
///
Function *getCalledFunction() const {
return dyn_cast<Function>(Op<-3>());
}
- /// getCalledValue - Get a pointer to the function that is invoked by this
+ /// Get a pointer to the function that is invoked by this
/// instruction
const Value *getCalledValue() const { return Op<-3>(); }
Value *getCalledValue() { return Op<-3>(); }
- /// setCalledFunction - Set the function called.
+ /// Set the function called.
void setCalledFunction(Value* Fn) {
setCalledFunction(
cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
@@ -3806,7 +3888,7 @@ public:
Op<-1>() = reinterpret_cast<Value*>(B);
}
- /// getLandingPadInst - Get the landingpad instruction from the landing pad
+ /// Get the landingpad instruction from the landing pad
/// block (the unwind destination).
LandingPadInst *getLandingPadInst() const;
@@ -3835,17 +3917,17 @@ private:
unsigned getNumSuccessorsV() const override;
void setSuccessorV(unsigned idx, BasicBlock *B) override;
- template <typename AttrKind> bool hasFnAttrImpl(AttrKind A) const {
- if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A))
+ template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
+ if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, Kind))
return true;
// Operand bundles override attributes on the called function, but don't
// override attributes directly present on the invoke instruction.
- if (isFnAttrDisallowedByOpBundle(A))
+ if (isFnAttrDisallowedByOpBundle(Kind))
return false;
if (const Function *F = getCalledFunction())
- return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, A);
+ return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, Kind);
return false;
}
@@ -3869,6 +3951,7 @@ InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
InsertBefore) {
init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
+
InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
BasicBlock *IfException, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, unsigned Values,
@@ -3888,7 +3971,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value)
//===----------------------------------------------------------------------===//
//===---------------------------------------------------------------------------
-/// ResumeInst - Resume the propagation of an exception.
+/// Resume the propagation of an exception.
///
class ResumeInst : public TerminatorInst {
ResumeInst(const ResumeInst &RI);
@@ -3899,12 +3982,14 @@ class ResumeInst : public TerminatorInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
ResumeInst *cloneImpl() const;
public:
static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
return new(1) ResumeInst(Exn, InsertBefore);
}
+
static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
return new(1) ResumeInst(Exn, InsertAtEnd);
}
@@ -3942,19 +4027,16 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
// CatchSwitchInst Class
//===----------------------------------------------------------------------===//
class CatchSwitchInst : public TerminatorInst {
- void *operator new(size_t, unsigned) = delete;
- /// ReservedSpace - The number of operands actually allocated. NumOperands is
+ /// The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
+
// Operand[0] = Outer scope
// Operand[1] = Unwind block destination
// Operand[n] = BasicBlock to go to on match
CatchSwitchInst(const CatchSwitchInst &CSI);
- void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
- void growOperands(unsigned Size);
- // allocate space for exactly zero operands
- void *operator new(size_t s) { return User::operator new(s); }
- /// CatchSwitchInst ctor - Create a new switch instruction, specifying a
+
+ /// Create a new switch instruction, specifying a
/// default destination. The number of additional handlers can be specified
/// here to make memory allocation more efficient.
/// This constructor can also autoinsert before another instruction.
@@ -3962,7 +4044,7 @@ class CatchSwitchInst : public TerminatorInst {
unsigned NumHandlers, const Twine &NameStr,
Instruction *InsertBefore);
- /// CatchSwitchInst ctor - Create a new switch instruction, specifying a
+ /// Create a new switch instruction, specifying a
/// default destination. The number of additional handlers can be specified
/// here to make memory allocation more efficient.
/// This constructor also autoinserts at the end of the specified BasicBlock.
@@ -3970,12 +4052,21 @@ class CatchSwitchInst : public TerminatorInst {
unsigned NumHandlers, const Twine &NameStr,
BasicBlock *InsertAtEnd);
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) { return User::operator new(s); }
+
+ void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
+ void growOperands(unsigned Size);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
CatchSwitchInst *cloneImpl() const;
public:
+ void *operator new(size_t, unsigned) = delete;
+
static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumHandlers,
const Twine &NameStr = "",
@@ -3983,6 +4074,7 @@ public:
return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
InsertBefore);
}
+
static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumHandlers, const Twine &NameStr,
BasicBlock *InsertAtEnd) {
@@ -4011,7 +4103,7 @@ public:
setOperand(1, UnwindDest);
}
- /// getNumHandlers - return the number of 'handlers' in this catchswitch
+ /// return the number of 'handlers' in this catchswitch
/// instruction, except the default handler
unsigned getNumHandlers() const {
if (hasUnwindDest())
@@ -4029,8 +4121,6 @@ public:
typedef std::pointer_to_unary_function<Value *, BasicBlock *> DerefFnTy;
typedef mapped_iterator<op_iterator, DerefFnTy> handler_iterator;
typedef iterator_range<handler_iterator> handler_range;
-
-
typedef std::pointer_to_unary_function<const Value *, const BasicBlock *>
ConstDerefFnTy;
typedef mapped_iterator<const_op_iterator, ConstDerefFnTy> const_handler_iterator;
@@ -4043,6 +4133,7 @@ public:
++It;
return handler_iterator(It, DerefFnTy(handler_helper));
}
+
/// Returns an iterator that points to the first handler in the
/// CatchSwitchInst.
const_handler_iterator handler_begin() const {
@@ -4057,23 +4148,24 @@ public:
handler_iterator handler_end() {
return handler_iterator(op_end(), DerefFnTy(handler_helper));
}
+
/// Returns an iterator that points one past the last handler in the
/// CatchSwitchInst.
const_handler_iterator handler_end() const {
return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
}
- /// handlers - iteration adapter for range-for loops.
+ /// iteration adapter for range-for loops.
handler_range handlers() {
return make_range(handler_begin(), handler_end());
}
- /// handlers - iteration adapter for range-for loops.
+ /// iteration adapter for range-for loops.
const_handler_range handlers() const {
return make_range(handler_begin(), handler_end());
}
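// Hedged sketch for the funclet-based EH form: a function-level catchswitch
// uses the "none" token as its parent pad, each handler block is expected to
// begin with a catchpad, and handlers() iterates the blocks added so far.
// All names below are made up.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static CatchSwitchInst *makeCatchSwitch(LLVMContext &Ctx,
                                        ArrayRef<BasicBlock *> HandlerBlocks,
                                        BasicBlock *InsertAtEnd) {
  // A null unwind destination means "unwind to caller".
  CatchSwitchInst *CS = CatchSwitchInst::Create(
      ConstantTokenNone::get(Ctx), /*UnwindDest=*/nullptr,
      HandlerBlocks.size(), "cs", InsertAtEnd);
  for (BasicBlock *H : HandlerBlocks)
    CS->addHandler(H);
  return CS;
}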
- /// addHandler - Add an entry to the switch instruction...
+ /// Add an entry to the switch instruction...
/// Note:
/// This action invalidates handler_end(). Old handler_end() iterator will
/// point to the added handler.
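As an illustrative sketch only (not code from the patch), the handler API above can be driven like this, assuming the parent pad, unwind destination and handler block already exist:

#include "llvm/IR/Instructions.h"
using namespace llvm;

static void buildCatchSwitch(Value *ParentPad, BasicBlock *UnwindDest,
                             BasicBlock *HandlerBB, BasicBlock *InsertAtEnd) {
  // Reserve room for one handler up front; addHandler() grows on demand and,
  // as documented above, invalidates the old handler_end().
  CatchSwitchInst *CS =
      CatchSwitchInst::Create(ParentPad, UnwindDest, /*NumHandlers=*/1,
                              "cs", InsertAtEnd);
  CS->addHandler(HandlerBB);
  // handlers() adapts the operand list to BasicBlock pointers for range-for.
  for (BasicBlock *H : CS->handlers())
    (void)H; // visit each handler block here
}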
@@ -4136,6 +4228,7 @@ public:
return new (Values)
CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
}
+
static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
unsigned Values = 1 + Args.size();
@@ -4143,7 +4236,7 @@ public:
CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
}
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::CleanupPad;
}
@@ -4176,6 +4269,7 @@ public:
return new (Values)
CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
}
+
static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
unsigned Values = 1 + Args.size();
@@ -4192,7 +4286,7 @@ public:
Op<-1>() = CatchSwitch;
}
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::CatchPad;
}
@@ -4207,14 +4301,15 @@ public:
class CatchReturnInst : public TerminatorInst {
CatchReturnInst(const CatchReturnInst &RI);
-
- void init(Value *CatchPad, BasicBlock *BB);
CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
+ void init(Value *CatchPad, BasicBlock *BB);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
CatchReturnInst *cloneImpl() const;
public:
@@ -4224,6 +4319,7 @@ public:
assert(BB);
return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
}
+
static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
BasicBlock *InsertAtEnd) {
assert(CatchPad);
@@ -4281,16 +4377,17 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
class CleanupReturnInst : public TerminatorInst {
private:
CleanupReturnInst(const CleanupReturnInst &RI);
-
- void init(Value *CleanupPad, BasicBlock *UnwindBB);
CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
Instruction *InsertBefore = nullptr);
CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
BasicBlock *InsertAtEnd);
+ void init(Value *CleanupPad, BasicBlock *UnwindBB);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
CleanupReturnInst *cloneImpl() const;
public:
@@ -4304,6 +4401,7 @@ public:
return new (Values)
CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
}
+
static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
BasicBlock *InsertAtEnd) {
assert(CleanupPad);
@@ -4371,25 +4469,27 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
//===----------------------------------------------------------------------===//
//===---------------------------------------------------------------------------
-/// UnreachableInst - This function has undefined behavior. In particular, the
+/// This function has undefined behavior. In particular, the
/// presence of this instruction indicates some higher level knowledge that the
/// end of the block cannot be reached.
///
class UnreachableInst : public TerminatorInst {
- void *operator new(size_t, unsigned) = delete;
-
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
UnreachableInst *cloneImpl() const;
public:
+ explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
+ explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
+
// allocate space for exactly zero operands
void *operator new(size_t s) {
return User::operator new(s, 0);
}
- explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
- explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
+
+ void *operator new(size_t, unsigned) = delete;
unsigned getNumSuccessors() const { return 0; }
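For illustration (a minimal sketch, not from the patch), terminating a block with this instruction only needs the context and an insertion point:

#include "llvm/IR/Instructions.h"
using namespace llvm;

static void markUnreachable(LLVMContext &Ctx, BasicBlock *BB) {
  // Zero operands, zero successors; appended as the terminator of BB.
  new UnreachableInst(Ctx, BB);
}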
@@ -4411,16 +4511,17 @@ private:
// TruncInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a truncation of integer types.
+/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical TruncInst
+
+ /// Clone an identical TruncInst
TruncInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
TruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The (smaller) type to truncate to
@@ -4428,7 +4529,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
TruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The (smaller) type to truncate to
@@ -4436,7 +4537,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Trunc;
}
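The cast classes in this family (TruncInst through AddrSpaceCastInst below) are usually created through IRBuilder rather than the raw constructors; a minimal sketch, assuming V is an integer value wider than 8 bits:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *truncToI8(IRBuilder<> &Builder, Value *V) {
  // Emits a TruncInst, or folds to a constant when V is itself a constant.
  return Builder.CreateTrunc(V, Builder.getInt8Ty(), "lo8");
}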
@@ -4449,16 +4550,17 @@ public:
// ZExtInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents zero extension of integer types.
+/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical ZExtInst
+
+ /// Clone an identical ZExtInst
ZExtInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
ZExtInst(
Value *S, ///< The value to be zero extended
Type *Ty, ///< The type to zero extend to
@@ -4466,7 +4568,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end semantics.
+ /// Constructor with insert-at-end semantics.
ZExtInst(
Value *S, ///< The value to be zero extended
Type *Ty, ///< The type to zero extend to
@@ -4474,7 +4576,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == ZExt;
}
@@ -4487,16 +4589,17 @@ public:
// SExtInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a sign extension of integer types.
+/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical SExtInst
+
+ /// Clone an identical SExtInst
SExtInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
SExtInst(
Value *S, ///< The value to be sign extended
Type *Ty, ///< The type to sign extend to
@@ -4504,7 +4607,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
SExtInst(
Value *S, ///< The value to be sign extended
Type *Ty, ///< The type to sign extend to
@@ -4512,7 +4615,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == SExt;
}
@@ -4525,16 +4628,17 @@ public:
// FPTruncInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a truncation of floating point types.
+/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical FPTruncInst
+
+ /// Clone an identical FPTruncInst
FPTruncInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The type to truncate to
@@ -4542,7 +4646,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-before-instruction semantics
+  /// Constructor with insert-at-end-of-block semantics
FPTruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The type to truncate to
@@ -4550,7 +4654,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPTrunc;
}
@@ -4563,16 +4667,17 @@ public:
// FPExtInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents an extension of floating point types.
+/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical FPExtInst
+
+ /// Clone an identical FPExtInst
FPExtInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
FPExtInst(
Value *S, ///< The value to be extended
Type *Ty, ///< The type to extend to
@@ -4580,7 +4685,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
FPExtInst(
Value *S, ///< The value to be extended
Type *Ty, ///< The type to extend to
@@ -4588,7 +4693,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPExt;
}
@@ -4601,16 +4706,17 @@ public:
// UIToFPInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a cast unsigned integer to floating point.
+/// This class represents a cast from unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical UIToFPInst
+
+ /// Clone an identical UIToFPInst
UIToFPInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
UIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4618,7 +4724,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
UIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4626,7 +4732,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == UIToFP;
}
@@ -4639,16 +4745,17 @@ public:
// SIToFPInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a cast from signed integer to floating point.
+/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical SIToFPInst
+
+ /// Clone an identical SIToFPInst
SIToFPInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
SIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4656,7 +4763,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
SIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4664,7 +4771,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == SIToFP;
}
@@ -4677,16 +4784,17 @@ public:
// FPToUIInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a cast from floating point to unsigned integer
+/// This class represents a cast from floating point to unsigned integer.
class FPToUIInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical FPToUIInst
+
+ /// Clone an identical FPToUIInst
FPToUIInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
FPToUIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4694,7 +4802,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
FPToUIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4702,7 +4810,7 @@ public:
BasicBlock *InsertAtEnd ///< Where to insert the new instruction
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPToUI;
}
@@ -4715,16 +4823,17 @@ public:
// FPToSIInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a cast from floating point to signed integer.
+/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical FPToSIInst
+
+ /// Clone an identical FPToSIInst
FPToSIInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
FPToSIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4732,7 +4841,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
FPToSIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4740,7 +4849,7 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPToSI;
}
@@ -4753,10 +4862,13 @@ public:
// IntToPtrInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a cast from an integer to a pointer.
+/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
- /// \brief Constructor with insert-before-instruction semantics
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+
+ /// Constructor with insert-before-instruction semantics
IntToPtrInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4764,7 +4876,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
IntToPtrInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4772,12 +4884,10 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- // Note: Instruction needs to be a friend here to call cloneImpl.
- friend class Instruction;
- /// \brief Clone an identical IntToPtrInst
+ /// Clone an identical IntToPtrInst.
IntToPtrInst *cloneImpl() const;
- /// \brief Returns the address space of this instruction's pointer type.
+ /// Returns the address space of this instruction's pointer type.
unsigned getAddressSpace() const {
return getType()->getPointerAddressSpace();
}
@@ -4795,16 +4905,17 @@ public:
// PtrToIntInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a cast from a pointer to an integer
+/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical PtrToIntInst
+
+ /// Clone an identical PtrToIntInst.
PtrToIntInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
PtrToIntInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4812,7 +4923,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
PtrToIntInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -4820,14 +4931,14 @@ public:
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// \brief Gets the pointer operand.
+ /// Gets the pointer operand.
Value *getPointerOperand() { return getOperand(0); }
- /// \brief Gets the pointer operand.
+ /// Gets the pointer operand.
const Value *getPointerOperand() const { return getOperand(0); }
- /// \brief Gets the operand index of the pointer operand.
+ /// Gets the operand index of the pointer operand.
static unsigned getPointerOperandIndex() { return 0U; }
- /// \brief Returns the address space of the pointer operand.
+ /// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
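A short sketch of the accessors above (illustrative only, assuming Ptr has pointer type):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static unsigned addrSpaceOfPtrToInt(IRBuilder<> &Builder, Value *Ptr) {
  Value *AsInt = Builder.CreatePtrToInt(Ptr, Builder.getInt64Ty());
  if (auto *PTI = dyn_cast<PtrToIntInst>(AsInt))
    return PTI->getPointerAddressSpace(); // address space of Ptr's type
  return 0; // the cast folded away; nothing to inspect
}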
@@ -4845,16 +4956,17 @@ public:
// BitCastInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a no-op cast from one type to another.
+/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical BitCastInst
+
+ /// Clone an identical BitCastInst.
BitCastInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
BitCastInst(
Value *S, ///< The value to be cast
Type *Ty, ///< The type to cast to
@@ -4862,7 +4974,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
BitCastInst(
Value *S, ///< The value to be cast
Type *Ty, ///< The type to cast to
@@ -4883,17 +4995,18 @@ public:
// AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//
-/// \brief This class represents a conversion between pointers from
-/// one address space to another.
+/// This class represents a conversion between pointers from one address space
+/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
- /// \brief Clone an identical AddrSpaceCastInst
+
+ /// Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst *cloneImpl() const;
public:
- /// \brief Constructor with insert-before-instruction semantics
+ /// Constructor with insert-before-instruction semantics
AddrSpaceCastInst(
Value *S, ///< The value to be cast
Type *Ty, ///< The type to cast to
@@ -4901,7 +5014,7 @@ public:
Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
- /// \brief Constructor with insert-at-end-of-block semantics
+ /// Constructor with insert-at-end-of-block semantics
AddrSpaceCastInst(
Value *S, ///< The value to be cast
Type *Ty, ///< The type to cast to
@@ -4917,32 +5030,32 @@ public:
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
- /// \brief Gets the pointer operand.
+ /// Gets the pointer operand.
Value *getPointerOperand() {
return getOperand(0);
}
- /// \brief Gets the pointer operand.
+ /// Gets the pointer operand.
const Value *getPointerOperand() const {
return getOperand(0);
}
- /// \brief Gets the operand index of the pointer operand.
+ /// Gets the operand index of the pointer operand.
static unsigned getPointerOperandIndex() {
return 0U;
}
- /// \brief Returns the address space of the pointer operand.
+ /// Returns the address space of the pointer operand.
unsigned getSrcAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
- /// \brief Returns the address space of the result.
+ /// Returns the address space of the result.
unsigned getDestAddressSpace() const {
return getType()->getPointerAddressSpace();
}
};
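As an illustrative sketch (the target address space 3 is just an example), the source and destination address spaces can be read back off the created instruction:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static void castToAS3(IRBuilder<> &Builder, Value *Ptr) {
  PointerType *DstTy =
      PointerType::get(Builder.getInt8Ty(), /*AddressSpace=*/3);
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(
          Builder.CreateAddrSpaceCast(Ptr, DstTy))) {
    unsigned Src = ASC->getSrcAddressSpace();  // address space of Ptr
    unsigned Dst = ASC->getDestAddressSpace(); // 3 in this sketch
    (void)Src; (void)Dst;
  }
}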
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_INSTRUCTIONS_H
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 52044e0a0cc8..b14a54503e52 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -25,20 +25,28 @@
#define LLVM_IR_INTRINSICINST_H
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstdint>
namespace llvm {
+
/// A wrapper class for inspecting calls to intrinsic functions.
/// This allows the standard isa/dyncast/cast functionality to work with calls
/// to intrinsic functions.
class IntrinsicInst : public CallInst {
- IntrinsicInst() = delete;
- IntrinsicInst(const IntrinsicInst&) = delete;
- void operator=(const IntrinsicInst&) = delete;
public:
+ IntrinsicInst() = delete;
+ IntrinsicInst(const IntrinsicInst &) = delete;
+ IntrinsicInst &operator=(const IntrinsicInst &) = delete;
+
/// Return the intrinsic ID of this intrinsic.
Intrinsic::ID getIntrinsicID() const {
return getCalledFunction()->getIntrinsicID();
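For illustration (not part of the patch), the wrapper is typically reached through dyn_cast followed by a compare or switch on the intrinsic ID:

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

static bool isLifetimeStart(const Instruction &I) {
  if (const auto *II = dyn_cast<IntrinsicInst>(&I))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false; // not an intrinsic call at all
}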
@@ -81,9 +89,11 @@ namespace llvm {
class DbgDeclareInst : public DbgInfoIntrinsic {
public:
Value *getAddress() const { return getVariableLocation(); }
+
DILocalVariable *getVariable() const {
return cast<DILocalVariable>(getRawVariable());
}
+
DIExpression *getExpression() const {
return cast<DIExpression>(getRawExpression());
}
@@ -91,6 +101,7 @@ namespace llvm {
Metadata *getRawVariable() const {
return cast<MetadataAsValue>(getArgOperand(1))->getMetadata();
}
+
Metadata *getRawExpression() const {
return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
}
@@ -110,13 +121,16 @@ namespace llvm {
Value *getValue() const {
return getVariableLocation(/* AllowNullOp = */ false);
}
+
uint64_t getOffset() const {
return cast<ConstantInt>(
const_cast<Value*>(getArgOperand(1)))->getZExtValue();
}
+
DILocalVariable *getVariable() const {
return cast<DILocalVariable>(getRawVariable());
}
+
DIExpression *getExpression() const {
return cast<DIExpression>(getRawExpression());
}
@@ -124,6 +138,7 @@ namespace llvm {
Metadata *getRawVariable() const {
return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
}
+
Metadata *getRawExpression() const {
return cast<MetadataAsValue>(getArgOperand(3))->getMetadata();
}
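A sketch of reading these accessors off a dbg.value call (illustrative only; the printing is just for demonstration):

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void dumpDbgValue(const Instruction &I) {
  if (const auto *DVI = dyn_cast<DbgValueInst>(&I)) {
    errs() << "describes " << DVI->getVariable()->getName()
           << " at offset " << DVI->getOffset() << "\n";
    (void)DVI->getValue();      // the SSA value being described
    (void)DVI->getExpression(); // the attached DIExpression
  }
}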
@@ -159,6 +174,7 @@ namespace llvm {
ConstantInt *getVolatileCst() const {
return cast<ConstantInt>(const_cast<Value*>(getArgOperand(4)));
}
+
bool isVolatile() const {
return !getVolatileCst()->isZero();
}
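A small sketch of checking that flag on any of the wrapped memory intrinsics (illustrative only):

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

static bool isVolatileMemIntrinsic(const Instruction &I) {
  if (const auto *MI = dyn_cast<MemIntrinsic>(&I))
    return MI->isVolatile(); // i.e. the volatile argument is non-zero
  return false;
}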
@@ -268,7 +284,6 @@ namespace llvm {
}
};
-
/// This class wraps the llvm.memcpy intrinsic.
class MemCpyInst : public MemTransferInst {
public:
@@ -359,6 +374,18 @@ namespace llvm {
ConstantInt *getIndex() const {
return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
}
+
+ Value *getStep() const;
+ };
+
+ class InstrProfIncrementInstStep : public InstrProfIncrementInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::instrprof_increment_step;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
};
/// This represents the llvm.instrprof_value_profile intrinsic.
@@ -393,6 +420,7 @@ namespace llvm {
return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4)));
}
};
-} // namespace llvm
-#endif
+} // end namespace llvm
+
+#endif // LLVM_IR_INTRINSICINST_H
diff --git a/include/llvm/IR/Intrinsics.h b/include/llvm/IR/Intrinsics.h
index 7a87c2167710..d07358445dab 100644
--- a/include/llvm/IR/Intrinsics.h
+++ b/include/llvm/IR/Intrinsics.h
@@ -45,7 +45,16 @@ namespace Intrinsic {
};
/// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
- std::string getName(ID id, ArrayRef<Type*> Tys = None);
+ /// Note, this version is for intrinsics with no overloads. Use the other
+ /// version of getName if overloads are required.
+ StringRef getName(ID id);
+
+ /// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
+ /// Note, this version of getName supports overloads, but is less efficient
+ /// than the StringRef version of this function. If no overloads are
+  /// required, it is safe to use this version, but better to use the StringRef
+ /// version.
+ std::string getName(ID id, ArrayRef<Type*> Tys);
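A short sketch contrasting the two overloads (illustrative only; the exact mangled spelling is determined by the overload types):

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static void nameBothWays(LLVMContext &Ctx) {
  // Non-overloaded intrinsic: cheap, no std::string is materialized.
  StringRef Plain = Intrinsic::getName(Intrinsic::trap); // "llvm.trap"
  // Overloaded intrinsic: the type suffix is mangled into the name,
  // e.g. an i32 overload of ctlz yields something like "llvm.ctlz.i32".
  std::string Mangled =
      Intrinsic::getName(Intrinsic::ctlz, {Type::getInt32Ty(Ctx)});
  (void)Plain; (void)Mangled;
}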
/// Return the function type for an intrinsic.
FunctionType *getType(LLVMContext &Context, ID id,
@@ -79,10 +88,10 @@ namespace Intrinsic {
StringRef Name);
/// Map a GCC builtin name to an intrinsic ID.
- ID getIntrinsicForGCCBuiltin(const char *Prefix, const char *BuiltinName);
+ ID getIntrinsicForGCCBuiltin(const char *Prefix, StringRef BuiltinName);
/// Map a MS builtin name to an intrinsic ID.
- ID getIntrinsicForMSBuiltin(const char *Prefix, const char *BuiltinName);
+ ID getIntrinsicForMSBuiltin(const char *Prefix, StringRef BuiltinName);
/// This is a type descriptor which explains the type requirements of an
/// intrinsic. This is returned by getIntrinsicInfoTableEntries.
@@ -91,7 +100,7 @@ namespace Intrinsic {
Void, VarArg, MMX, Token, Metadata, Half, Float, Double,
Integer, Vector, Pointer, Struct,
Argument, ExtendArgument, TruncArgument, HalfVecArgument,
- SameVecWidthArgument, PtrToArgument, VecOfPtrsToElt
+ SameVecWidthArgument, PtrToArgument, PtrToElt, VecOfPtrsToElt
} Kind;
union {
@@ -114,7 +123,7 @@ namespace Intrinsic {
assert(Kind == Argument || Kind == ExtendArgument ||
Kind == TruncArgument || Kind == HalfVecArgument ||
Kind == SameVecWidthArgument || Kind == PtrToArgument ||
- Kind == VecOfPtrsToElt);
+ Kind == PtrToElt || Kind == VecOfPtrsToElt);
return Argument_Info >> 3;
}
ArgKind getArgumentKind() const {
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index e9264ec59b55..33fd17f20dbe 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -43,6 +43,15 @@ def IntrWriteMem : IntrinsicProperty;
// reads from and (possibly volatile) writes to memory, it has no side effects.
def IntrArgMemOnly : IntrinsicProperty;
+// IntrInaccessibleMemOnly -- This intrinsic only accesses memory that is not
+// accessible by the module being compiled. This is a weaker form of IntrNoMem.
+def IntrInaccessibleMemOnly : IntrinsicProperty;
+
+// IntrInaccessibleMemOrArgMemOnly -- This intrinsic only accesses memory that
+// its pointer-typed arguments point to or memory that is not accessible
+// by the module being compiled. This is a weaker form of IntrArgMemOnly.
+def IntrInaccessibleMemOrArgMemOnly : IntrinsicProperty;
+
// Commutative - This intrinsic is commutative: X op Y == Y op X.
def Commutative : IntrinsicProperty;
@@ -133,6 +142,7 @@ class LLVMVectorSameWidth<int num, LLVMType elty>
ValueType ElTy = elty.VT;
}
class LLVMPointerTo<int num> : LLVMMatchType<num>;
+class LLVMPointerToElt<int num> : LLVMMatchType<num>;
class LLVMVectorOfPointersToElt<int num> : LLVMMatchType<num>;
// Match the type of another intrinsic parameter that is expected to be a
@@ -290,6 +300,7 @@ def int_gcwrite : Intrinsic<[],
//===--------------------- Code Generator Intrinsics ----------------------===//
//
def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_addressofreturnaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_read_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
[IntrReadMem], "llvm.read_register">;
@@ -346,6 +357,12 @@ def int_instrprof_increment : Intrinsic<[],
llvm_i32_ty, llvm_i32_ty],
[]>;
+// A counter increment with step for instrumentation based profiling.
+def int_instrprof_increment_step : Intrinsic<[],
+ [llvm_ptr_ty, llvm_i64_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ []>;
+
// A call to profile runtime for value profiling of target expressions
// through instrumentation based profiling.
def int_instrprof_value_profile : Intrinsic<[],
@@ -554,11 +571,11 @@ def int_lifetime_end : Intrinsic<[],
[llvm_i64_ty, llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<1>]>;
def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
- [llvm_i64_ty, llvm_ptr_ty],
+ [llvm_i64_ty, llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<1>]>;
def int_invariant_end : Intrinsic<[],
[llvm_descriptor_ty, llvm_i64_ty,
- llvm_ptr_ty],
+ llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<2>]>;
def int_invariant_group_barrier : Intrinsic<[llvm_ptr_ty],
@@ -597,7 +614,50 @@ def int_experimental_gc_relocate : Intrinsic<[llvm_any_ty],
[llvm_token_ty, llvm_i32_ty, llvm_i32_ty],
[IntrReadMem]>;
-//===-------------------------- Other Intrinsics --------------------------===//
+//===------------------------ Coroutine Intrinsics ---------------===//
+// These are documented in docs/Coroutines.rst
+
+// Coroutine Structure Intrinsics.
+
+def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty,
+ llvm_ptr_ty, llvm_ptr_ty],
+ [IntrArgMemOnly, IntrReadMem,
+ ReadNone<1>, ReadOnly<2>, NoCapture<2>]>;
+def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>;
+def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
+ [WriteOnly<1>]>;
+
+def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
+ [IntrReadMem, IntrArgMemOnly, ReadOnly<1>,
+ NoCapture<1>]>;
+def int_coro_end : Intrinsic<[], [llvm_ptr_ty, llvm_i1_ty], []>;
+
+def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
+
+def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>;
+def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;
+
+def int_coro_param : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_ptr_ty],
+ [IntrNoMem, ReadNone<0>, ReadNone<1>]>;
+
+// Coroutine Manipulation Intrinsics.
+
+def int_coro_resume : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
+def int_coro_destroy : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
+def int_coro_done : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+ [IntrArgMemOnly, ReadOnly<0>, NoCapture<0>]>;
+def int_coro_promise : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i1_ty],
+ [IntrNoMem, NoCapture<0>]>;
+
+// Coroutine Lowering Intrinsics. Used internally by coroutine passes.
+
+def int_coro_subfn_addr : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty],
+ [IntrReadMem, IntrArgMemOnly, ReadOnly<0>,
+ NoCapture<0>]>;
+
+//===--------------------------- Other Intrinsics --------------------------===//
//
def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
GCCBuiltin<"__builtin_flt_rounds">;
@@ -668,13 +728,25 @@ def int_masked_gather: Intrinsic<[llvm_anyvector_ty],
[LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
LLVMVectorSameWidth<0, llvm_i1_ty>,
LLVMMatchType<0>],
- [IntrReadMem]>;
+ [IntrReadMem]>;
def int_masked_scatter: Intrinsic<[],
[llvm_anyvector_ty,
LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
LLVMVectorSameWidth<0, llvm_i1_ty>]>;
+def int_masked_expandload: Intrinsic<[llvm_anyvector_ty],
+ [LLVMPointerToElt<0>,
+ LLVMVectorSameWidth<0, llvm_i1_ty>,
+ LLVMMatchType<0>],
+ [IntrReadMem]>;
+
+def int_masked_compressstore: Intrinsic<[],
+ [llvm_anyvector_ty,
+ LLVMPointerToElt<0>,
+ LLVMVectorSameWidth<0, llvm_i1_ty>],
+ [IntrArgMemOnly]>;
+
// Test whether a pointer is associated with a type metadata identifier.
def int_type_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
[IntrNoMem]>;
@@ -687,6 +759,15 @@ def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
+//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
+//
+
+def int_memcpy_element_atomic : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyptr_ty,
+ llvm_i64_ty, llvm_i32_ty],
+ [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
+ WriteOnly<0>, ReadOnly<1>]>;
+
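An illustrative C++ sketch (not TableGen, and not from the patch) of how a call to this intrinsic could be materialized; the two pointer operands supply the overloaded types, and the element size of 4 bytes is purely an assumption of the sketch:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static void emitElementAtomicMemCpy(IRBuilder<> &Builder, Module &M,
                                    Value *Dst, Value *Src, Value *Len) {
  Type *I8Ptr = Builder.getInt8PtrTy();
  Function *F = Intrinsic::getDeclaration(
      &M, Intrinsic::memcpy_element_atomic, {I8Ptr, I8Ptr});
  // Operands: dest, src, length in bytes, element size in bytes.
  Builder.CreateCall(F, {Dst, Src, Len, Builder.getInt32(4)});
}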
//===----------------------------------------------------------------------===//
// Target-specific intrinsics
//===----------------------------------------------------------------------===//
diff --git a/include/llvm/IR/IntrinsicsAMDGPU.td b/include/llvm/IR/IntrinsicsAMDGPU.td
index 9bf2a4dd5a1d..078959ce15d0 100644
--- a/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -41,8 +41,6 @@ defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;
-def int_r600_read_workdim : AMDGPUReadPreloadRegisterIntrinsic;
-
def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">,
Intrinsic<[], [], [IntrConvergent]>;
@@ -70,13 +68,48 @@ def int_r600_recipsqrt_clamped : Intrinsic<
let TargetPrefix = "amdgcn" in {
+//===----------------------------------------------------------------------===//
+// ABI Special Intrinsics
+//===----------------------------------------------------------------------===//
+
defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
<"__builtin_amdgcn_workgroup_id">;
+def int_amdgcn_dispatch_ptr :
+ GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+
+def int_amdgcn_queue_ptr :
+ GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+
+def int_amdgcn_kernarg_segment_ptr :
+ GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+
+def int_amdgcn_implicitarg_ptr :
+ GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+
+def int_amdgcn_groupstaticsize :
+ GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+
+def int_amdgcn_dispatch_id :
+ GCCBuiltin<"__builtin_amdgcn_dispatch_id">,
+ Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Instruction Intrinsics
+//===----------------------------------------------------------------------===//
+
def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
Intrinsic<[], [], [IntrConvergent]>;
+def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">,
+ Intrinsic<[], [], [IntrConvergent]>;
+
def int_amdgcn_s_waitcnt : Intrinsic<[], [llvm_i32_ty], []>;
def int_amdgcn_div_scale : Intrinsic<
@@ -115,10 +148,18 @@ def int_amdgcn_log_clamp : Intrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;
+def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]
+>;
+
def int_amdgcn_rcp : Intrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;
+def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]
+>;
+
def int_amdgcn_rsq : Intrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;
@@ -140,7 +181,7 @@ def int_amdgcn_frexp_mant : Intrinsic<
>;
def int_amdgcn_frexp_exp : Intrinsic<
- [llvm_i32_ty], [llvm_anyfloat_ty], [IntrNoMem]
+ [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]
>;
// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
@@ -174,6 +215,11 @@ def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
[llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
>;
+// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
+// should be used.
+def int_amdgcn_sffbh :
+ Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
// TODO: Do we want an ordering for these?
def int_amdgcn_atomic_inc : Intrinsic<[llvm_anyint_ty],
[llvm_anyptr_ty, LLVMMatchType<0>],
@@ -186,34 +232,130 @@ def int_amdgcn_atomic_dec : Intrinsic<[llvm_anyint_ty],
>;
class AMDGPUImageLoad : Intrinsic <
- [llvm_v4f32_ty], // vdata(VGPR)
+ [llvm_anyfloat_ty], // vdata(VGPR)
[llvm_anyint_ty, // vaddr(VGPR)
- llvm_v8i32_ty, // rsrc(SGPR)
+ llvm_anyint_ty, // rsrc(SGPR)
llvm_i32_ty, // dmask(imm)
- llvm_i1_ty, // r128(imm)
- llvm_i1_ty, // da(imm)
llvm_i1_ty, // glc(imm)
- llvm_i1_ty], // slc(imm)
+ llvm_i1_ty, // slc(imm)
+ llvm_i1_ty, // lwe(imm)
+ llvm_i1_ty], // da(imm)
[IntrReadMem]>;
def int_amdgcn_image_load : AMDGPUImageLoad;
def int_amdgcn_image_load_mip : AMDGPUImageLoad;
+def int_amdgcn_image_getresinfo : AMDGPUImageLoad;
class AMDGPUImageStore : Intrinsic <
[],
- [llvm_v4f32_ty, // vdata(VGPR)
+ [llvm_anyfloat_ty, // vdata(VGPR)
llvm_anyint_ty, // vaddr(VGPR)
- llvm_v8i32_ty, // rsrc(SGPR)
+ llvm_anyint_ty, // rsrc(SGPR)
llvm_i32_ty, // dmask(imm)
- llvm_i1_ty, // r128(imm)
- llvm_i1_ty, // da(imm)
llvm_i1_ty, // glc(imm)
- llvm_i1_ty], // slc(imm)
+ llvm_i1_ty, // slc(imm)
+ llvm_i1_ty, // lwe(imm)
+ llvm_i1_ty], // da(imm)
[]>;
def int_amdgcn_image_store : AMDGPUImageStore;
def int_amdgcn_image_store_mip : AMDGPUImageStore;
+class AMDGPUImageSample : Intrinsic <
+ [llvm_anyfloat_ty], // vdata(VGPR)
+ [llvm_anyfloat_ty, // vaddr(VGPR)
+ llvm_anyint_ty, // rsrc(SGPR)
+ llvm_v4i32_ty, // sampler(SGPR)
+ llvm_i32_ty, // dmask(imm)
+ llvm_i1_ty, // unorm(imm)
+ llvm_i1_ty, // glc(imm)
+ llvm_i1_ty, // slc(imm)
+ llvm_i1_ty, // lwe(imm)
+ llvm_i1_ty], // da(imm)
+ [IntrReadMem]>;
+
+// Basic sample
+def int_amdgcn_image_sample : AMDGPUImageSample;
+def int_amdgcn_image_sample_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_d : AMDGPUImageSample;
+def int_amdgcn_image_sample_d_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_l : AMDGPUImageSample;
+def int_amdgcn_image_sample_b : AMDGPUImageSample;
+def int_amdgcn_image_sample_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_lz : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd_cl : AMDGPUImageSample;
+
+// Sample with comparison
+def int_amdgcn_image_sample_c : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_l : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_lz : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd_cl : AMDGPUImageSample;
+
+// Sample with offsets
+def int_amdgcn_image_sample_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_d_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_d_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_l_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_b_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_lz_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_cd_cl_o : AMDGPUImageSample;
+
+// Sample with comparison and offsets
+def int_amdgcn_image_sample_c_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_d_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_l_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_lz_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd_o : AMDGPUImageSample;
+def int_amdgcn_image_sample_c_cd_cl_o : AMDGPUImageSample;
+
+// Basic gather4
+def int_amdgcn_image_gather4 : AMDGPUImageSample;
+def int_amdgcn_image_gather4_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_l : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_lz : AMDGPUImageSample;
+
+// Gather4 with comparison
+def int_amdgcn_image_gather4_c : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_l : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b_cl : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_lz : AMDGPUImageSample;
+
+// Gather4 with offsets
+def int_amdgcn_image_gather4_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_l_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_lz_o : AMDGPUImageSample;
+
+// Gather4 with comparison and offsets
+def int_amdgcn_image_gather4_c_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_l_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_b_cl_o : AMDGPUImageSample;
+def int_amdgcn_image_gather4_c_lz_o : AMDGPUImageSample;
+
+def int_amdgcn_image_getlod : AMDGPUImageSample;
+
class AMDGPUImageAtomic : Intrinsic <
[llvm_i32_ty],
[llvm_i32_ty, // vdata(VGPR)
@@ -298,9 +440,6 @@ def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
llvm_i1_ty], // slc(imm)
[]>;
-def int_amdgcn_read_workdim : AMDGPUReadPreloadRegisterIntrinsic;
-
-
def int_amdgcn_buffer_wbinvl1_sc :
GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
Intrinsic<[], [], []>;
@@ -322,35 +461,33 @@ def int_amdgcn_s_sleep :
Intrinsic<[], [llvm_i32_ty], []> {
}
+def int_amdgcn_s_incperflevel :
+ GCCBuiltin<"__builtin_amdgcn_s_incperflevel">,
+ Intrinsic<[], [llvm_i32_ty], []> {
+}
+
+def int_amdgcn_s_decperflevel :
+ GCCBuiltin<"__builtin_amdgcn_s_decperflevel">,
+ Intrinsic<[], [llvm_i32_ty], []> {
+}
+
def int_amdgcn_s_getreg :
GCCBuiltin<"__builtin_amdgcn_s_getreg">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem]>;
-def int_amdgcn_groupstaticsize :
- GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
- Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
-
-def int_amdgcn_dispatch_ptr :
- GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
-
-def int_amdgcn_queue_ptr :
- GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
-
-def int_amdgcn_kernarg_segment_ptr :
- GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
-
-def int_amdgcn_implicitarg_ptr :
- GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
+// param values: 0 = P10, 1 = P20, 2 = P0
+def int_amdgcn_interp_mov :
+ GCCBuiltin<"__builtin_amdgcn_interp_mov">,
+ Intrinsic<[llvm_float_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p1 :
GCCBuiltin<"__builtin_amdgcn_interp_p1">,
Intrinsic<[llvm_float_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>; // This intrinsic reads from lds, but the memory
// values are constant, so it behaves like IntrNoMem.
@@ -358,7 +495,7 @@ def int_amdgcn_interp_p1 :
def int_amdgcn_interp_p2 :
GCCBuiltin<"__builtin_amdgcn_interp_p2">,
Intrinsic<[llvm_float_ty],
- [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>; // See int_amdgcn_v_interp_p1 for why this is
// IntrNoMem.
@@ -387,6 +524,54 @@ def int_amdgcn_lerp :
GCCBuiltin<"__builtin_amdgcn_lerp">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_amdgcn_sad_u8 :
+ GCCBuiltin<"__builtin_amdgcn_sad_u8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_msad_u8 :
+ GCCBuiltin<"__builtin_amdgcn_msad_u8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_sad_hi_u8 :
+ GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_sad_u16 :
+ GCCBuiltin<"__builtin_amdgcn_sad_u16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_qsad_pk_u16_u8 :
+ GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [IntrNoMem]>;
+
+def int_amdgcn_mqsad_pk_u16_u8 :
+ GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [IntrNoMem]>;
+
+def int_amdgcn_mqsad_u32_u8 :
+ GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_cvt_pk_u8_f32 :
+ GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_icmp :
+ Intrinsic<[llvm_i64_ty], [llvm_anyint_ty, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_fcmp :
+ Intrinsic<[llvm_i64_ty], [llvm_anyfloat_ty, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_readfirstlane :
+ GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_readlane :
+ GCCBuiltin<"__builtin_amdgcn_readlane">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//
diff --git a/include/llvm/IR/IntrinsicsNVVM.td b/include/llvm/IR/IntrinsicsNVVM.td
index 6919ec47eb9a..f035ac3c90ee 100644
--- a/include/llvm/IR/IntrinsicsNVVM.td
+++ b/include/llvm/IR/IntrinsicsNVVM.td
@@ -729,6 +729,39 @@ let TargetPrefix = "nvvm" in {
[LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
[IntrArgMemOnly, NoCapture<0>]>;
+ class SCOPED_ATOMIC2_impl<LLVMType elty>
+ : Intrinsic<[elty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>],
+ [IntrArgMemOnly, NoCapture<0>]>;
+ class SCOPED_ATOMIC3_impl<LLVMType elty>
+ : Intrinsic<[elty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>,
+ LLVMMatchType<0>],
+ [IntrArgMemOnly, NoCapture<0>]>;
+
+ multiclass PTXAtomicWithScope2<LLVMType elty> {
+ def _cta : SCOPED_ATOMIC2_impl<elty>;
+ def _sys : SCOPED_ATOMIC2_impl<elty>;
+ }
+ multiclass PTXAtomicWithScope3<LLVMType elty> {
+ def _cta : SCOPED_ATOMIC3_impl<elty>;
+ def _sys : SCOPED_ATOMIC3_impl<elty>;
+ }
+ multiclass PTXAtomicWithScope2_fi {
+ defm _f: PTXAtomicWithScope2<llvm_anyfloat_ty>;
+ defm _i: PTXAtomicWithScope2<llvm_anyint_ty>;
+ }
+ defm int_nvvm_atomic_add_gen : PTXAtomicWithScope2_fi;
+ defm int_nvvm_atomic_inc_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_dec_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_exch_gen_i: PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_xor_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_max_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_min_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_or_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_and_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
+ defm int_nvvm_atomic_cas_gen_i : PTXAtomicWithScope3<llvm_anyint_ty>;
+
// Bar.Sync
// The builtin for "bar.sync 0" is called __syncthreads. Unlike most of the
diff --git a/include/llvm/IR/IntrinsicsPowerPC.td b/include/llvm/IR/IntrinsicsPowerPC.td
index e195c0ebac3a..12e23b681ca4 100644
--- a/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/include/llvm/IR/IntrinsicsPowerPC.td
@@ -250,6 +250,12 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpgtuw : GCCBuiltin<"__builtin_altivec_vcmpgtuw">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
+ def int_ppc_altivec_vcmpnew : GCCBuiltin<"__builtin_altivec_vcmpnew">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpnezw : GCCBuiltin<"__builtin_altivec_vcmpnezw">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
def int_ppc_altivec_vcmpequh : GCCBuiltin<"__builtin_altivec_vcmpequh">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
@@ -260,6 +266,12 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpgtuh : GCCBuiltin<"__builtin_altivec_vcmpgtuh">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
+ def int_ppc_altivec_vcmpneh : GCCBuiltin<"__builtin_altivec_vcmpneh">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpnezh : GCCBuiltin<"__builtin_altivec_vcmpnezh">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
def int_ppc_altivec_vcmpequb : GCCBuiltin<"__builtin_altivec_vcmpequb">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
@@ -270,6 +282,12 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpgtub : GCCBuiltin<"__builtin_altivec_vcmpgtub">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
+ def int_ppc_altivec_vcmpneb : GCCBuiltin<"__builtin_altivec_vcmpneb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpnezb : GCCBuiltin<"__builtin_altivec_vcmpnezb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
// Predicate Comparisons. The first operand specifies interpretation of CR6.
def int_ppc_altivec_vcmpbfp_p : GCCBuiltin<"__builtin_altivec_vcmpbfp_p">,
@@ -304,6 +322,12 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpgtuw_p : GCCBuiltin<"__builtin_altivec_vcmpgtuw_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
[IntrNoMem]>;
+ def int_ppc_altivec_vcmpnew_p : GCCBuiltin<"__builtin_altivec_vcmpnew_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpnezw_p : GCCBuiltin<"__builtin_altivec_vcmpnezw_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+ [IntrNoMem]>;
def int_ppc_altivec_vcmpequh_p : GCCBuiltin<"__builtin_altivec_vcmpequh_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
@@ -314,6 +338,12 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpgtuh_p : GCCBuiltin<"__builtin_altivec_vcmpgtuh_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
[IntrNoMem]>;
+ def int_ppc_altivec_vcmpneh_p : GCCBuiltin<"__builtin_altivec_vcmpneh_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpnezh_p : GCCBuiltin<"__builtin_altivec_vcmpnezh_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+ [IntrNoMem]>;
def int_ppc_altivec_vcmpequb_p : GCCBuiltin<"__builtin_altivec_vcmpequb_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
@@ -324,6 +354,23 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
def int_ppc_altivec_vcmpgtub_p : GCCBuiltin<"__builtin_altivec_vcmpgtub_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
[IntrNoMem]>;
+ def int_ppc_altivec_vcmpneb_p : GCCBuiltin<"__builtin_altivec_vcmpneb_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpnezb_p : GCCBuiltin<"__builtin_altivec_vcmpnezb_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vclzlsbb : GCCBuiltin<"__builtin_altivec_vclzlsbb">,
+ Intrinsic<[llvm_i32_ty],[llvm_v16i8_ty],[IntrNoMem]>;
+ def int_ppc_altivec_vctzlsbb : GCCBuiltin<"__builtin_altivec_vctzlsbb">,
+ Intrinsic<[llvm_i32_ty],[llvm_v16i8_ty],[IntrNoMem]>;
+ def int_ppc_altivec_vprtybw : GCCBuiltin<"__builtin_altivec_vprtybw">,
+ Intrinsic<[llvm_v4i32_ty],[llvm_v4i32_ty],[IntrNoMem]>;
+ def int_ppc_altivec_vprtybd : GCCBuiltin<"__builtin_altivec_vprtybd">,
+ Intrinsic<[llvm_v2i64_ty],[llvm_v2i64_ty],[IntrNoMem]>;
+ def int_ppc_altivec_vprtybq : GCCBuiltin<"__builtin_altivec_vprtybq">,
+ Intrinsic<[llvm_v1i128_ty],[llvm_v1i128_ty],[IntrNoMem]>;
+
}
// Vector average.
@@ -575,6 +622,8 @@ def int_ppc_altivec_vsl : PowerPC_Vec_WWW_Intrinsic<"vsl">;
def int_ppc_altivec_vslo : PowerPC_Vec_WWW_Intrinsic<"vslo">;
def int_ppc_altivec_vslb : PowerPC_Vec_BBB_Intrinsic<"vslb">;
+def int_ppc_altivec_vslv : PowerPC_Vec_BBB_Intrinsic<"vslv">;
+def int_ppc_altivec_vsrv : PowerPC_Vec_BBB_Intrinsic<"vsrv">;
def int_ppc_altivec_vslh : PowerPC_Vec_HHH_Intrinsic<"vslh">;
def int_ppc_altivec_vslw : PowerPC_Vec_WWW_Intrinsic<"vslw">;
@@ -657,6 +706,27 @@ def int_ppc_altivec_crypto_vpmsumw :
def int_ppc_altivec_crypto_vpmsumd :
PowerPC_Vec_DDD_Intrinsic<"crypto_vpmsumd">;
+// Absolute Difference intrinsics
+def int_ppc_altivec_vabsdub : PowerPC_Vec_BBB_Intrinsic<"vabsdub">;
+def int_ppc_altivec_vabsduh : PowerPC_Vec_HHH_Intrinsic<"vabsduh">;
+def int_ppc_altivec_vabsduw : PowerPC_Vec_WWW_Intrinsic<"vabsduw">;
+
+// Vector rotates
+def int_ppc_altivec_vrlwnm :
+ PowerPC_Vec_Intrinsic<"vrlwnm", [llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_altivec_vrlwmi :
+ PowerPC_Vec_Intrinsic<"vrlwmi", [llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_ppc_altivec_vrldnm :
+ PowerPC_Vec_Intrinsic<"vrldnm", [llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+def int_ppc_altivec_vrldmi :
+ PowerPC_Vec_Intrinsic<"vrldmi", [llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
//===----------------------------------------------------------------------===//
// PowerPC VSX Intrinsic Definitions.
@@ -667,13 +737,32 @@ def int_ppc_vsx_lxvw4x :
Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_ppc_vsx_lxvd2x :
Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_ppc_vsx_lxvw4x_be :
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_ppc_vsx_lxvd2x_be :
+ Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
+def int_ppc_vsx_lxvl :
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem,
+ IntrArgMemOnly]>;
+def int_ppc_vsx_lxvll :
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem,
+ IntrArgMemOnly]>;
+def int_ppc_vsx_stxvl :
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i64_ty],
+ [IntrArgMemOnly]>;
+def int_ppc_vsx_stxvll :
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i64_ty],
+ [IntrArgMemOnly]>;
// Vector store.
def int_ppc_vsx_stxvw4x :
Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], [IntrArgMemOnly]>;
def int_ppc_vsx_stxvd2x :
Intrinsic<[], [llvm_v2f64_ty, llvm_ptr_ty], [IntrArgMemOnly]>;
-
+def int_ppc_vsx_stxvw4x_be :
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], [IntrArgMemOnly]>;
+def int_ppc_vsx_stxvd2x_be :
+ Intrinsic<[], [llvm_v2f64_ty, llvm_ptr_ty], [IntrArgMemOnly]>;
// Vector and scalar maximum.
def int_ppc_vsx_xvmaxdp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvmaxdp">;
def int_ppc_vsx_xvmaxsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvmaxsp">;
@@ -746,6 +835,67 @@ def int_ppc_vsx_xvcmpgtsp_p : GCCBuiltin<"__builtin_vsx_xvcmpgtsp_p">,
def int_ppc_vsx_xxleqv :
PowerPC_VSX_Intrinsic<"xxleqv", [llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xviexpdp :
+ PowerPC_VSX_Intrinsic<"xviexpdp",[llvm_v2f64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty],[IntrNoMem]>;
+def int_ppc_vsx_xviexpsp :
+ PowerPC_VSX_Intrinsic<"xviexpsp",[llvm_v4f32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty],[IntrNoMem]>;
+def int_ppc_vsx_xvcvdpsxws :
+ PowerPC_VSX_Intrinsic<"xvcvdpsxws", [llvm_v4i32_ty],
+ [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvdpuxws :
+ PowerPC_VSX_Intrinsic<"xvcvdpuxws", [llvm_v4i32_ty],
+ [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvsxwdp :
+ PowerPC_VSX_Intrinsic<"xvcvsxwdp", [llvm_v2f64_ty],
+ [llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvuxwdp :
+ PowerPC_VSX_Intrinsic<"xvcvuxwdp", [llvm_v2f64_ty],
+ [llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvspdp :
+ PowerPC_VSX_Intrinsic<"xvcvspdp", [llvm_v2f64_ty],
+ [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvsxdsp :
+ PowerPC_VSX_Intrinsic<"xvcvsxdsp", [llvm_v4f32_ty],
+ [llvm_v2i64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvuxdsp :
+ PowerPC_VSX_Intrinsic<"xvcvuxdsp", [llvm_v4f32_ty],
+ [llvm_v2i64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvdpsp :
+ PowerPC_VSX_Intrinsic<"xvcvdpsp", [llvm_v4f32_ty],
+ [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvsphp :
+ PowerPC_VSX_Intrinsic<"xvcvsphp", [llvm_v4f32_ty],
+ [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxexpdp :
+ PowerPC_VSX_Intrinsic<"xvxexpdp", [llvm_v2i64_ty],
+ [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxexpsp :
+ PowerPC_VSX_Intrinsic<"xvxexpsp", [llvm_v4i32_ty],
+ [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxsigdp :
+ PowerPC_VSX_Intrinsic<"xvxsigdp", [llvm_v2i64_ty],
+ [llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvxsigsp :
+ PowerPC_VSX_Intrinsic<"xvxsigsp", [llvm_v4i32_ty],
+ [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvtstdcdp :
+ PowerPC_VSX_Intrinsic<"xvtstdcdp", [llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvtstdcsp :
+ PowerPC_VSX_Intrinsic<"xvtstdcsp", [llvm_v4i32_ty],
+ [llvm_v4f32_ty,llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvhpsp :
+ PowerPC_VSX_Intrinsic<"xvcvhpsp", [llvm_v4f32_ty],
+ [llvm_v8i16_ty],[IntrNoMem]>;
+def int_ppc_vsx_xxextractuw :
+ PowerPC_VSX_Intrinsic<"xxextractuw",[llvm_v2i64_ty],
+ [llvm_v2i64_ty,llvm_i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxinsertw :
+ PowerPC_VSX_Intrinsic<"xxinsertw",[llvm_v4i32_ty],
+ [llvm_v4i32_ty,llvm_v2i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
diff --git a/include/llvm/IR/IntrinsicsSystemZ.td b/include/llvm/IR/IntrinsicsSystemZ.td
index bfc15b9bc09e..9be37d3645b2 100644
--- a/include/llvm/IR/IntrinsicsSystemZ.td
+++ b/include/llvm/IR/IntrinsicsSystemZ.td
@@ -382,6 +382,11 @@ let TargetPrefix = "s390" in {
//===----------------------------------------------------------------------===//
let TargetPrefix = "s390" in {
+ def int_s390_sfpc : GCCBuiltin<"__builtin_s390_sfpc">,
+ Intrinsic<[], [llvm_i32_ty], []>;
+ def int_s390_efpc : GCCBuiltin<"__builtin_s390_efpc">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+
def int_s390_tdc : Intrinsic<[llvm_i32_ty], [llvm_anyfloat_ty, llvm_i64_ty],
[IntrNoMem]>;
}
diff --git a/include/llvm/IR/IntrinsicsX86.td b/include/llvm/IR/IntrinsicsX86.td
index b965f082b8d8..3a496cb6645c 100644
--- a/include/llvm/IR/IntrinsicsX86.td
+++ b/include/llvm/IR/IntrinsicsX86.td
@@ -145,18 +145,6 @@ let TargetPrefix = "x86" in {
// Arithmetic ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_sse_add_ss :
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty], [IntrNoMem]>;
- def int_x86_sse_sub_ss :
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty], [IntrNoMem]>;
- def int_x86_sse_mul_ss :
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty], [IntrNoMem]>;
- def int_x86_sse_div_ss :
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_sqrt_ss : GCCBuiltin<"__builtin_ia32_sqrtss">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
[IntrNoMem]>;
@@ -246,10 +234,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvttss2si64 : GCCBuiltin<"__builtin_ia32_cvttss2si64">,
Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
- def int_x86_sse_cvtsi2ss : GCCBuiltin<"__builtin_ia32_cvtsi2ss">,
+ def int_x86_sse_cvtsi2ss : // TODO: Remove this intrinsic.
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_i32_ty], [IntrNoMem]>;
- def int_x86_sse_cvtsi642ss : GCCBuiltin<"__builtin_ia32_cvtsi642ss">,
+ def int_x86_sse_cvtsi642ss : // TODO: Remove this intrinsic.
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_i64_ty], [IntrNoMem]>;
@@ -287,18 +275,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// FP arithmetic ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_sse2_add_sd :
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_v2f64_ty], [IntrNoMem]>;
- def int_x86_sse2_sub_sd :
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_v2f64_ty], [IntrNoMem]>;
- def int_x86_sse2_mul_sd :
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_v2f64_ty], [IntrNoMem]>;
- def int_x86_sse2_div_sd :
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_v2f64_ty], [IntrNoMem]>;
def int_x86_sse2_sqrt_sd : GCCBuiltin<"__builtin_ia32_sqrtsd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
[IntrNoMem]>;
@@ -489,16 +465,16 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_x86_sse2_cvttsd2si64 : GCCBuiltin<"__builtin_ia32_cvttsd2si64">,
Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
- def int_x86_sse2_cvtsi2sd : GCCBuiltin<"__builtin_ia32_cvtsi2sd">,
+ def int_x86_sse2_cvtsi2sd : // TODO: Remove this intrinsic.
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_i32_ty], [IntrNoMem]>;
- def int_x86_sse2_cvtsi642sd : GCCBuiltin<"__builtin_ia32_cvtsi642sd">,
+ def int_x86_sse2_cvtsi642sd : // TODO: Remove this intrinsic.
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_i64_ty], [IntrNoMem]>;
def int_x86_sse2_cvtsd2ss : GCCBuiltin<"__builtin_ia32_cvtsd2ss">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_v2f64_ty], [IntrNoMem]>;
- def int_x86_sse2_cvtss2sd : GCCBuiltin<"__builtin_ia32_cvtss2sd">,
+ def int_x86_sse2_cvtss2sd : // TODO: Remove this intrinsic.
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvtpd2pi : GCCBuiltin<"__builtin_ia32_cvtpd2pi">,
@@ -1340,58 +1316,19 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_vpermilvar_pd_256 :
- GCCBuiltin<"__builtin_ia32_vpermilvarpd256_mask">,
- Intrinsic<[llvm_v4f64_ty],
- [llvm_v4f64_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpermilvar_pd_512 :
- GCCBuiltin<"__builtin_ia32_vpermilvarpd512_mask">,
- Intrinsic<[llvm_v8f64_ty],
- [llvm_v8f64_ty, llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpermilvar_pd_128 :
- GCCBuiltin<"__builtin_ia32_vpermilvarpd_mask">,
- Intrinsic<[llvm_v2f64_ty],
- [llvm_v2f64_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpermilvar_ps_256 :
- GCCBuiltin<"__builtin_ia32_vpermilvarps256_mask">,
- Intrinsic<[llvm_v8f32_ty],
- [llvm_v8f32_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_vpermilvar_ps_512 :
- GCCBuiltin<"__builtin_ia32_vpermilvarps512_mask">,
- Intrinsic<[llvm_v16f32_ty],
- [llvm_v16f32_ty, llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ def int_x86_avx512_vpermilvar_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarpd512">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8i64_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_vpermilvar_ps_128 :
- GCCBuiltin<"__builtin_ia32_vpermilvarps_mask">,
- Intrinsic<[llvm_v4f32_ty],
- [llvm_v4f32_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ def int_x86_avx512_vpermilvar_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarps512">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_pshuf_b_128 :
- GCCBuiltin<"__builtin_ia32_pshufb128_mask">,
- Intrinsic<[llvm_v16i8_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_pshuf_b_256 :
- GCCBuiltin<"__builtin_ia32_pshufb256_mask">,
- Intrinsic<[llvm_v32i8_ty],
- [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_pshuf_b_512 :
- GCCBuiltin<"__builtin_ia32_pshufb512_mask">,
- Intrinsic<[llvm_v64i8_ty],
- [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ def int_x86_avx512_pshuf_b_512 :
+ GCCBuiltin<"__builtin_ia32_pshufb512">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_shuf_f32x4_256 :
@@ -1441,42 +1378,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v8i64_ty],
[llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty],
[IntrNoMem]>;
-
- def int_x86_avx512_mask_shuf_pd_128 :
- GCCBuiltin<"__builtin_ia32_shufpd128_mask">,
- Intrinsic<[llvm_v2f64_ty],
- [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_shuf_pd_256 :
- GCCBuiltin<"__builtin_ia32_shufpd256_mask">,
- Intrinsic<[llvm_v4f64_ty],
- [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_shuf_pd_512 :
- GCCBuiltin<"__builtin_ia32_shufpd512_mask">,
- Intrinsic<[llvm_v8f64_ty],
- [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_shuf_ps_128 :
- GCCBuiltin<"__builtin_ia32_shufps128_mask">,
- Intrinsic<[llvm_v4f32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_shuf_ps_256 :
- GCCBuiltin<"__builtin_ia32_shufps256_mask">,
- Intrinsic<[llvm_v8f32_ty],
- [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_shuf_ps_512 :
- GCCBuiltin<"__builtin_ia32_shufps512_mask">,
- Intrinsic<[llvm_v16f32_ty],
- [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty],
- [IntrNoMem]>;
}
// Vector blend
@@ -1493,7 +1394,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_dp_ps_256 : GCCBuiltin<"__builtin_ia32_dpps256">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem, Commutative]>;
}
// Vector compare
@@ -1694,16 +1595,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[], [], []>;
}
-// Vector load with broadcast
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx_vbroadcastf128_pd_256 :
- GCCBuiltin<"__builtin_ia32_vbroadcastf128_pd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
- def int_x86_avx_vbroadcastf128_ps_256 :
- GCCBuiltin<"__builtin_ia32_vbroadcastf128_ps256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
-}
-
// SIMD load ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_ldu_dq_256 : GCCBuiltin<"__builtin_ia32_lddqu256">,
@@ -1726,20 +1617,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[IntrReadMem, IntrArgMemOnly]>;
}
-// Conditional move ops
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_mask_move_ss :
- GCCBuiltin<"__builtin_ia32_movss_mask">,
- Intrinsic<[llvm_v4f32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
- def int_x86_avx512_mask_move_sd :
- GCCBuiltin<"__builtin_ia32_movsd_mask">,
- Intrinsic<[llvm_v2f64_ty],
- [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-}
-
// Conditional store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
@@ -1818,154 +1695,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v32i8_ty], [IntrNoMem, Commutative]>;
}
-// Vector min, max
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_mask_pmaxs_b_128 : GCCBuiltin<"__builtin_ia32_pmaxsb128_mask">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_b_256 : GCCBuiltin<"__builtin_ia32_pmaxsb256_mask">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_b_512 : GCCBuiltin<"__builtin_ia32_pmaxsb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_b_128 : GCCBuiltin<"__builtin_ia32_pmaxub128_mask">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_b_256 : GCCBuiltin<"__builtin_ia32_pmaxub256_mask">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_b_512 : GCCBuiltin<"__builtin_ia32_pmaxub512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_w_128 : GCCBuiltin<"__builtin_ia32_pmaxsw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_w_256 : GCCBuiltin<"__builtin_ia32_pmaxsw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_w_512 : GCCBuiltin<"__builtin_ia32_pmaxsw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty],[IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_w_128 : GCCBuiltin<"__builtin_ia32_pmaxuw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_w_256 : GCCBuiltin<"__builtin_ia32_pmaxuw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_w_512 : GCCBuiltin<"__builtin_ia32_pmaxuw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty],[IntrNoMem]>;
- def int_x86_avx512_mask_pmins_b_128 : GCCBuiltin<"__builtin_ia32_pminsb128_mask">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty,llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_b_256 : GCCBuiltin<"__builtin_ia32_pminsb256_mask">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_b_512 : GCCBuiltin<"__builtin_ia32_pminsb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_b_128 : GCCBuiltin<"__builtin_ia32_pminub128_mask">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_b_256 : GCCBuiltin<"__builtin_ia32_pminub256_mask">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_b_512 : GCCBuiltin<"__builtin_ia32_pminub512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_w_128 : GCCBuiltin<"__builtin_ia32_pminsw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_w_256 : GCCBuiltin<"__builtin_ia32_pminsw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_w_512 : GCCBuiltin<"__builtin_ia32_pminsw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty],[IntrNoMem]>;
- def int_x86_avx512_mask_pminu_w_128 : GCCBuiltin<"__builtin_ia32_pminuw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_w_256 : GCCBuiltin<"__builtin_ia32_pminuw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_w_512 : GCCBuiltin<"__builtin_ia32_pminuw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_d_512 : GCCBuiltin<"__builtin_ia32_pmaxud512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_d_256 : GCCBuiltin<"__builtin_ia32_pmaxud256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_d_128 : GCCBuiltin<"__builtin_ia32_pmaxud128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_d_512 : GCCBuiltin<"__builtin_ia32_pmaxsd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_d_256 : GCCBuiltin<"__builtin_ia32_pmaxsd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_d_128 : GCCBuiltin<"__builtin_ia32_pmaxsd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_q_512 : GCCBuiltin<"__builtin_ia32_pmaxuq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_q_256 : GCCBuiltin<"__builtin_ia32_pmaxuq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxu_q_128 : GCCBuiltin<"__builtin_ia32_pmaxuq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_q_512 : GCCBuiltin<"__builtin_ia32_pmaxsq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_q_256 : GCCBuiltin<"__builtin_ia32_pmaxsq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmaxs_q_128 : GCCBuiltin<"__builtin_ia32_pmaxsq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_d_512 : GCCBuiltin<"__builtin_ia32_pminud512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_d_256 : GCCBuiltin<"__builtin_ia32_pminud256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_d_128 : GCCBuiltin<"__builtin_ia32_pminud128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_d_512 : GCCBuiltin<"__builtin_ia32_pminsd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_d_256 : GCCBuiltin<"__builtin_ia32_pminsd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_d_128 : GCCBuiltin<"__builtin_ia32_pminsd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_q_512 : GCCBuiltin<"__builtin_ia32_pminuq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_q_256 : GCCBuiltin<"__builtin_ia32_pminuq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pminu_q_128 : GCCBuiltin<"__builtin_ia32_pminuq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_q_512 : GCCBuiltin<"__builtin_ia32_pminsq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_q_256 : GCCBuiltin<"__builtin_ia32_pminsq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmins_q_128 : GCCBuiltin<"__builtin_ia32_pminsq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-}
-
// Integer shift ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_psll_w : GCCBuiltin<"__builtin_ia32_psllw256">,
@@ -2018,165 +1747,76 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_w_128 : GCCBuiltin<"__builtin_ia32_psrlw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_w_256 : GCCBuiltin<"__builtin_ia32_psrlw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v8i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_w_512 : GCCBuiltin<"__builtin_ia32_psrlw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_wi_128 : GCCBuiltin<"__builtin_ia32_psrlwi128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_i32_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_wi_256 : GCCBuiltin<"__builtin_ia32_psrlwi256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_i32_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_wi_512 : GCCBuiltin<"__builtin_ia32_psrlwi512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_i32_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psra_q_128 : GCCBuiltin<"__builtin_ia32_psraq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_psra_q_256 : GCCBuiltin<"__builtin_ia32_psraq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_w_128 : GCCBuiltin<"__builtin_ia32_psraw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_w_256 : GCCBuiltin<"__builtin_ia32_psraw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v8i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_w_512 : GCCBuiltin<"__builtin_ia32_psraw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_wi_128 : GCCBuiltin<"__builtin_ia32_psrawi128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_i32_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_wi_256 : GCCBuiltin<"__builtin_ia32_psrawi256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_i32_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_wi_512 : GCCBuiltin<"__builtin_ia32_psrawi512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_i32_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrai_q_128 : GCCBuiltin<"__builtin_ia32_psraqi128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrai_q_256 : GCCBuiltin<"__builtin_ia32_psraqi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_d : GCCBuiltin<"__builtin_ia32_pslld512_mask">,
+ def int_x86_avx512_psll_w_512 : GCCBuiltin<"__builtin_ia32_psllw512">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_psll_d_512 : GCCBuiltin<"__builtin_ia32_pslld512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_q : GCCBuiltin<"__builtin_ia32_psllq512_mask">,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psll_q_512 : GCCBuiltin<"__builtin_ia32_psllq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_d : GCCBuiltin<"__builtin_ia32_psrld512_mask">,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrl_w_512 : GCCBuiltin<"__builtin_ia32_psrlw512">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrl_d_512 : GCCBuiltin<"__builtin_ia32_psrld512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq512_mask">,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrl_q_512 : GCCBuiltin<"__builtin_ia32_psrlq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_d : GCCBuiltin<"__builtin_ia32_psrad512_mask">,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_psra_w_512 : GCCBuiltin<"__builtin_ia32_psraw512">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_psra_d_512 : GCCBuiltin<"__builtin_ia32_psrad512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_q : GCCBuiltin<"__builtin_ia32_psraq512_mask">,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psra_q_512 : GCCBuiltin<"__builtin_ia32_psraq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ llvm_v2i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_w_128 : GCCBuiltin<"__builtin_ia32_psllw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_w_256 : GCCBuiltin<"__builtin_ia32_psllw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v8i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_w_512 : GCCBuiltin<"__builtin_ia32_psllw512_mask">,
+ def int_x86_avx512_pslli_w_512 : GCCBuiltin<"__builtin_ia32_psllwi512">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_wi_128 : GCCBuiltin<"__builtin_ia32_psllwi128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_i32_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_wi_256 : GCCBuiltin<"__builtin_ia32_psllwi256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_i32_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_wi_512 : GCCBuiltin<"__builtin_ia32_psllwi512_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pslli_d_512 : GCCBuiltin<"__builtin_ia32_pslldi512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pslli_q_512 : GCCBuiltin<"__builtin_ia32_psllqi512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrli_w_512 : GCCBuiltin<"__builtin_ia32_psrlwi512">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_i32_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psllv16_hi : GCCBuiltin<"__builtin_ia32_psllv16hi_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psllv2_di : GCCBuiltin<"__builtin_ia32_psllv2di_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psllv32hi : GCCBuiltin<"__builtin_ia32_psllv32hi_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrli_d_512 : GCCBuiltin<"__builtin_ia32_psrldi512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrli_q_512 : GCCBuiltin<"__builtin_ia32_psrlqi512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrai_w_512 : GCCBuiltin<"__builtin_ia32_psrawi512">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psllv4_di : GCCBuiltin<"__builtin_ia32_psllv4di_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psllv4_si : GCCBuiltin<"__builtin_ia32_psllv4si_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psllv8_hi : GCCBuiltin<"__builtin_ia32_psllv8hi_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psllv8_si : GCCBuiltin<"__builtin_ia32_psllv8si_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_psra_d_128 : GCCBuiltin<"__builtin_ia32_psrad128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_d_256 : GCCBuiltin<"__builtin_ia32_psrad256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_di_128 : GCCBuiltin<"__builtin_ia32_psradi128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_di_256 : GCCBuiltin<"__builtin_ia32_psradi256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_di_512 : GCCBuiltin<"__builtin_ia32_psradi512_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrai_d_512 : GCCBuiltin<"__builtin_ia32_psradi512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_q_128 : GCCBuiltin<"__builtin_ia32_psraq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_q_256 : GCCBuiltin<"__builtin_ia32_psraq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_qi_128 : GCCBuiltin<"__builtin_ia32_psraqi128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_qi_256 : GCCBuiltin<"__builtin_ia32_psraqi256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_qi_512 : GCCBuiltin<"__builtin_ia32_psraqi512_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrai_q_512 : GCCBuiltin<"__builtin_ia32_psraqi512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_d_128: GCCBuiltin<"__builtin_ia32_psrld128_mask">,
- Intrinsic<[llvm_v4i32_ty], [ llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty ], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_d_256: GCCBuiltin<"__builtin_ia32_psrld256_mask">,
- Intrinsic<[llvm_v8i32_ty], [ llvm_v8i32_ty,
- llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty ], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_di_128: GCCBuiltin<"__builtin_ia32_psrldi128_mask">,
- Intrinsic<[llvm_v4i32_ty], [ llvm_v4i32_ty,
- llvm_i32_ty, llvm_v4i32_ty, llvm_i8_ty ], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_di_256: GCCBuiltin<"__builtin_ia32_psrldi256_mask">,
- Intrinsic<[llvm_v8i32_ty], [ llvm_v8i32_ty,
- llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty ], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_di_512: GCCBuiltin<"__builtin_ia32_psrldi512_mask">,
- Intrinsic<[llvm_v16i32_ty], [ llvm_v16i32_ty,
- llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty ], [IntrNoMem]>;
-
- def int_x86_avx512_mask_psrl_q_128: GCCBuiltin<"__builtin_ia32_psrlq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_q_256: GCCBuiltin<"__builtin_ia32_psrlq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_qi_128: GCCBuiltin<"__builtin_ia32_psrlqi128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_qi_256: GCCBuiltin<"__builtin_ia32_psrlqi256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_qi_512: GCCBuiltin<"__builtin_ia32_psrlqi512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_pmultishift_qb_128:
GCCBuiltin<"__builtin_ia32_vpmultishiftqb128_mask">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
@@ -2596,6 +2236,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
[IntrNoMem]>;
+ def int_x86_avx512_psllv_d_512 : GCCBuiltin<"__builtin_ia32_psllv16si">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psllv_q_512 : GCCBuiltin<"__builtin_ia32_psllv8di">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+ [IntrNoMem]>;
+
def int_x86_avx2_psrlv_d : GCCBuiltin<"__builtin_ia32_psrlv4si">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
@@ -2609,6 +2256,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
[IntrNoMem]>;
+ def int_x86_avx512_psrlv_d_512 : GCCBuiltin<"__builtin_ia32_psrlv16si">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psrlv_q_512 : GCCBuiltin<"__builtin_ia32_psrlv8di">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
+ [IntrNoMem]>;
+
def int_x86_avx2_psrav_d : GCCBuiltin<"__builtin_ia32_psrav4si">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
@@ -2616,105 +2270,48 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_psllv_d : GCCBuiltin<"__builtin_ia32_psllv16si_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ def int_x86_avx512_psrav_d_512 : GCCBuiltin<"__builtin_ia32_psrav16si">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_psllv_q : GCCBuiltin<"__builtin_ia32_psllv8di_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ def int_x86_avx512_psrav_q_128 : GCCBuiltin<"__builtin_ia32_psravq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_psrav_d : GCCBuiltin<"__builtin_ia32_psrav16si_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ def int_x86_avx512_psrav_q_256 : GCCBuiltin<"__builtin_ia32_psravq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_psrav_q : GCCBuiltin<"__builtin_ia32_psrav8di_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ def int_x86_avx512_psrav_q_512 : GCCBuiltin<"__builtin_ia32_psrav8di">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_psrlv_d : GCCBuiltin<"__builtin_ia32_psrlv16si_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+
+ def int_x86_avx512_psllv_w_128 : GCCBuiltin<"__builtin_ia32_psllv8hi">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_psrlv_q : GCCBuiltin<"__builtin_ia32_psrlv8di_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ def int_x86_avx512_psllv_w_256 : GCCBuiltin<"__builtin_ia32_psllv16hi">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psllv_w_512 : GCCBuiltin<"__builtin_ia32_psllv32hi">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_psll_d_128 : GCCBuiltin<"__builtin_ia32_pslld128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_d_256 : GCCBuiltin<"__builtin_ia32_pslld256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_di_128 : GCCBuiltin<"__builtin_ia32_pslldi128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_di_256 : GCCBuiltin<"__builtin_ia32_pslldi256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_di_512 : GCCBuiltin<"__builtin_ia32_pslldi512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_q_128 : GCCBuiltin<"__builtin_ia32_psllq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_q_256 : GCCBuiltin<"__builtin_ia32_psllq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_qi_128 : GCCBuiltin<"__builtin_ia32_psllqi128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_qi_256 : GCCBuiltin<"__builtin_ia32_psllqi256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_qi_512 : GCCBuiltin<"__builtin_ia32_psllqi512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_psrav16_hi : GCCBuiltin<"__builtin_ia32_psrav16hi_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrav32_hi : GCCBuiltin<"__builtin_ia32_psrav32hi_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrav4_si : GCCBuiltin<"__builtin_ia32_psrav4si_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrav8_hi : GCCBuiltin<"__builtin_ia32_psrav8hi_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrav8_si : GCCBuiltin<"__builtin_ia32_psrav8si_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrav_q_128 : GCCBuiltin<"__builtin_ia32_psravq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrav_q_256 : GCCBuiltin<"__builtin_ia32_psravq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrlv_w_128 : GCCBuiltin<"__builtin_ia32_psrlv8hi">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psrlv_w_256 : GCCBuiltin<"__builtin_ia32_psrlv16hi">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psrlv_w_512 : GCCBuiltin<"__builtin_ia32_psrlv32hi">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
+ [IntrNoMem]>;
- def int_x86_avx512_mask_psrlv16_hi : GCCBuiltin<"__builtin_ia32_psrlv16hi_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrlv2_di : GCCBuiltin<"__builtin_ia32_psrlv2di_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrlv32hi : GCCBuiltin<"__builtin_ia32_psrlv32hi_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrlv4_di : GCCBuiltin<"__builtin_ia32_psrlv4di_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrlv4_si : GCCBuiltin<"__builtin_ia32_psrlv4si_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrlv8_hi : GCCBuiltin<"__builtin_ia32_psrlv8hi_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrlv8_si : GCCBuiltin<"__builtin_ia32_psrlv8si_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_psrav_w_128 : GCCBuiltin<"__builtin_ia32_psrav8hi">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psrav_w_256 : GCCBuiltin<"__builtin_ia32_psrav16hi">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psrav_w_512 : GCCBuiltin<"__builtin_ia32_psrav32hi">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
+ [IntrNoMem]>;
def int_x86_avx512_mask_prorv_d_128 : GCCBuiltin<"__builtin_ia32_prorvd128_mask">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
@@ -3268,6 +2865,18 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask3_vfmsub_sd :
+ GCCBuiltin<"__builtin_ia32_vfmsubsd3_mask3">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsub_ss :
+ GCCBuiltin<"__builtin_ia32_vfmsubss3_mask3">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
def int_x86_avx512_mask3_vfmsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmsubpd128_mask3">,
Intrinsic<[llvm_v2f64_ty],
@@ -3376,6 +2985,18 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask3_vfnmsub_sd :
+ GCCBuiltin<"__builtin_ia32_vfnmsubsd3_mask3">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfnmsub_ss :
+ GCCBuiltin<"__builtin_ia32_vfnmsubss3_mask3">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
def int_x86_avx512_mask_vfnmsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask">,
Intrinsic<[llvm_v2f64_ty],
@@ -3758,16 +3379,16 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Addition
def int_x86_mmx_padd_b : GCCBuiltin<"__builtin_ia32_paddb">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
- [IntrNoMem]>;
+ [IntrNoMem, Commutative]>;
def int_x86_mmx_padd_w : GCCBuiltin<"__builtin_ia32_paddw">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
- [IntrNoMem]>;
+ [IntrNoMem, Commutative]>;
def int_x86_mmx_padd_d : GCCBuiltin<"__builtin_ia32_paddd">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
- [IntrNoMem]>;
+ [IntrNoMem, Commutative]>;
def int_x86_mmx_padd_q : GCCBuiltin<"__builtin_ia32_paddq">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
- [IntrNoMem]>;
+ [IntrNoMem, Commutative]>;
def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
@@ -3831,16 +3452,16 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Bitwise operations
def int_x86_mmx_pand : GCCBuiltin<"__builtin_ia32_pand">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
- [IntrNoMem]>;
+ [IntrNoMem, Commutative]>;
def int_x86_mmx_pandn : GCCBuiltin<"__builtin_ia32_pandn">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_por : GCCBuiltin<"__builtin_ia32_por">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
- [IntrNoMem]>;
+ [IntrNoMem, Commutative]>;
def int_x86_mmx_pxor : GCCBuiltin<"__builtin_ia32_pxor">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
- [IntrNoMem]>;
+ [IntrNoMem, Commutative]>;
// Averages
def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">,
@@ -4140,6 +3761,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_x86_xsaves64 :
Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xgetbv :
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
+ def int_x86_xsetbv :
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
}
//===----------------------------------------------------------------------===//
@@ -4294,61 +3919,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_kortestc_w : GCCBuiltin<"__builtin_ia32_kortestchi">,
Intrinsic<[llvm_i32_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
-
- def int_x86_avx512_mask_pmovsxb_d_128 : GCCBuiltin<"__builtin_ia32_pmovsxbd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_d_256 : GCCBuiltin<"__builtin_ia32_pmovsxbd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v16i8_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_d_512 : GCCBuiltin<"__builtin_ia32_pmovsxbd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i8_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_q_128 : GCCBuiltin<"__builtin_ia32_pmovsxbq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_q_256 : GCCBuiltin<"__builtin_ia32_pmovsxbq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v16i8_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_q_512 : GCCBuiltin<"__builtin_ia32_pmovsxbq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v16i8_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_w_128 : GCCBuiltin<"__builtin_ia32_pmovsxbw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_w_256 : GCCBuiltin<"__builtin_ia32_pmovsxbw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i8_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxb_w_512 : GCCBuiltin<"__builtin_ia32_pmovsxbw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i8_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxd_q_128 : GCCBuiltin<"__builtin_ia32_pmovsxdq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxd_q_256 : GCCBuiltin<"__builtin_ia32_pmovsxdq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i32_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxd_q_512 : GCCBuiltin<"__builtin_ia32_pmovsxdq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i32_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxw_d_128 : GCCBuiltin<"__builtin_ia32_pmovsxwd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxw_d_256 : GCCBuiltin<"__builtin_ia32_pmovsxwd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i16_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxw_d_512 : GCCBuiltin<"__builtin_ia32_pmovsxwd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i16_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxw_q_128 : GCCBuiltin<"__builtin_ia32_pmovsxwq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxw_q_256 : GCCBuiltin<"__builtin_ia32_pmovsxwq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v8i16_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovsxw_q_512 : GCCBuiltin<"__builtin_ia32_pmovsxwq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i16_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
}
// Conversion ops
@@ -4403,9 +3973,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_cvtsi2ss64 : GCCBuiltin<"__builtin_ia32_cvtsi2ss64">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_cvtsi2sd32 : GCCBuiltin<"__builtin_ia32_cvtsi2sd32">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
- llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_cvtsi2sd64 : GCCBuiltin<"__builtin_ia32_cvtsi2sd64">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -4510,24 +4077,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Vector convert
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_mask_cvtdq2pd_128 :
- GCCBuiltin<"__builtin_ia32_cvtdq2pd128_mask">,
- Intrinsic<[llvm_v2f64_ty],
- [llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_cvtdq2pd_256 :
- GCCBuiltin<"__builtin_ia32_cvtdq2pd256_mask">,
- Intrinsic<[llvm_v4f64_ty],
- [llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_cvtdq2pd_512 :
- GCCBuiltin<"__builtin_ia32_cvtdq2pd512_mask">,
- Intrinsic<[llvm_v8f64_ty],
- [llvm_v8i32_ty, llvm_v8f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
def int_x86_avx512_mask_cvtdq2ps_128 :
GCCBuiltin<"__builtin_ia32_cvtdq2ps128_mask">,
Intrinsic<[llvm_v4f32_ty],
@@ -4918,24 +4467,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_cvtudq2pd_128 :
- GCCBuiltin<"__builtin_ia32_cvtudq2pd128_mask">,
- Intrinsic<[llvm_v2f64_ty],
- [llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_cvtudq2pd_256 :
- GCCBuiltin<"__builtin_ia32_cvtudq2pd256_mask">,
- Intrinsic<[llvm_v4f64_ty],
- [llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_cvtudq2pd_512 :
- GCCBuiltin<"__builtin_ia32_cvtudq2pd512_mask">,
- Intrinsic<[llvm_v8f64_ty],
- [llvm_v8i32_ty, llvm_v8f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
def int_x86_avx512_mask_cvtudq2ps_128 :
GCCBuiltin<"__builtin_ia32_cvtudq2ps128_mask">,
Intrinsic<[llvm_v4f32_ty],
@@ -5161,153 +4692,30 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v2i64_ty], [llvm_i8_ty], [IntrNoMem]>;
}
-// Vector sign and zero extend
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_pmovzxbq : GCCBuiltin<"__builtin_ia32_pmovzxbq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_avx512_pmovzxwd : GCCBuiltin<"__builtin_ia32_pmovzxwd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i16_ty],
- [IntrNoMem]>;
- def int_x86_avx512_pmovzxbd : GCCBuiltin<"__builtin_ia32_pmovzxbd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i8_ty],
- [IntrNoMem]>;
- def int_x86_avx512_pmovzxwq : GCCBuiltin<"__builtin_ia32_pmovzxwq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i16_ty],
- [IntrNoMem]>;
- def int_x86_avx512_pmovzxdq : GCCBuiltin<"__builtin_ia32_pmovzxdq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i32_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_pmovzxb_d_128 : GCCBuiltin<"__builtin_ia32_pmovzxbd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_d_256 : GCCBuiltin<"__builtin_ia32_pmovzxbd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v16i8_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_d_512 : GCCBuiltin<"__builtin_ia32_pmovzxbd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i8_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_q_128 : GCCBuiltin<"__builtin_ia32_pmovzxbq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_q_256 : GCCBuiltin<"__builtin_ia32_pmovzxbq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v16i8_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_q_512 : GCCBuiltin<"__builtin_ia32_pmovzxbq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v16i8_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_w_128 : GCCBuiltin<"__builtin_ia32_pmovzxbw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_w_256 : GCCBuiltin<"__builtin_ia32_pmovzxbw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i8_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxb_w_512 : GCCBuiltin<"__builtin_ia32_pmovzxbw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i8_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxd_q_128 : GCCBuiltin<"__builtin_ia32_pmovzxdq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxd_q_256 : GCCBuiltin<"__builtin_ia32_pmovzxdq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i32_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxd_q_512 : GCCBuiltin<"__builtin_ia32_pmovzxdq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i32_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxw_d_128 : GCCBuiltin<"__builtin_ia32_pmovzxwd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxw_d_256 : GCCBuiltin<"__builtin_ia32_pmovzxwd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i16_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxw_d_512 : GCCBuiltin<"__builtin_ia32_pmovzxwd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i16_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxw_q_128 : GCCBuiltin<"__builtin_ia32_pmovzxwq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxw_q_256 : GCCBuiltin<"__builtin_ia32_pmovzxwq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v8i16_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmovzxw_q_512 : GCCBuiltin<"__builtin_ia32_pmovzxwq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i16_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-
-}
-
// Arithmetic ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_mask_add_ps_128 : GCCBuiltin<"__builtin_ia32_addps128_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_add_ps_256 : GCCBuiltin<"__builtin_ia32_addps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_add_ps_512 : GCCBuiltin<"__builtin_ia32_addps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_add_pd_128 : GCCBuiltin<"__builtin_ia32_addpd128_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_add_pd_256 : GCCBuiltin<"__builtin_ia32_addpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_add_pd_512 : GCCBuiltin<"__builtin_ia32_addpd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_sub_ps_128 : GCCBuiltin<"__builtin_ia32_subps128_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_sub_ps_256 : GCCBuiltin<"__builtin_ia32_subps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_sub_ps_512 : GCCBuiltin<"__builtin_ia32_subps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_sub_pd_128 : GCCBuiltin<"__builtin_ia32_subpd128_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_sub_pd_256 : GCCBuiltin<"__builtin_ia32_subpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_sub_pd_512 : GCCBuiltin<"__builtin_ia32_subpd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_mul_ps_128 : GCCBuiltin<"__builtin_ia32_mulps_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_mul_ps_256 : GCCBuiltin<"__builtin_ia32_mulps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_mul_ps_512 : GCCBuiltin<"__builtin_ia32_mulps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_mul_pd_128 : GCCBuiltin<"__builtin_ia32_mulpd_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_mul_pd_256 : GCCBuiltin<"__builtin_ia32_mulpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_mul_pd_512 : GCCBuiltin<"__builtin_ia32_mulpd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_div_ps_128 : GCCBuiltin<"__builtin_ia32_divps_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_div_ps_256 : GCCBuiltin<"__builtin_ia32_divps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_div_ps_512 : GCCBuiltin<"__builtin_ia32_divps512_mask">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_div_pd_128 : GCCBuiltin<"__builtin_ia32_divpd_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_div_pd_256 : GCCBuiltin<"__builtin_ia32_divpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_div_pd_512 : GCCBuiltin<"__builtin_ia32_divpd512_mask">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -5701,106 +5109,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem]>;
-def int_x86_avx512_psad_bw_512 : GCCBuiltin<"__builtin_ia32_psadbw512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
- [IntrNoMem]>;
-}
-// FP logical ops
-let TargetPrefix = "x86" in {
- def int_x86_avx512_mask_and_pd_128 : GCCBuiltin<"__builtin_ia32_andpd128_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_and_pd_256 : GCCBuiltin<"__builtin_ia32_andpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_and_pd_512 : GCCBuiltin<"__builtin_ia32_andpd512_mask">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_and_ps_128 : GCCBuiltin<"__builtin_ia32_andps128_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_and_ps_256 : GCCBuiltin<"__builtin_ia32_andps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_and_ps_512 : GCCBuiltin<"__builtin_ia32_andps512_mask">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_andn_pd_128 : GCCBuiltin<"__builtin_ia32_andnpd128_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_andn_pd_256 : GCCBuiltin<"__builtin_ia32_andnpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_andn_pd_512 : GCCBuiltin<"__builtin_ia32_andnpd512_mask">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_andn_ps_128 : GCCBuiltin<"__builtin_ia32_andnps128_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_andn_ps_256 : GCCBuiltin<"__builtin_ia32_andnps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_andn_ps_512 : GCCBuiltin<"__builtin_ia32_andnps512_mask">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_or_pd_128 : GCCBuiltin<"__builtin_ia32_orpd128_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_or_pd_256 : GCCBuiltin<"__builtin_ia32_orpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_or_pd_512 : GCCBuiltin<"__builtin_ia32_orpd512_mask">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_or_ps_128 : GCCBuiltin<"__builtin_ia32_orps128_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_or_ps_256 : GCCBuiltin<"__builtin_ia32_orps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_or_ps_512 : GCCBuiltin<"__builtin_ia32_orps512_mask">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_xor_pd_128 : GCCBuiltin<"__builtin_ia32_xorpd128_mask">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_xor_pd_256 : GCCBuiltin<"__builtin_ia32_xorpd256_mask">,
- Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
- llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_xor_pd_512 : GCCBuiltin<"__builtin_ia32_xorpd512_mask">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
- llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_xor_ps_128 : GCCBuiltin<"__builtin_ia32_xorps128_mask">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_xor_ps_256 : GCCBuiltin<"__builtin_ia32_xorps256_mask">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
- llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_xor_ps_512 : GCCBuiltin<"__builtin_ia32_xorps512_mask">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
- llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_psad_bw_512 : GCCBuiltin<"__builtin_ia32_psadbw512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
+ [IntrNoMem, Commutative]>;
}
// Integer arithmetic ops
let TargetPrefix = "x86" in {
- def int_x86_avx512_mask_padd_b_128 : GCCBuiltin<"__builtin_ia32_paddb128_mask">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_b_256 : GCCBuiltin<"__builtin_ia32_paddb256_mask">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_b_512 : GCCBuiltin<"__builtin_ia32_paddb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_w_128 : GCCBuiltin<"__builtin_ia32_paddw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_w_256 : GCCBuiltin<"__builtin_ia32_paddw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_w_512 : GCCBuiltin<"__builtin_ia32_paddw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_padds_b_128 : GCCBuiltin<"__builtin_ia32_paddsb128_mask">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
@@ -5837,42 +5151,6 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_mask_paddus_w_512 : GCCBuiltin<"__builtin_ia32_paddusw512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_d_128 : GCCBuiltin<"__builtin_ia32_paddd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_d_256 : GCCBuiltin<"__builtin_ia32_paddd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_d_512 : GCCBuiltin<"__builtin_ia32_paddd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_q_128 : GCCBuiltin<"__builtin_ia32_paddq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_q_256 : GCCBuiltin<"__builtin_ia32_paddq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_padd_q_512 : GCCBuiltin<"__builtin_ia32_paddq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_b_128 : GCCBuiltin<"__builtin_ia32_psubb128_mask">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_b_256 : GCCBuiltin<"__builtin_ia32_psubb256_mask">,
- Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
- llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_b_512 : GCCBuiltin<"__builtin_ia32_psubb512_mask">,
- Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
- llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_w_128 : GCCBuiltin<"__builtin_ia32_psubw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_w_256 : GCCBuiltin<"__builtin_ia32_psubw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_w_512 : GCCBuiltin<"__builtin_ia32_psubw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_psubs_b_128 : GCCBuiltin<"__builtin_ia32_psubsb128_mask">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
@@ -5909,69 +5187,10 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_mask_psubus_w_512 : GCCBuiltin<"__builtin_ia32_psubusw512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_d_128 : GCCBuiltin<"__builtin_ia32_psubd128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_d_256 : GCCBuiltin<"__builtin_ia32_psubd256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_d_512 : GCCBuiltin<"__builtin_ia32_psubd512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_q_128 : GCCBuiltin<"__builtin_ia32_psubq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_q_256 : GCCBuiltin<"__builtin_ia32_psubq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psub_q_512 : GCCBuiltin<"__builtin_ia32_psubq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmulu_dq_128 : GCCBuiltin<"__builtin_ia32_pmuludq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmul_dq_128 : GCCBuiltin<"__builtin_ia32_pmuldq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmulu_dq_256 : GCCBuiltin<"__builtin_ia32_pmuludq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmul_dq_256 : GCCBuiltin<"__builtin_ia32_pmuldq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmulu_dq_512 : GCCBuiltin<"__builtin_ia32_pmuludq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmul_dq_512 : GCCBuiltin<"__builtin_ia32_pmuldq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_w_128 : GCCBuiltin<"__builtin_ia32_pmullw128_mask">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
- llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_w_256 : GCCBuiltin<"__builtin_ia32_pmullw256_mask">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
- llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_w_512 : GCCBuiltin<"__builtin_ia32_pmullw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
- llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_d_128 : GCCBuiltin<"__builtin_ia32_pmulld128_mask">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_d_256 : GCCBuiltin<"__builtin_ia32_pmulld256_mask">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_d_512 : GCCBuiltin<"__builtin_ia32_pmulld512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_q_128 : GCCBuiltin<"__builtin_ia32_pmullq128_mask">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_q_256 : GCCBuiltin<"__builtin_ia32_pmullq256_mask">,
- Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pmull_q_512 : GCCBuiltin<"__builtin_ia32_pmullq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pmulu_dq_512 : GCCBuiltin<"__builtin_ia32_pmuludq512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pmul_dq_512 : GCCBuiltin<"__builtin_ia32_pmuldq512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_pmulhu_w_512 : GCCBuiltin<"__builtin_ia32_pmulhuw512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -6103,7 +5322,7 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather3div2_di :
GCCBuiltin<"__builtin_ia32_gather3div2di">,
- Intrinsic<[llvm_v4i32_ty],
+ Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly]>;
@@ -6115,7 +5334,7 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather3div4_di :
GCCBuiltin<"__builtin_ia32_gather3div4di">,
- Intrinsic<[llvm_v8i32_ty],
+ Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly]>;
@@ -6151,7 +5370,7 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather3siv2_di :
GCCBuiltin<"__builtin_ia32_gather3siv2di">,
- Intrinsic<[llvm_v4i32_ty],
+ Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly]>;
@@ -6163,7 +5382,7 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather3siv4_di :
GCCBuiltin<"__builtin_ia32_gather3siv4di">,
- Intrinsic<[llvm_v8i32_ty],
+ Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly]>;
@@ -6414,44 +5633,6 @@ let TargetPrefix = "x86" in {
[IntrNoMem]>;
}
-let TargetPrefix = "x86" in {
- def int_x86_avx512_mask_valign_q_512 :
- GCCBuiltin<"__builtin_ia32_alignq512_mask">,
- Intrinsic<[llvm_v8i64_ty],
- [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty,
- llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_valign_d_512 :
- GCCBuiltin<"__builtin_ia32_alignd512_mask">,
- Intrinsic<[llvm_v16i32_ty],
- [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty,
- llvm_i16_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_valign_q_256 :
- GCCBuiltin<"__builtin_ia32_alignq256_mask">,
- Intrinsic<[llvm_v4i64_ty],
- [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v4i64_ty,
- llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_valign_d_256 :
- GCCBuiltin<"__builtin_ia32_alignd256_mask">,
- Intrinsic<[llvm_v8i32_ty],
- [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v8i32_ty,
- llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_valign_q_128 :
- GCCBuiltin<"__builtin_ia32_alignq128_mask">,
- Intrinsic<[llvm_v2i64_ty],
- [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty,
- llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_valign_d_128 :
- GCCBuiltin<"__builtin_ia32_alignd128_mask">,
- Intrinsic<[llvm_v4i32_ty],
- [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty,
- llvm_i8_ty], [IntrNoMem]>;
-}
-
// Compares
let TargetPrefix = "x86" in {
// 512-bit
diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h
index dbf2b4562332..7f43d5df3c3f 100644
--- a/include/llvm/IR/LLVMContext.h
+++ b/include/llvm/IR/LLVMContext.h
@@ -15,25 +15,30 @@
#ifndef LLVM_IR_LLVMCONTEXT_H
#define LLVM_IR_LLVMCONTEXT_H
+#include "llvm-c/Types.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Options.h"
+#include <cstdint>
+#include <memory>
+#include <string>
namespace llvm {
-class LLVMContextImpl;
-class StringRef;
-class Twine;
-class Instruction;
-class Module;
-class MDString;
-class DICompositeType;
-class SMDiagnostic;
class DiagnosticInfo;
enum DiagnosticSeverity : char;
-template <typename T> class SmallVectorImpl;
class Function;
-class DebugLoc;
+class Instruction;
+class LLVMContextImpl;
+class Module;
class OptBisect;
+template <typename T> class SmallVectorImpl;
+class SMDiagnostic;
+class StringRef;
+class Twine;
+
+namespace yaml {
+class Output;
+} // end namespace yaml
/// This is an important class for using LLVM in a threaded context. It
/// (opaquely) owns and manages the core "global" data of LLVM's core
@@ -44,6 +49,8 @@ class LLVMContext {
public:
LLVMContextImpl *const pImpl;
LLVMContext();
+ LLVMContext(LLVMContext &) = delete;
+ LLVMContext &operator=(const LLVMContext &) = delete;
~LLVMContext();
// Pinned metadata names, which always have the same value. This is a
@@ -69,6 +76,8 @@ public:
MD_align = 17, // "align"
MD_loop = 18, // "llvm.loop"
MD_type = 19, // "type"
+ MD_section_prefix = 20, // "section_prefix"
+ MD_absolute_symbol = 21, // "absolute_symbol"
};
/// Known operand bundle tag IDs, which always have the same value. All
@@ -181,6 +190,17 @@ public:
/// diagnostics.
void setDiagnosticHotnessRequested(bool Requested);
+ /// \brief Return the YAML file used by the backend to save optimization
+ /// diagnostics. If null, diagnostics are not saved in a file but only
+ /// emitted via the diagnostic handler.
+ yaml::Output *getDiagnosticsOutputFile();
+ /// Set the diagnostics output file used for optimization diagnostics.
+ ///
+ /// By default or if invoked with null, diagnostics are not saved in a file
+ /// but only emitted via the diagnostic handler. Even if an output file is
+ /// set, the handler is invoked for each diagnostic message.
+ void setDiagnosticsOutputFile(std::unique_ptr<yaml::Output> F);
+
/// \brief Get the prefix that should be printed in front of a diagnostic of
/// the given \p Severity
static const char *getDiagnosticMessagePrefix(DiagnosticSeverity Severity);
@@ -244,8 +264,8 @@ public:
/// analysis.
OptBisect &getOptBisect();
private:
- LLVMContext(LLVMContext&) = delete;
- void operator=(LLVMContext&) = delete;
+ // Module needs access to the add/removeModule methods.
+ friend class Module;
/// addModule - Register a module as being instantiated in this context. If
/// the context is deleted, the module will be deleted as well.
@@ -253,9 +273,6 @@ private:
/// removeModule - Unregister a module from this context.
void removeModule(Module*);
-
- // Module needs access to the add/removeModule methods.
- friend class Module;
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
@@ -271,6 +288,6 @@ inline LLVMContextRef *wrap(const LLVMContext **Tys) {
return reinterpret_cast<LLVMContextRef*>(const_cast<LLVMContext**>(Tys));
}
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_LLVMCONTEXT_H
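A minimal usage sketch for the diagnostics output file API added to LLVMContext above (not part of this commit). It assumes llvm::yaml::Output from llvm/Support/YAMLTraits.h and llvm::make_unique from llvm/ADT/STLExtras.h; the helper name is hypothetical.

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: serialize optimization diagnostics to OS as YAML in
// addition to delivering them to the context's diagnostic handler.
void enableRemarksOutput(llvm::LLVMContext &Ctx, llvm::raw_ostream &OS) {
  // The context owns the yaml::Output; OS must outlive the context's use of it.
  Ctx.setDiagnosticsOutputFile(llvm::make_unique<llvm::yaml::Output>(OS));
}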
diff --git a/include/llvm/IR/LegacyPassManagers.h b/include/llvm/IR/LegacyPassManagers.h
index 530fd7166498..b22f9302298d 100644
--- a/include/llvm/IR/LegacyPassManagers.h
+++ b/include/llvm/IR/LegacyPassManagers.h
@@ -486,9 +486,7 @@ public:
// Print passes managed by this manager
void dumpPassStructure(unsigned Offset) override;
- const char *getPassName() const override {
- return "Function Pass Manager";
- }
+ StringRef getPassName() const override { return "Function Pass Manager"; }
FunctionPass *getContainedPass(unsigned N) {
assert ( N < PassVector.size() && "Pass number out of range!");
diff --git a/include/llvm/IR/LegacyPassNameParser.h b/include/llvm/IR/LegacyPassNameParser.h
index 39ae80d797c7..fd9d468b06cb 100644
--- a/include/llvm/IR/LegacyPassNameParser.h
+++ b/include/llvm/IR/LegacyPassNameParser.h
@@ -60,20 +60,20 @@ public:
inline bool ignorablePass(const PassInfo *P) const {
// Ignore non-selectable and non-constructible passes! Ignore
// non-optimizations.
- return P->getPassArgument() == nullptr || *P->getPassArgument() == 0 ||
- P->getNormalCtor() == nullptr || ignorablePassImpl(P);
+ return P->getPassArgument().empty() || P->getNormalCtor() == nullptr ||
+ ignorablePassImpl(P);
}
// Implement the PassRegistrationListener callbacks used to populate our map
//
void passRegistered(const PassInfo *P) override {
if (ignorablePass(P)) return;
- if (findOption(P->getPassArgument()) != getNumOptions()) {
+ if (findOption(P->getPassArgument().data()) != getNumOptions()) {
errs() << "Two passes with the same argument (-"
<< P->getPassArgument() << ") attempted to be registered!\n";
llvm_unreachable(nullptr);
}
- addLiteralOption(P->getPassArgument(), P, P->getPassName());
+ addLiteralOption(P->getPassArgument().data(), P, P->getPassName().data());
}
void passEnumerate(const PassInfo *P) override { passRegistered(P); }
@@ -89,7 +89,7 @@ private:
// ValLessThan - Provide a sorting comparator for Values elements...
static int ValLessThan(const PassNameParser::OptionInfo *VT1,
const PassNameParser::OptionInfo *VT2) {
- return std::strcmp(VT1->Name, VT2->Name);
+ return VT1->Name < VT2->Name;
}
};
@@ -130,7 +130,7 @@ template<const char *Args>
class PassArgFilter {
public:
bool operator()(const PassInfo &P) const {
- return(std::strstr(Args, P.getPassArgument()));
+ return StringRef(Args).contains(P.getPassArgument());
}
};
diff --git a/include/llvm/IR/MDBuilder.h b/include/llvm/IR/MDBuilder.h
index 35341e3271ff..bab8728ed49f 100644
--- a/include/llvm/IR/MDBuilder.h
+++ b/include/llvm/IR/MDBuilder.h
@@ -66,6 +66,9 @@ public:
/// Return metadata containing the entry count for a function.
MDNode *createFunctionEntryCount(uint64_t Count);
+ /// Return metadata containing the section prefix for a function.
+ MDNode *createFunctionSectionPrefix(StringRef Prefix);
+
//===------------------------------------------------------------------===//
// Range metadata.
//===------------------------------------------------------------------===//
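A short sketch (not part of this commit) showing how the new MDBuilder helper pairs with the MD_section_prefix kind added to LLVMContext earlier in this diff; the function name and the ".hot" prefix are placeholders.

#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"

// Hypothetical helper: attach a section-prefix hint to a function.
void setHotSectionPrefix(llvm::Function &F) {
  llvm::MDBuilder MDB(F.getContext());
  F.setMetadata(llvm::LLVMContext::MD_section_prefix,
                MDB.createFunctionSectionPrefix(".hot"));
}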
diff --git a/include/llvm/IR/Mangler.h b/include/llvm/IR/Mangler.h
index 349218e33817..0eb91a3b0600 100644
--- a/include/llvm/IR/Mangler.h
+++ b/include/llvm/IR/Mangler.h
@@ -29,12 +29,7 @@ class Mangler {
/// This keeps track of the number we give to anonymous ones.
mutable DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs;
- /// This simple counter is used to unique value names.
- mutable unsigned NextAnonGlobalID;
-
public:
- Mangler() : NextAnonGlobalID(1) {}
-
/// Print the appropriate prefix and the specified global variable's name.
/// If the global variable doesn't have a name, this fills in a unique name
/// for the global.
diff --git a/include/llvm/IR/Metadata.def b/include/llvm/IR/Metadata.def
index 607f5ef125c9..03cdcab7dc47 100644
--- a/include/llvm/IR/Metadata.def
+++ b/include/llvm/IR/Metadata.def
@@ -82,6 +82,7 @@ HANDLE_MDNODE_BRANCH(MDNode)
HANDLE_MDNODE_LEAF_UNIQUABLE(MDTuple)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocation)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIExpression)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariableExpression)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DINode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(GenericDINode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubrange)
diff --git a/include/llvm/IR/Metadata.h b/include/llvm/IR/Metadata.h
index 91f43d342d27..46c785a1c05d 100644
--- a/include/llvm/IR/Metadata.h
+++ b/include/llvm/IR/Metadata.h
@@ -18,19 +18,30 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
#include <type_traits>
+#include <utility>
namespace llvm {
-class LLVMContext;
class Module;
class ModuleSlotTracker;
@@ -69,6 +80,7 @@ protected:
: SubclassID(ID), Storage(Storage), SubclassData16(0), SubclassData32(0) {
static_assert(sizeof(*this) == 8, "Metadata fields poorly packed");
}
+
~Metadata() = default;
/// \brief Default handling of a changed operand, which asserts.
@@ -263,6 +275,7 @@ private:
public:
ReplaceableMetadataImpl(LLVMContext &Context)
: Context(Context), NextIndex(0) {}
+
~ReplaceableMetadataImpl() {
assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
}
@@ -325,6 +338,7 @@ protected:
: Metadata(ID, Uniqued), ReplaceableMetadataImpl(V->getContext()), V(V) {
assert(V && "Expected valid value");
}
+
~ValueAsMetadata() = default;
public:
@@ -378,6 +392,7 @@ public:
static ConstantAsMetadata *get(Constant *C) {
return ValueAsMetadata::getConstant(C);
}
+
static ConstantAsMetadata *getIfExists(Constant *C) {
return ValueAsMetadata::getConstantIfExists(C);
}
@@ -403,6 +418,7 @@ public:
static LocalAsMetadata *get(Value *Local) {
return ValueAsMetadata::getLocal(Local);
}
+
static LocalAsMetadata *getIfExists(Value *Local) {
return ValueAsMetadata::getLocalIfExists(Local);
}
@@ -463,6 +479,7 @@ public:
namespace mdconst {
namespace detail {
+
template <class T> T &make();
template <class T, class Result> struct HasDereference {
typedef char Yes[1];
@@ -484,6 +501,7 @@ template <class V, class M> struct IsValidReference {
static const bool value = std::is_base_of<Constant, V>::value &&
std::is_convertible<M, const Metadata &>::value;
};
+
} // end namespace detail
/// \brief Check whether Metadata has a Value.
@@ -568,14 +586,14 @@ dyn_extract_or_null(Y &&MD) {
class MDString : public Metadata {
friend class StringMapEntry<MDString>;
- MDString(const MDString &) = delete;
- MDString &operator=(MDString &&) = delete;
- MDString &operator=(const MDString &) = delete;
-
StringMapEntry<MDString> *Entry;
MDString() : Metadata(MDStringKind, Uniqued), Entry(nullptr) {}
public:
+ MDString(const MDString &) = delete;
+ MDString &operator=(MDString &&) = delete;
+ MDString &operator=(const MDString &) = delete;
+
static MDString *get(LLVMContext &Context, StringRef Str);
static MDString *get(LLVMContext &Context, const char *Str) {
return get(Context, Str ? StringRef(Str) : StringRef());
@@ -634,15 +652,18 @@ struct DenseMapInfo<AAMDNodes> {
return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(),
nullptr, nullptr);
}
+
static inline AAMDNodes getTombstoneKey() {
return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(),
nullptr, nullptr);
}
+
static unsigned getHashValue(const AAMDNodes &Val) {
return DenseMapInfo<MDNode *>::getHashValue(Val.TBAA) ^
DenseMapInfo<MDNode *>::getHashValue(Val.Scope) ^
DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias);
}
+
static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) {
return LHS == RHS;
}
@@ -656,15 +677,14 @@ struct DenseMapInfo<AAMDNodes> {
///
/// In particular, this is used by \a MDNode.
class MDOperand {
+ Metadata *MD = nullptr;
+
+public:
+ MDOperand() = default;
MDOperand(MDOperand &&) = delete;
MDOperand(const MDOperand &) = delete;
MDOperand &operator=(MDOperand &&) = delete;
MDOperand &operator=(const MDOperand &) = delete;
-
- Metadata *MD;
-
-public:
- MDOperand() : MD(nullptr) {}
~MDOperand() { untrack(); }
Metadata *get() const { return MD; }
@@ -691,6 +711,7 @@ private:
MetadataTracking::track(MD);
}
}
+
void untrack() {
assert(static_cast<void *>(this) == &MD && "Expected same address");
if (MD)
@@ -715,13 +736,6 @@ template <> struct simplify_type<const MDOperand> {
class ContextAndReplaceableUses {
PointerUnion<LLVMContext *, ReplaceableMetadataImpl *> Ptr;
- ContextAndReplaceableUses() = delete;
- ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete;
- ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete;
- ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete;
- ContextAndReplaceableUses &
- operator=(const ContextAndReplaceableUses &) = delete;
-
public:
ContextAndReplaceableUses(LLVMContext &Context) : Ptr(&Context) {}
ContextAndReplaceableUses(
@@ -729,6 +743,12 @@ public:
: Ptr(ReplaceableUses.release()) {
assert(getReplaceableUses() && "Expected non-null replaceable uses");
}
+ ContextAndReplaceableUses() = delete;
+ ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete;
+ ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete;
+ ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete;
+ ContextAndReplaceableUses &
+ operator=(const ContextAndReplaceableUses &) = delete;
~ContextAndReplaceableUses() { delete getReplaceableUses(); }
operator LLVMContext &() { return getContext(); }
@@ -737,11 +757,13 @@ public:
bool hasReplaceableUses() const {
return Ptr.is<ReplaceableMetadataImpl *>();
}
+
LLVMContext &getContext() const {
if (hasReplaceableUses())
return getReplaceableUses()->getContext();
return *Ptr.get<LLVMContext *>();
}
+
ReplaceableMetadataImpl *getReplaceableUses() const {
if (hasReplaceableUses())
return Ptr.get<ReplaceableMetadataImpl *>();
@@ -809,10 +831,6 @@ class MDNode : public Metadata {
friend class ReplaceableMetadataImpl;
friend class LLVMContextImpl;
- MDNode(const MDNode &) = delete;
- void operator=(const MDNode &) = delete;
- void *operator new(size_t) = delete;
-
unsigned NumOperands;
unsigned NumUnresolved;
@@ -847,6 +865,10 @@ protected:
}
public:
+ MDNode(const MDNode &) = delete;
+ void operator=(const MDNode &) = delete;
+ void *operator new(size_t) = delete;
+
static inline MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs);
static inline MDTuple *getIfExists(LLVMContext &Context,
ArrayRef<Metadata *> MDs);
@@ -1002,9 +1024,11 @@ public:
op_iterator op_begin() const {
return const_cast<MDNode *>(this)->mutable_begin();
}
+
op_iterator op_end() const {
return const_cast<MDNode *>(this)->mutable_end();
}
+
op_range operands() const { return op_range(op_begin(), op_end()); }
const MDOperand &getOperand(unsigned I) const {
@@ -1054,6 +1078,7 @@ class MDTuple : public MDNode {
: MDNode(C, MDTupleKind, Storage, Vals) {
setHash(Hash);
}
+
~MDTuple() { dropAllReferences(); }
void setHash(unsigned Hash) { SubclassData32 = Hash; }
@@ -1074,6 +1099,7 @@ public:
static MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
return getImpl(Context, MDs, Uniqued);
}
+
static MDTuple *getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
return getImpl(Context, MDs, Uniqued, /* ShouldCreate */ false);
}
@@ -1106,12 +1132,15 @@ public:
MDTuple *MDNode::get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
return MDTuple::get(Context, MDs);
}
+
MDTuple *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
return MDTuple::getIfExists(Context, MDs);
}
+
MDTuple *MDNode::getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
return MDTuple::getDistinct(Context, MDs);
}
+
TempMDTuple MDNode::getTemporary(LLVMContext &Context,
ArrayRef<Metadata *> MDs) {
return MDTuple::getTemporary(Context, MDs);
@@ -1133,16 +1162,20 @@ class TypedMDOperandIterator
public:
TypedMDOperandIterator() = default;
explicit TypedMDOperandIterator(MDNode::op_iterator I) : I(I) {}
+
T *operator*() const { return cast_or_null<T>(*I); }
+
TypedMDOperandIterator &operator++() {
++I;
return *this;
}
+
TypedMDOperandIterator operator++(int) {
TypedMDOperandIterator Temp(*this);
++I;
return Temp;
}
+
bool operator==(const TypedMDOperandIterator &X) const { return I == X.I; }
bool operator!=(const TypedMDOperandIterator &X) const { return I != X.I; }
};
@@ -1213,16 +1246,16 @@ class DistinctMDOperandPlaceholder : public Metadata {
Metadata **Use = nullptr;
- DistinctMDOperandPlaceholder() = delete;
- DistinctMDOperandPlaceholder(DistinctMDOperandPlaceholder &&) = delete;
- DistinctMDOperandPlaceholder(const DistinctMDOperandPlaceholder &) = delete;
-
public:
explicit DistinctMDOperandPlaceholder(unsigned ID)
: Metadata(DistinctMDOperandPlaceholderKind, Distinct) {
SubclassData32 = ID;
}
+ DistinctMDOperandPlaceholder() = delete;
+ DistinctMDOperandPlaceholder(DistinctMDOperandPlaceholder &&) = delete;
+ DistinctMDOperandPlaceholder(const DistinctMDOperandPlaceholder &) = delete;
+
~DistinctMDOperandPlaceholder() {
if (Use)
*Use = nullptr;
@@ -1247,10 +1280,8 @@ public:
///
/// TODO: Inherit from Metadata.
class NamedMDNode : public ilist_node<NamedMDNode> {
- friend struct ilist_traits<NamedMDNode>;
friend class LLVMContextImpl;
friend class Module;
- NamedMDNode(const NamedMDNode &) = delete;
std::string Name;
Module *Parent;
@@ -1263,30 +1294,35 @@ class NamedMDNode : public ilist_node<NamedMDNode> {
template<class T1, class T2>
class op_iterator_impl :
public std::iterator<std::bidirectional_iterator_tag, T2> {
- const NamedMDNode *Node;
- unsigned Idx;
+ const NamedMDNode *Node = nullptr;
+ unsigned Idx = 0;
+
op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) { }
friend class NamedMDNode;
public:
- op_iterator_impl() : Node(nullptr), Idx(0) { }
+ op_iterator_impl() = default;
bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }
+
op_iterator_impl &operator++() {
++Idx;
return *this;
}
+
op_iterator_impl operator++(int) {
op_iterator_impl tmp(*this);
operator++();
return tmp;
}
+
op_iterator_impl &operator--() {
--Idx;
return *this;
}
+
op_iterator_impl operator--(int) {
op_iterator_impl tmp(*this);
operator--();
@@ -1297,13 +1333,16 @@ class NamedMDNode : public ilist_node<NamedMDNode> {
};
public:
+ NamedMDNode(const NamedMDNode &) = delete;
+ ~NamedMDNode();
+
/// \brief Drop all references and remove the node from parent module.
void eraseFromParent();
- /// \brief Remove all uses and clear node vector.
- void dropAllReferences();
-
- ~NamedMDNode();
+ /// Remove all uses and clear node vector.
+ void dropAllReferences() { clearOperands(); }
+ /// Drop all references to this node's operands.
+ void clearOperands();
/// \brief Get the module that holds this named metadata collection.
inline Module *getParent() { return Parent; }
@@ -1338,6 +1377,6 @@ public:
}
};
-} // end llvm namespace
+} // end namespace llvm
#endif // LLVM_IR_METADATA_H
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index 632b27e2d0dd..79870b9455a6 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -26,36 +26,18 @@
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/DataTypes.h"
-#include <system_error>
namespace llvm {
template <typename T> class Optional;
+class Error;
class FunctionType;
class GVMaterializer;
class LLVMContext;
+class MemoryBuffer;
class RandomNumberGenerator;
class StructType;
template <class PtrType> class SmallPtrSetImpl;
-template<> struct ilist_traits<NamedMDNode>
- : public ilist_default_traits<NamedMDNode> {
- // createSentinel is used to get hold of a node that marks the end of
- // the list...
- NamedMDNode *createSentinel() const {
- return static_cast<NamedMDNode*>(&Sentinel);
- }
- static void destroySentinel(NamedMDNode*) {}
-
- NamedMDNode *provideInitialHead() const { return createSentinel(); }
- NamedMDNode *ensureHead(NamedMDNode*) const { return createSentinel(); }
- static void noteHead(NamedMDNode*, NamedMDNode*) {}
- void addNodeToList(NamedMDNode *) {}
- void removeNodeFromList(NamedMDNode *) {}
-
-private:
- mutable ilist_node<NamedMDNode> Sentinel;
-};
-
/// A Module instance is used to store all the information related to an
/// LLVM module. Modules are the top level container of all other LLVM
/// Intermediate Representation (IR) objects. Each module directly contains a
@@ -177,6 +159,9 @@ private:
std::string GlobalScopeAsm; ///< Inline Asm at global scope.
ValueSymbolTable *ValSymTab; ///< Symbol table for values
ComdatSymTabType ComdatSymTab; ///< Symbol table for COMDATs
+ std::unique_ptr<MemoryBuffer>
+ OwnedMemoryBuffer; ///< Memory buffer directly owned by this
+ ///< module, for legacy clients only.
std::unique_ptr<GVMaterializer>
Materializer; ///< Used to materialize GlobalValues
std::string ModuleID; ///< Human readable identifier for the module
@@ -469,16 +454,14 @@ public:
GVMaterializer *getMaterializer() const { return Materializer.get(); }
bool isMaterialized() const { return !getMaterializer(); }
- /// Make sure the GlobalValue is fully read. If the module is corrupt, this
- /// returns true and fills in the optional string with information about the
- /// problem. If successful, this returns false.
- std::error_code materialize(GlobalValue *GV);
+ /// Make sure the GlobalValue is fully read.
+ llvm::Error materialize(GlobalValue *GV);
/// Make sure all GlobalValues in this Module are fully read and clear the
/// Materializer.
- std::error_code materializeAll();
+ llvm::Error materializeAll();
- std::error_code materializeMetadata();
+ llvm::Error materializeMetadata();
/// @}
/// @name Direct access to the globals list, functions list, and symbol table
@@ -603,73 +586,33 @@ public:
return make_range(ifunc_begin(), ifunc_end());
}
-/// @}
-/// @name Convenience iterators
-/// @{
-
- template <bool IsConst> class global_object_iterator_t {
- friend Module;
-
- typename std::conditional<IsConst, const_iterator, iterator>::type
- function_i,
- function_e;
- typename std::conditional<IsConst, const_global_iterator,
- global_iterator>::type global_i;
-
- typedef
- typename std::conditional<IsConst, const Module, Module>::type ModuleTy;
-
- global_object_iterator_t(ModuleTy &M)
- : function_i(M.begin()), function_e(M.end()),
- global_i(M.global_begin()) {}
- global_object_iterator_t(ModuleTy &M, int)
- : function_i(M.end()), function_e(M.end()), global_i(M.global_end()) {}
-
- public:
- global_object_iterator_t &operator++() {
- if (function_i != function_e)
- ++function_i;
- else
- ++global_i;
- return *this;
- }
-
- typename std::conditional<IsConst, const GlobalObject, GlobalObject>::type &
- operator*() const {
- if (function_i != function_e)
- return *function_i;
- else
- return *global_i;
- }
-
- bool operator!=(const global_object_iterator_t &other) const {
- return function_i != other.function_i || global_i != other.global_i;
- }
- };
+ /// @}
+ /// @name Convenience iterators
+ /// @{
- typedef global_object_iterator_t</*IsConst=*/false> global_object_iterator;
- typedef global_object_iterator_t</*IsConst=*/true>
+ typedef concat_iterator<GlobalObject, iterator, global_iterator>
+ global_object_iterator;
+ typedef concat_iterator<const GlobalObject, const_iterator,
+ const_global_iterator>
const_global_object_iterator;
- global_object_iterator global_object_begin() {
- return global_object_iterator(*this);
+ iterator_range<global_object_iterator> global_objects() {
+ return concat<GlobalObject>(functions(), globals());
}
- global_object_iterator global_object_end() {
- return global_object_iterator(*this, 0);
+ iterator_range<const_global_object_iterator> global_objects() const {
+ return concat<const GlobalObject>(functions(), globals());
}
- const_global_object_iterator global_object_begin() const {
- return const_global_object_iterator(*this);
- }
- const_global_object_iterator global_object_end() const {
- return const_global_object_iterator(*this, 0);
+ global_object_iterator global_object_begin() {
+ return global_objects().begin();
}
+ global_object_iterator global_object_end() { return global_objects().end(); }
- iterator_range<global_object_iterator> global_objects() {
- return make_range(global_object_begin(), global_object_end());
+ const_global_object_iterator global_object_begin() const {
+ return global_objects().begin();
}
- iterator_range<const_global_object_iterator> global_objects() const {
- return make_range(global_object_begin(), global_object_end());
+ const_global_object_iterator global_object_end() const {
+ return global_objects().end();
}
/// @}
@@ -821,6 +764,9 @@ public:
/// \brief Returns profile summary metadata
Metadata *getProfileSummary();
/// @}
+
+ /// Take ownership of the given memory buffer.
+ void setOwnedMemoryBuffer(std::unique_ptr<MemoryBuffer> MB);
};
/// \brief Given "llvm.used" or "llvm.compiler.used" as a global name, collect
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index 45d9bf7af706..2cfe673d970f 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -30,23 +30,19 @@ namespace llvm {
/// \brief Class to accumulate and hold information about a callee.
struct CalleeInfo {
- /// The static number of callsites calling corresponding function.
- unsigned CallsiteCount;
- /// The cumulative profile count of calls to corresponding function
- /// (if using PGO, otherwise 0).
- uint64_t ProfileCount;
- CalleeInfo() : CallsiteCount(0), ProfileCount(0) {}
- CalleeInfo(unsigned CallsiteCount, uint64_t ProfileCount)
- : CallsiteCount(CallsiteCount), ProfileCount(ProfileCount) {}
- CalleeInfo &operator+=(uint64_t RHSProfileCount) {
- CallsiteCount++;
- ProfileCount += RHSProfileCount;
- return *this;
+ enum class HotnessType : uint8_t { Unknown = 0, Cold = 1, None = 2, Hot = 3 };
+ HotnessType Hotness = HotnessType::Unknown;
+
+ CalleeInfo() = default;
+ explicit CalleeInfo(HotnessType Hotness) : Hotness(Hotness) {}
+
+ void updateHotness(const HotnessType OtherHotness) {
+ Hotness = std::max(Hotness, OtherHotness);
}
};
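A tiny sketch (not part of this commit) of how updateHotness merges call-site hotness: std::max is sufficient because HotnessType is ordered Unknown < Cold < None < Hot.

#include "llvm/IR/ModuleSummaryIndex.h"

// Hypothetical example: repeated updates keep the hottest classification seen.
inline void calleeInfoHotnessExample() {
  llvm::CalleeInfo CI;                                   // Hotness == Unknown
  CI.updateHotness(llvm::CalleeInfo::HotnessType::Cold); // now Cold
  CI.updateHotness(llvm::CalleeInfo::HotnessType::Hot);  // max(Cold, Hot) == Hot
}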
-/// Struct to hold value either by GUID or Value*, depending on whether this
-/// is a combined or per-module index, respectively.
+/// Struct to hold value either by GUID or GlobalValue*. Values in combined
+/// indexes as well as indirect calls are GUIDs, all others are GlobalValues.
struct ValueInfo {
/// The value representation used in this instance.
enum ValueInfoKind {
@@ -57,9 +53,9 @@ struct ValueInfo {
/// Union of the two possible value types.
union ValueUnion {
GlobalValue::GUID Id;
- const Value *V;
+ const GlobalValue *GV;
ValueUnion(GlobalValue::GUID Id) : Id(Id) {}
- ValueUnion(const Value *V) : V(V) {}
+ ValueUnion(const GlobalValue *GV) : GV(GV) {}
};
/// The value being represented.
@@ -68,29 +64,45 @@ struct ValueInfo {
ValueInfoKind Kind;
/// Constructor for a GUID value
ValueInfo(GlobalValue::GUID Id = 0) : TheValue(Id), Kind(VI_GUID) {}
- /// Constructor for a Value* value
- ValueInfo(const Value *V) : TheValue(V), Kind(VI_Value) {}
+ /// Constructor for a GlobalValue* value
+ ValueInfo(const GlobalValue *V) : TheValue(V), Kind(VI_Value) {}
/// Accessor for GUID value
GlobalValue::GUID getGUID() const {
assert(Kind == VI_GUID && "Not a GUID type");
return TheValue.Id;
}
- /// Accessor for Value* value
- const Value *getValue() const {
+ /// Accessor for GlobalValue* value
+ const GlobalValue *getValue() const {
assert(Kind == VI_Value && "Not a Value type");
- return TheValue.V;
+ return TheValue.GV;
}
bool isGUID() const { return Kind == VI_GUID; }
};
+template <> struct DenseMapInfo<ValueInfo> {
+ static inline ValueInfo getEmptyKey() { return ValueInfo((GlobalValue *)-1); }
+ static inline ValueInfo getTombstoneKey() {
+ return ValueInfo((GlobalValue *)-2);
+ }
+ static bool isEqual(ValueInfo L, ValueInfo R) {
+ if (L.isGUID() != R.isGUID())
+ return false;
+ return L.isGUID() ? (L.getGUID() == R.getGUID())
+ : (L.getValue() == R.getValue());
+ }
+ static unsigned getHashValue(ValueInfo I) {
+ return I.isGUID() ? I.getGUID() : (uintptr_t)I.getValue();
+ }
+};
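A brief sketch (not part of this commit): with the DenseMapInfo specialization above, ValueInfo can key a DenseMap directly, whether it wraps a GUID or a GlobalValue*. The alias name is a placeholder.

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/ModuleSummaryIndex.h"

// Hypothetical: map summary edges to some per-edge data keyed by ValueInfo.
using EdgeWeights = llvm::DenseMap<llvm::ValueInfo, unsigned>;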
+
/// \brief Function and variable summary information to aid decisions and
/// implementation of importing.
class GlobalValueSummary {
public:
/// \brief Subclass discriminator (for dyn_cast<> et al.)
- enum SummaryKind { AliasKind, FunctionKind, GlobalVarKind };
+ enum SummaryKind : unsigned { AliasKind, FunctionKind, GlobalVarKind };
- /// Group flags (Linkage, hasSection, isOptSize, etc.) as a bitfield.
+ /// Group flags (Linkage, noRename, isOptSize, etc.) as a bitfield.
struct GVFlags {
/// \brief The linkage type of the associated global value.
///
@@ -101,20 +113,47 @@ public:
/// types based on global summary-based analysis.
unsigned Linkage : 4;
- /// Indicate if the global value is located in a specific section.
- unsigned HasSection : 1;
+ /// Indicate if the global value cannot be renamed (in a specific section,
+ /// possibly referenced from inline assembly, etc).
+ unsigned NoRename : 1;
+
+ /// Indicate if a function contains inline assembly (which is opaque),
+ /// that may reference a local value. This is used to prevent importing
+ /// of this function, since we can't promote and rename the uses of the
+ /// local in the inline assembly. Use a flag rather than bloating the
+ /// summary with references to every possible local value in the
+ /// llvm.used set.
+ unsigned HasInlineAsmMaybeReferencingInternal : 1;
+
+ /// Indicate if the function is not viable to inline.
+ unsigned IsNotViableToInline : 1;
/// Convenience Constructors
- explicit GVFlags(GlobalValue::LinkageTypes Linkage, bool HasSection)
- : Linkage(Linkage), HasSection(HasSection) {}
+ explicit GVFlags(GlobalValue::LinkageTypes Linkage, bool NoRename,
+ bool HasInlineAsmMaybeReferencingInternal,
+ bool IsNotViableToInline)
+ : Linkage(Linkage), NoRename(NoRename),
+ HasInlineAsmMaybeReferencingInternal(
+ HasInlineAsmMaybeReferencingInternal),
+ IsNotViableToInline(IsNotViableToInline) {}
+
GVFlags(const GlobalValue &GV)
- : Linkage(GV.getLinkage()), HasSection(GV.hasSection()) {}
+ : Linkage(GV.getLinkage()), NoRename(GV.hasSection()),
+ HasInlineAsmMaybeReferencingInternal(false) {
+ IsNotViableToInline = false;
+ if (const auto *F = dyn_cast<Function>(&GV))
+ // Inliner doesn't handle variadic functions.
+ // FIXME: refactor this to use the same code that inliner is using.
+ IsNotViableToInline = F->isVarArg();
+ }
};
private:
/// Kind of summary for use in dyn_cast<> et al.
SummaryKind Kind;
+ GVFlags Flags;
+
/// This is the hash of the name of the symbol in the original file. It is
/// identical to the GUID for global symbols, but differs for local since the
/// GUID includes the module level id in the hash.
@@ -129,8 +168,6 @@ private:
/// module path string table.
StringRef ModulePath;
- GVFlags Flags;
-
/// List of values referenced by this global value's definition
/// (either by the initializer of a global variable, or referenced
/// from within a function). This does not include functions called, which
@@ -139,7 +176,8 @@ private:
protected:
/// GlobalValueSummary constructor.
- GlobalValueSummary(SummaryKind K, GVFlags Flags) : Kind(K), Flags(Flags) {}
+ GlobalValueSummary(SummaryKind K, GVFlags Flags, std::vector<ValueInfo> Refs)
+ : Kind(K), Flags(Flags), RefEdgeList(std::move(Refs)) {}
public:
virtual ~GlobalValueSummary() = default;
@@ -175,31 +213,34 @@ public:
Flags.Linkage = Linkage;
}
+ bool isNotViableToInline() const { return Flags.IsNotViableToInline; }
+
/// Return true if this summary is for a GlobalValue that needs promotion
/// to be referenced from another module.
bool needsRenaming() const { return GlobalValue::isLocalLinkage(linkage()); }
- /// Return true if this global value is located in a specific section.
- bool hasSection() const { return Flags.HasSection; }
+ /// Return true if this global value cannot be renamed (in a specific section,
+ /// possibly referenced from inline assembly, etc).
+ bool noRename() const { return Flags.NoRename; }
- /// Record a reference from this global value to the global value identified
- /// by \p RefGUID.
- void addRefEdge(GlobalValue::GUID RefGUID) { RefEdgeList.push_back(RefGUID); }
+ /// Flag that this global value cannot be renamed (in a specific section,
+ /// possibly referenced from inline assembly, etc).
+ void setNoRename() { Flags.NoRename = true; }
- /// Record a reference from this global value to the global value identified
- /// by \p RefV.
- void addRefEdge(const Value *RefV) { RefEdgeList.push_back(RefV); }
+ /// Return true if this global value possibly references another value
+ /// that can't be renamed.
+ bool hasInlineAsmMaybeReferencingInternal() const {
+ return Flags.HasInlineAsmMaybeReferencingInternal;
+ }
- /// Record a reference from this global value to each global value identified
- /// in \p RefEdges.
- void addRefEdges(DenseSet<const Value *> &RefEdges) {
- for (auto &RI : RefEdges)
- addRefEdge(RI);
+ /// Flag that this global value possibly references another value that
+ /// can't be renamed.
+ void setHasInlineAsmMaybeReferencingInternal() {
+ Flags.HasInlineAsmMaybeReferencingInternal = true;
}
/// Return the list of values referenced by this global value definition.
- std::vector<ValueInfo> &refs() { return RefEdgeList; }
- const std::vector<ValueInfo> &refs() const { return RefEdgeList; }
+ ArrayRef<ValueInfo> refs() const { return RefEdgeList; }
};
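+
+// Usage sketch (illustrative, not part of the interface): constructing the
+// widened GVFlags and querying the renaming-related bits on a summary `S`,
+// where `S` stands for any GlobalValueSummary reference.
+//
+//   GlobalValueSummary::GVFlags Flags(
+//       GlobalValue::InternalLinkage, /*NoRename=*/true,
+//       /*HasInlineAsmMaybeReferencingInternal=*/false,
+//       /*IsNotViableToInline=*/false);
+//   if (S.noRename() || S.hasInlineAsmMaybeReferencingInternal())
+//     ; // promotion/renaming during importing would have to be skipped here
+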
/// \brief Alias summary information.
@@ -208,7 +249,8 @@ class AliasSummary : public GlobalValueSummary {
public:
/// Summary constructors.
- AliasSummary(GVFlags Flags) : GlobalValueSummary(AliasKind, Flags) {}
+ AliasSummary(GVFlags Flags, std::vector<ValueInfo> Refs)
+ : GlobalValueSummary(AliasKind, Flags, std::move(Refs)) {}
/// Check if this is an alias summary.
static bool classof(const GlobalValueSummary *GVS) {
@@ -242,10 +284,17 @@ private:
/// List of <CalleeValueInfo, CalleeInfo> call edge pairs from this function.
std::vector<EdgeTy> CallGraphEdgeList;
+ /// List of type identifiers used by this function, represented as GUIDs.
+ std::vector<GlobalValue::GUID> TypeIdList;
+
public:
/// Summary constructors.
- FunctionSummary(GVFlags Flags, unsigned NumInsts)
- : GlobalValueSummary(FunctionKind, Flags), InstCount(NumInsts) {}
+ FunctionSummary(GVFlags Flags, unsigned NumInsts, std::vector<ValueInfo> Refs,
+ std::vector<EdgeTy> CGEdges,
+ std::vector<GlobalValue::GUID> TypeIds)
+ : GlobalValueSummary(FunctionKind, Flags, std::move(Refs)),
+ InstCount(NumInsts), CallGraphEdgeList(std::move(CGEdges)),
+ TypeIdList(std::move(TypeIds)) {}
/// Check if this is a function summary.
static bool classof(const GlobalValueSummary *GVS) {
@@ -255,38 +304,11 @@ public:
/// Get the instruction count recorded for this function.
unsigned instCount() const { return InstCount; }
- /// Record a call graph edge from this function to the function identified
- /// by \p CalleeGUID, with \p CalleeInfo including the cumulative profile
- /// count (across all calls from this function) or 0 if no PGO.
- void addCallGraphEdge(GlobalValue::GUID CalleeGUID, CalleeInfo Info) {
- CallGraphEdgeList.push_back(std::make_pair(CalleeGUID, Info));
- }
-
- /// Record a call graph edge from this function to each function GUID recorded
- /// in \p CallGraphEdges.
- void
- addCallGraphEdges(DenseMap<GlobalValue::GUID, CalleeInfo> &CallGraphEdges) {
- for (auto &EI : CallGraphEdges)
- addCallGraphEdge(EI.first, EI.second);
- }
-
- /// Record a call graph edge from this function to the function identified
- /// by \p CalleeV, with \p CalleeInfo including the cumulative profile
- /// count (across all calls from this function) or 0 if no PGO.
- void addCallGraphEdge(const Value *CalleeV, CalleeInfo Info) {
- CallGraphEdgeList.push_back(std::make_pair(CalleeV, Info));
- }
-
- /// Record a call graph edge from this function to each function recorded
- /// in \p CallGraphEdges.
- void addCallGraphEdges(DenseMap<const Value *, CalleeInfo> &CallGraphEdges) {
- for (auto &EI : CallGraphEdges)
- addCallGraphEdge(EI.first, EI.second);
- }
-
/// Return the list of <CalleeValueInfo, CalleeInfo> pairs.
- std::vector<EdgeTy> &calls() { return CallGraphEdgeList; }
- const std::vector<EdgeTy> &calls() const { return CallGraphEdgeList; }
+ ArrayRef<EdgeTy> calls() const { return CallGraphEdgeList; }
+
+ /// Returns the list of type identifiers used by this function.
+ ArrayRef<GlobalValue::GUID> type_tests() const { return TypeIdList; }
};
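+
+// Usage sketch (the counts and vectors are illustrative; Flags is a GVFlags
+// value as in the sketch above): the constructor now takes ownership of the
+// reference, call-edge, and type-id lists up front, and they are exposed
+// read-only via refs(), calls(), and type_tests().
+//
+//   std::vector<ValueInfo> Refs;
+//   std::vector<FunctionSummary::EdgeTy> CallEdges;
+//   std::vector<GlobalValue::GUID> TypeIds;
+//   FunctionSummary FS(Flags, /*NumInsts=*/42, std::move(Refs),
+//                      std::move(CallEdges), std::move(TypeIds));
+//   for (const auto &Edge : FS.calls())
+//     (void)Edge.first; // CalleeValueInfo
+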
/// \brief Global variable summary information to aid decisions and
@@ -299,7 +321,8 @@ class GlobalVarSummary : public GlobalValueSummary {
public:
/// Summary constructors.
- GlobalVarSummary(GVFlags Flags) : GlobalValueSummary(GlobalVarKind, Flags) {}
+ GlobalVarSummary(GVFlags Flags, std::vector<ValueInfo> Refs)
+ : GlobalValueSummary(GlobalVarKind, Flags, std::move(Refs)) {}
/// Check if this is a global variable summary.
static bool classof(const GlobalValueSummary *GVS) {
@@ -348,13 +371,6 @@ private:
ModulePathStringTableTy ModulePathStringTable;
public:
- ModuleSummaryIndex() = default;
-
- // Disable the copy constructor and assignment operators, so
- // no unexpected copying/moving occurs.
- ModuleSummaryIndex(const ModuleSummaryIndex &) = delete;
- void operator=(const ModuleSummaryIndex &) = delete;
-
gvsummary_iterator begin() { return GlobalValueMap.begin(); }
const_gvsummary_iterator begin() const { return GlobalValueMap.begin(); }
gvsummary_iterator end() { return GlobalValueMap.end(); }
diff --git a/include/llvm/IR/NoFolder.h b/include/llvm/IR/NoFolder.h
index 61f4817a9b62..def07ffe2ff6 100644
--- a/include/llvm/IR/NoFolder.h
+++ b/include/llvm/IR/NoFolder.h
@@ -24,6 +24,8 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
namespace llvm {
@@ -31,7 +33,7 @@ namespace llvm {
/// NoFolder - Create "constants" (actually, instructions) with no folding.
class NoFolder {
public:
- explicit NoFolder() {}
+ explicit NoFolder() = default;
//===--------------------------------------------------------------------===//
// Binary Operators
@@ -44,15 +46,19 @@ public:
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
+
Instruction *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNSWAdd(LHS, RHS);
}
+
Instruction *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNUWAdd(LHS, RHS);
}
+
Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFAdd(LHS, RHS);
}
+
Instruction *CreateSub(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
BinaryOperator *BO = BinaryOperator::CreateSub(LHS, RHS);
@@ -60,15 +66,19 @@ public:
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
+
Instruction *CreateNSWSub(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNSWSub(LHS, RHS);
}
+
Instruction *CreateNUWSub(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNUWSub(LHS, RHS);
}
+
Instruction *CreateFSub(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFSub(LHS, RHS);
}
+
Instruction *CreateMul(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
BinaryOperator *BO = BinaryOperator::CreateMul(LHS, RHS);
@@ -76,45 +86,57 @@ public:
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
+
Instruction *CreateNSWMul(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNSWMul(LHS, RHS);
}
+
Instruction *CreateNUWMul(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNUWMul(LHS, RHS);
}
+
Instruction *CreateFMul(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFMul(LHS, RHS);
}
+
Instruction *CreateUDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const {
if (!isExact)
return BinaryOperator::CreateUDiv(LHS, RHS);
return BinaryOperator::CreateExactUDiv(LHS, RHS);
}
+
Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateExactUDiv(LHS, RHS);
}
+
Instruction *CreateSDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const {
if (!isExact)
return BinaryOperator::CreateSDiv(LHS, RHS);
return BinaryOperator::CreateExactSDiv(LHS, RHS);
}
+
Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateExactSDiv(LHS, RHS);
}
+
Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFDiv(LHS, RHS);
}
+
Instruction *CreateURem(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateURem(LHS, RHS);
}
+
Instruction *CreateSRem(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateSRem(LHS, RHS);
}
+
Instruction *CreateFRem(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFRem(LHS, RHS);
}
+
Instruction *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false,
bool HasNSW = false) const {
BinaryOperator *BO = BinaryOperator::CreateShl(LHS, RHS);
@@ -122,24 +144,29 @@ public:
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
+
Instruction *CreateLShr(Constant *LHS, Constant *RHS,
bool isExact = false) const {
if (!isExact)
return BinaryOperator::CreateLShr(LHS, RHS);
return BinaryOperator::CreateExactLShr(LHS, RHS);
}
+
Instruction *CreateAShr(Constant *LHS, Constant *RHS,
bool isExact = false) const {
if (!isExact)
return BinaryOperator::CreateAShr(LHS, RHS);
return BinaryOperator::CreateExactAShr(LHS, RHS);
}
+
Instruction *CreateAnd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateAnd(LHS, RHS);
}
+
Instruction *CreateOr(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateOr(LHS, RHS);
}
+
Instruction *CreateXor(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateXor(LHS, RHS);
}
@@ -160,15 +187,19 @@ public:
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
+
Instruction *CreateNSWNeg(Constant *C) const {
return BinaryOperator::CreateNSWNeg(C);
}
+
Instruction *CreateNUWNeg(Constant *C) const {
return BinaryOperator::CreateNUWNeg(C);
}
+
Instruction *CreateFNeg(Constant *C) const {
return BinaryOperator::CreateFNeg(C);
}
+
Instruction *CreateNot(Constant *C) const {
return BinaryOperator::CreateNot(C);
}
@@ -181,12 +212,14 @@ public:
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
}
+
Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
return ConstantExpr::getGetElementPtr(Ty, C, Idx);
}
+
Instruction *CreateGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList) const {
return GetElementPtrInst::Create(Ty, C, IdxList);
@@ -196,6 +229,7 @@ public:
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
}
+
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
Constant *Idx) const {
// This form of the function only exists to avoid ambiguous overload
@@ -203,6 +237,7 @@ public:
// ArrayRef<Value *>.
return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
}
+
Instruction *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList) const {
return GetElementPtrInst::CreateInBounds(Ty, C, IdxList);
@@ -216,13 +251,16 @@ public:
Type *DestTy) const {
return CastInst::Create(Op, C, DestTy);
}
+
Instruction *CreatePointerCast(Constant *C, Type *DestTy) const {
return CastInst::CreatePointerCast(C, DestTy);
}
+
Instruction *CreateIntCast(Constant *C, Type *DestTy,
bool isSigned) const {
return CastInst::CreateIntegerCast(C, DestTy, isSigned);
}
+
Instruction *CreateFPCast(Constant *C, Type *DestTy) const {
return CastInst::CreateFPCast(C, DestTy);
}
@@ -230,15 +268,19 @@ public:
Instruction *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
}
+
Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
+
Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
+
Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
return CastInst::CreateZExtOrBitCast(C, DestTy);
}
+
Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
return CastInst::CreateSExtOrBitCast(C, DestTy);
}
@@ -255,6 +297,7 @@ public:
Constant *LHS, Constant *RHS) const {
return new ICmpInst(P, LHS, RHS);
}
+
Instruction *CreateFCmp(CmpInst::Predicate P,
Constant *LHS, Constant *RHS) const {
return new FCmpInst(P, LHS, RHS);
@@ -294,6 +337,6 @@ public:
}
};
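+
+// Usage sketch: NoFolder is typically supplied as the folder template argument
+// of IRBuilder so that every Create* call emits a real instruction rather than
+// a folded constant. BB, LHS, and RHS are placeholders set up elsewhere.
+//
+//   IRBuilder<NoFolder> Builder(BB);
+//   Value *Sum = Builder.CreateAdd(LHS, RHS); // always an add instruction
+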
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_NOFOLDER_H
diff --git a/include/llvm/IR/Operator.h b/include/llvm/IR/Operator.h
index 5880290f3d99..444ce93921f6 100644
--- a/include/llvm/IR/Operator.h
+++ b/include/llvm/IR/Operator.h
@@ -15,28 +15,22 @@
#ifndef LLVM_IR_OPERATOR_H
#define LLVM_IR_OPERATOR_H
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cstddef>
namespace llvm {
-class GetElementPtrInst;
-class BinaryOperator;
-class ConstantExpr;
-
/// This is a utility class that provides an abstraction for the common
/// functionality between Instructions and ConstantExprs.
class Operator : public User {
-private:
- // The Operator class is intended to be used as a utility, and is never itself
- // instantiated.
- void *operator new(size_t, unsigned) = delete;
- void *operator new(size_t s) = delete;
- Operator() = delete;
-
protected:
// NOTE: Cannot use = delete because it's not legal to delete
// an overridden method that's not deleted in the base class. Cannot leave
@@ -44,6 +38,13 @@ protected:
~Operator() override;
public:
+ // The Operator class is intended to be used as a utility, and is never itself
+ // instantiated.
+ Operator() = delete;
+
+ void *operator new(size_t, unsigned) = delete;
+ void *operator new(size_t s) = delete;
+
/// Return the opcode for this Instruction or ConstantExpr.
unsigned getOpcode() const {
if (const Instruction *I = dyn_cast<Instruction>(this))
@@ -81,6 +82,7 @@ public:
private:
friend class Instruction;
friend class ConstantExpr;
+
void setHasNoUnsignedWrap(bool B) {
SubclassOptionalData =
(SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
@@ -132,6 +134,7 @@ public:
private:
friend class Instruction;
friend class ConstantExpr;
+
void setIsExact(bool B) {
SubclassOptionalData = (SubclassOptionalData & ~IsExact) | (B * IsExact);
}
@@ -148,6 +151,7 @@ public:
OpC == Instruction::AShr ||
OpC == Instruction::LShr;
}
+
static inline bool classof(const ConstantExpr *CE) {
return isPossiblyExactOpcode(CE->getOpcode());
}
@@ -164,7 +168,9 @@ public:
class FastMathFlags {
private:
friend class FPMathOperator;
- unsigned Flags;
+
+ unsigned Flags = 0;
+
FastMathFlags(unsigned F) : Flags(F) { }
public:
@@ -176,8 +182,7 @@ public:
AllowReciprocal = (1 << 4)
};
- FastMathFlags() : Flags(0)
- { }
+ FastMathFlags() = default;
/// Whether any flag is set
bool any() const { return Flags != 0; }
@@ -210,7 +215,6 @@ public:
}
};
-
/// Utility class for floating point operations which can have
/// information about relaxed accuracy requirements attached to them.
class FPMathOperator : public Operator {
@@ -230,21 +234,25 @@ private:
setHasAllowReciprocal(true);
}
}
+
void setHasNoNaNs(bool B) {
SubclassOptionalData =
(SubclassOptionalData & ~FastMathFlags::NoNaNs) |
(B * FastMathFlags::NoNaNs);
}
+
void setHasNoInfs(bool B) {
SubclassOptionalData =
(SubclassOptionalData & ~FastMathFlags::NoInfs) |
(B * FastMathFlags::NoInfs);
}
+
void setHasNoSignedZeros(bool B) {
SubclassOptionalData =
(SubclassOptionalData & ~FastMathFlags::NoSignedZeros) |
(B * FastMathFlags::NoSignedZeros);
}
+
void setHasAllowReciprocal(bool B) {
SubclassOptionalData =
(SubclassOptionalData & ~FastMathFlags::AllowReciprocal) |
@@ -299,9 +307,9 @@ public:
return FastMathFlags(SubclassOptionalData);
}
- /// \brief Get the maximum error permitted by this operation in ULPs. An
- /// accuracy of 0.0 means that the operation should be performed with the
- /// default precision.
+ /// Get the maximum error permitted by this operation in ULPs. An accuracy of
+ /// 0.0 means that the operation should be performed with the default
+ /// precision.
float getFPAccuracy() const;
static inline bool classof(const Instruction *I) {
@@ -313,7 +321,6 @@ public:
}
};
-
/// A helper template for defining operators for individual opcodes.
template<typename SuperClass, unsigned Opc>
class ConcreteOperator : public SuperClass {
@@ -343,7 +350,6 @@ class ShlOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {
};
-
class SDivOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {
};
@@ -357,18 +363,18 @@ class LShrOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {
};
-
class ZExtOperator : public ConcreteOperator<Operator, Instruction::ZExt> {};
-
class GEPOperator
: public ConcreteOperator<Operator, Instruction::GetElementPtr> {
+ friend class GetElementPtrInst;
+ friend class ConstantExpr;
+
enum {
- IsInBounds = (1 << 0)
+ IsInBounds = (1 << 0),
+ // InRangeIndex: bits 1-6
};
- friend class GetElementPtrInst;
- friend class ConstantExpr;
void setIsInBounds(bool B) {
SubclassOptionalData =
(SubclassOptionalData & ~IsInBounds) | (B * IsInBounds);
@@ -380,6 +386,13 @@ public:
return SubclassOptionalData & IsInBounds;
}
+ /// Returns the offset of the index with an inrange attachment, or None if
+ /// none.
+ Optional<unsigned> getInRangeIndex() const {
+ if (SubclassOptionalData >> 1 == 0) return None;
+ return (SubclassOptionalData >> 1) - 1;
+ }
+
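+  // Illustrative encoding example: if SubclassOptionalData is 0b101, the GEP
+  // is inbounds (bit 0 is set) and getInRangeIndex() returns 1, i.e.
+  // (0b10) - 1; a zero value in bits 1-6 means no operand carries an inrange
+  // attachment.
+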
inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
inline op_iterator idx_end() { return op_end(); }
@@ -463,6 +476,7 @@ public:
const Value *getPointerOperand() const {
return getOperand(0);
}
+
static unsigned getPointerOperandIndex() {
return 0U; // get index for modifying correct operand
}
@@ -493,6 +507,6 @@ public:
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_OPERATOR_H
diff --git a/include/llvm/IR/PassManager.h b/include/llvm/IR/PassManager.h
index 402d04a54a41..3e4edd893d3c 100644
--- a/include/llvm/IR/PassManager.h
+++ b/include/llvm/IR/PassManager.h
@@ -41,6 +41,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManagerInternal.h"
@@ -54,50 +55,113 @@
namespace llvm {
-/// \brief An abstract set of preserved analyses following a transformation pass
-/// run.
+/// A special type used by analysis passes to provide an address that
+/// identifies that particular analysis pass type.
///
-/// When a transformation pass is run, it can return a set of analyses whose
-/// results were preserved by that transformation. The default set is "none",
-/// and preserving analyses must be done explicitly.
+/// Analysis passes should have a static data member of this type and derive
+/// from the \c AnalysisInfoMixin to get a static ID method used to identify
+/// the analysis in the pass management infrastructure.
+struct alignas(8) AnalysisKey {};
+
+/// A special type used to provide an address that identifies a set of related
+/// analyses.
+///
+/// These sets are primarily used below to mark sets of analyses as preserved.
+/// An example would be analyses depending only on the CFG of a function.
+/// A transformation can mark that it is preserving the CFG of a function and
+/// then analyses can check for this rather than each transform having to fully
+/// enumerate every analysis preserved.
+struct alignas(8) AnalysisSetKey {};
+
+/// Class for tracking what analyses are preserved after a transformation pass
+/// runs over some unit of IR.
+///
+/// Transformation passes build and return these objects when run over the IR
+/// to communicate which analyses remain valid afterward. For most passes this
+/// is fairly simple: if they don't change anything all analyses are preserved,
+/// otherwise only a short list of analyses that have been explicitly updated
+/// are preserved.
+///
+/// This class also provides the ability to mark abstract *sets* of analyses as
+/// preserved. These sets allow passes to indicate that they preserve broad
+/// aspects of the IR (such as its CFG) and analyses to opt in to that being
+/// sufficient without the passes having to fully enumerate such analyses.
+///
+/// Finally, this class can represent "abandoning" an analysis, which marks it
+/// as not-preserved even if it would be covered by some abstract set of
+/// analyses.
///
-/// There is also an explicit all state which can be used (for example) when
-/// the IR is not mutated at all.
+/// Given a `PreservedAnalyses` object, an analysis will typically want to
+/// figure out whether it is preserved. In the example below, MyAnalysisType is
+/// preserved if it's not abandoned and either (a) it's explicitly marked as
+/// preserved, (b) the set AllAnalysesOn<MyIRUnit> is preserved, or (c) both
+/// AnalysisSetA and AnalysisSetB are preserved.
+///
+/// ```
+/// auto PAC = PA.getChecker<MyAnalysisType>();
+/// if (PAC.preserved() || PAC.preservedSet<AllAnalysesOn<MyIRUnit>>() ||
+/// (PAC.preservedSet<AnalysisSetA>() &&
+/// PAC.preservedSet<AnalysisSetB>())) {
+/// // The analysis has been successfully preserved ...
+/// }
+/// ```
class PreservedAnalyses {
public:
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- PreservedAnalyses() {}
- PreservedAnalyses(const PreservedAnalyses &Arg)
- : PreservedPassIDs(Arg.PreservedPassIDs) {}
- PreservedAnalyses(PreservedAnalyses &&Arg)
- : PreservedPassIDs(std::move(Arg.PreservedPassIDs)) {}
- friend void swap(PreservedAnalyses &LHS, PreservedAnalyses &RHS) {
- using std::swap;
- swap(LHS.PreservedPassIDs, RHS.PreservedPassIDs);
- }
- PreservedAnalyses &operator=(PreservedAnalyses RHS) {
- swap(*this, RHS);
- return *this;
- }
-
/// \brief Convenience factory function for the empty preserved set.
static PreservedAnalyses none() { return PreservedAnalyses(); }
/// \brief Construct a special preserved set that preserves all passes.
static PreservedAnalyses all() {
PreservedAnalyses PA;
- PA.PreservedPassIDs.insert((void *)AllPassesID);
+ PA.PreservedIDs.insert(&AllAnalysesKey);
return PA;
}
- /// \brief Mark a particular pass as preserved, adding it to the set.
- template <typename PassT> void preserve() { preserve(PassT::ID()); }
+ /// Mark an analysis as preserved.
+ template <typename AnalysisT> void preserve() { preserve(AnalysisT::ID()); }
- /// \brief Mark an abstract PassID as preserved, adding it to the set.
- void preserve(void *PassID) {
+ /// Mark an analysis as preserved using its ID.
+ void preserve(AnalysisKey *ID) {
+ // Clear this ID from the explicit not-preserved set if present.
+ NotPreservedAnalysisIDs.erase(ID);
+
+    // If we're not already preserving all analyses (other than those in
+    // NotPreservedAnalysisIDs), explicitly record this one as preserved.
+ if (!areAllPreserved())
+ PreservedIDs.insert(ID);
+ }
+
+ /// Mark an analysis set as preserved.
+ template <typename AnalysisSetT> void preserveSet() {
+ preserveSet(AnalysisSetT::ID());
+ }
+
+ /// Mark an analysis set as preserved using its ID.
+ void preserveSet(AnalysisSetKey *ID) {
+ // If we're not already in the saturated 'all' state, add this set.
if (!areAllPreserved())
- PreservedPassIDs.insert(PassID);
+ PreservedIDs.insert(ID);
+ }
+
+ /// Mark an analysis as abandoned.
+ ///
+ /// An abandoned analysis is not preserved, even if it is nominally covered
+ /// by some other set or was previously explicitly marked as preserved.
+ ///
+ /// Note that you can only abandon a specific analysis, not a *set* of
+ /// analyses.
+ template <typename AnalysisT> void abandon() { abandon(AnalysisT::ID()); }
+
+ /// Mark an analysis as abandoned using its ID.
+ ///
+ /// An abandoned analysis is not preserved, even if it is nominally covered
+ /// by some other set or was previously explicitly marked as preserved.
+ ///
+ /// Note that you can only abandon a specific analysis, not a *set* of
+ /// analyses.
+ void abandon(AnalysisKey *ID) {
+ PreservedIDs.erase(ID);
+ NotPreservedAnalysisIDs.insert(ID);
}
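+
+  // Sketch of how a transformation pass might combine these calls when
+  // building its return value; DominatorTreeAnalysis is a real analysis,
+  // SomeStatefulAnalysis is just a placeholder name:
+  //
+  //   PreservedAnalyses PA;
+  //   PA.preserve<DominatorTreeAnalysis>(); // kept up to date by the pass
+  //   PA.abandon<SomeStatefulAnalysis>();   // known-stale, force invalidation
+  //   return PA;
+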
/// \brief Intersect this set with another in place.
@@ -108,12 +172,18 @@ public:
if (Arg.areAllPreserved())
return;
if (areAllPreserved()) {
- PreservedPassIDs = Arg.PreservedPassIDs;
+ *this = Arg;
return;
}
- for (void *P : PreservedPassIDs)
- if (!Arg.PreservedPassIDs.count(P))
- PreservedPassIDs.erase(P);
+ // The intersection requires the *union* of the explicitly not-preserved
+ // IDs and the *intersection* of the preserved IDs.
+ for (auto ID : Arg.NotPreservedAnalysisIDs) {
+ PreservedIDs.erase(ID);
+ NotPreservedAnalysisIDs.insert(ID);
+ }
+ for (auto ID : PreservedIDs)
+ if (!Arg.PreservedIDs.count(ID))
+ PreservedIDs.erase(ID);
}
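+
+  // For example, intersecting {preserved: A, B; not-preserved: C} with
+  // {preserved: B; not-preserved: D} yields {preserved: B; not-preserved: C, D}:
+  // the explicitly not-preserved IDs are unioned, the preserved IDs intersected.
+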
/// \brief Intersect this set with a temporary other set in place.
@@ -124,54 +194,115 @@ public:
if (Arg.areAllPreserved())
return;
if (areAllPreserved()) {
- PreservedPassIDs = std::move(Arg.PreservedPassIDs);
+ *this = std::move(Arg);
return;
}
- for (void *P : PreservedPassIDs)
- if (!Arg.PreservedPassIDs.count(P))
- PreservedPassIDs.erase(P);
+ // The intersection requires the *union* of the explicitly not-preserved
+ // IDs and the *intersection* of the preserved IDs.
+ for (auto ID : Arg.NotPreservedAnalysisIDs) {
+ PreservedIDs.erase(ID);
+ NotPreservedAnalysisIDs.insert(ID);
+ }
+ for (auto ID : PreservedIDs)
+ if (!Arg.PreservedIDs.count(ID))
+ PreservedIDs.erase(ID);
}
- /// \brief Query whether a pass is marked as preserved by this set.
- template <typename PassT> bool preserved() const {
- return preserved(PassT::ID());
- }
+ /// A checker object that makes it easy to query for whether an analysis or
+ /// some set covering it is preserved.
+ class PreservedAnalysisChecker {
+ friend class PreservedAnalyses;
+
+ const PreservedAnalyses &PA;
+ AnalysisKey *const ID;
+ const bool IsAbandoned;
- /// \brief Query whether an abstract pass ID is marked as preserved by this
- /// set.
- bool preserved(void *PassID) const {
- return PreservedPassIDs.count((void *)AllPassesID) ||
- PreservedPassIDs.count(PassID);
+ /// A PreservedAnalysisChecker is tied to a particular Analysis because
+ /// `preserved()` and `preservedSet()` both return false if the Analysis
+ /// was abandoned.
+ PreservedAnalysisChecker(const PreservedAnalyses &PA, AnalysisKey *ID)
+ : PA(PA), ID(ID), IsAbandoned(PA.NotPreservedAnalysisIDs.count(ID)) {}
+
+ public:
+ /// Returns true if the checker's analysis was not abandoned and the
+    /// analysis is either explicitly preserved or all analyses are
+ /// preserved.
+ bool preserved() {
+ return !IsAbandoned && (PA.PreservedIDs.count(&AllAnalysesKey) ||
+ PA.PreservedIDs.count(ID));
+ }
+
+    /// Returns true if the checker's analysis was not abandoned and the
+    /// provided set type is either explicitly preserved or all analyses are
+ /// preserved.
+ template <typename AnalysisSetT> bool preservedSet() {
+ AnalysisSetKey *SetID = AnalysisSetT::ID();
+ return !IsAbandoned && (PA.PreservedIDs.count(&AllAnalysesKey) ||
+ PA.PreservedIDs.count(SetID));
+ }
+ };
+
+ /// Build a checker for this `PreservedAnalyses` and the specified analysis
+ /// type.
+ ///
+ /// You can use the returned object to query whether an analysis was
+  /// preserved. See the example in the comment on `PreservedAnalyses`.
+ template <typename AnalysisT> PreservedAnalysisChecker getChecker() const {
+ return PreservedAnalysisChecker(*this, AnalysisT::ID());
}
- /// \brief Query whether all of the analyses in the set are preserved.
- bool preserved(PreservedAnalyses Arg) {
- if (Arg.areAllPreserved())
- return areAllPreserved();
- for (void *P : Arg.PreservedPassIDs)
- if (!preserved(P))
- return false;
- return true;
+ /// Build a checker for this `PreservedAnalyses` and the specified analysis
+ /// ID.
+ ///
+ /// You can use the returned object to query whether an analysis was
+  /// preserved. See the example in the comment on `PreservedAnalyses`.
+ PreservedAnalysisChecker getChecker(AnalysisKey *ID) const {
+ return PreservedAnalysisChecker(*this, ID);
}
- /// \brief Test whether all passes are preserved.
+ /// Test whether all analyses are preserved (and none are abandoned).
///
- /// This is used primarily to optimize for the case of no changes which will
- /// common in many scenarios.
+ /// This lets analyses optimize for the common case where a transformation
+ /// made no changes to the IR.
bool areAllPreserved() const {
- return PreservedPassIDs.count((void *)AllPassesID);
+ return NotPreservedAnalysisIDs.empty() &&
+ PreservedIDs.count(&AllAnalysesKey);
+ }
+
+ /// Directly test whether a set of analyses is preserved.
+ ///
+ /// This is only true when no analyses have been explicitly abandoned.
+ template <typename AnalysisSetT> bool allAnalysesInSetPreserved() const {
+ return allAnalysesInSetPreserved(AnalysisSetT::ID());
+ }
+
+ /// Directly test whether a set of analyses is preserved.
+ ///
+ /// This is only true when no analyses have been explicitly abandoned.
+ bool allAnalysesInSetPreserved(AnalysisSetKey *SetID) const {
+ return NotPreservedAnalysisIDs.empty() &&
+ (PreservedIDs.count(&AllAnalysesKey) || PreservedIDs.count(SetID));
}
private:
- // Note that this must not be -1 or -2 as those are already used by the
- // SmallPtrSet.
- static const uintptr_t AllPassesID = (intptr_t)(-3);
+ /// A special key used to indicate all analyses.
+ static AnalysisSetKey AllAnalysesKey;
- SmallPtrSet<void *, 2> PreservedPassIDs;
+ /// The IDs of analyses and analysis sets that are preserved.
+ SmallPtrSet<void *, 2> PreservedIDs;
+
+ /// The IDs of explicitly not-preserved analyses.
+ ///
+ /// If an analysis in this set is covered by a set in `PreservedIDs`, we
+ /// consider it not-preserved. That is, `NotPreservedAnalysisIDs` always
+ /// "wins" over analysis sets in `PreservedIDs`.
+ ///
+ /// Also, a given ID should never occur both here and in `PreservedIDs`.
+ SmallPtrSet<AnalysisKey *, 2> NotPreservedAnalysisIDs;
};
// Forward declare the analysis manager template.
-template <typename IRUnitT> class AnalysisManager;
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
/// A CRTP mix-in to automatically provide informational APIs needed for
/// passes.
@@ -195,10 +326,14 @@ template <typename DerivedT> struct PassInfoMixin {
/// specifically used for analyses.
template <typename DerivedT>
struct AnalysisInfoMixin : PassInfoMixin<DerivedT> {
- /// Returns an opaque, unique ID for this pass type.
+ /// Returns an opaque, unique ID for this analysis type.
+ ///
+ /// This ID is a pointer type that is guaranteed to be 8-byte aligned and
+ /// thus suitable for use in sets, maps, and other data structures optimized
+ /// for pointer-like types using the alignment-provided low bits.
///
- /// Note that this requires the derived type provide a static member whose
- /// address can be converted to a void pointer.
+  /// Note that this requires the derived type to provide a static \c AnalysisKey
+ /// member called \c Key.
///
/// FIXME: The only reason the derived type needs to provide this rather than
/// this mixin providing it is due to broken implementations which cannot
@@ -207,10 +342,35 @@ struct AnalysisInfoMixin : PassInfoMixin<DerivedT> {
/// instantiation. The only currently known platform with this limitation are
/// Windows DLL builds, specifically building each part of LLVM as a DLL. If
/// we ever remove that build configuration, this mixin can provide the
- /// static PassID as well.
- static void *ID() { return (void *)&DerivedT::PassID; }
+ /// static key as well.
+ static AnalysisKey *ID() { return &DerivedT::Key; }
+};
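+
+// Sketch of a minimal analysis written against this mixin, together with how
+// it would be registered and queried. MyAnalysis, F, and FAM are placeholder
+// names; the mixin, AnalysisKey, and FunctionAnalysisManager are real.
+//
+//   class MyAnalysis : public AnalysisInfoMixin<MyAnalysis> {
+//     friend AnalysisInfoMixin<MyAnalysis>;
+//     static AnalysisKey Key; // ID() returns &Key
+//
+//   public:
+//     struct Result { size_t NumBlocks; };
+//     Result run(Function &F, FunctionAnalysisManager &) { return {F.size()}; }
+//   };
+//   AnalysisKey MyAnalysis::Key;
+//
+//   FunctionAnalysisManager FAM;
+//   FAM.registerPass([] { return MyAnalysis(); });
+//   MyAnalysis::Result &R = FAM.getResult<MyAnalysis>(F);
+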
+
+/// A class template to provide analysis sets for IR units.
+///
+/// Analyses operate on units of IR. It is useful to be able to talk about
+/// preservation of all analyses for a given unit of IR as a set. This class
+/// template can be used with the \c PreservedAnalyses API for that purpose and
+/// the \c AnalysisManager will automatically check and use this set to skip
+/// invalidation events.
+///
+/// Note that you must provide an explicit instantiation declaration and
+/// definition for this template in order to get the correct behavior on
+/// Windows. Otherwise, the address of SetKey will not be stable.
+template <typename IRUnitT>
+class AllAnalysesOn {
+public:
+ static AnalysisSetKey *ID() { return &SetKey; }
+
+private:
+ static AnalysisSetKey SetKey;
};
+template <typename IRUnitT> AnalysisSetKey AllAnalysesOn<IRUnitT>::SetKey;
+
+extern template class AllAnalysesOn<Module>;
+extern template class AllAnalysesOn<Function>;
+
/// \brief Manages a sequence of passes over units of IR.
///
/// A pass manager contains a sequence of passes to run over units of IR. It is
@@ -222,15 +382,21 @@ struct AnalysisInfoMixin : PassInfoMixin<DerivedT> {
/// that analysis manager to each pass it runs, as well as calling the analysis
/// manager's invalidation routine with the PreservedAnalyses of each pass it
/// runs.
-template <typename IRUnitT>
-class PassManager : public PassInfoMixin<PassManager<IRUnitT>> {
+template <typename IRUnitT,
+ typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+ typename... ExtraArgTs>
+class PassManager : public PassInfoMixin<
+ PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...>> {
public:
/// \brief Construct a pass manager.
///
/// It can be passed a flag to get debug logging as the passes are run.
- PassManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
+ explicit PassManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
+
+ // FIXME: These are equivalent to the default move constructor/move
+ // assignment. However, using = default triggers linker errors due to the
+  // explicit instantiations below. Find a way to use the default and remove the
+ // duplicated code here.
PassManager(PassManager &&Arg)
: Passes(std::move(Arg.Passes)),
DebugLogging(std::move(Arg.DebugLogging)) {}
@@ -241,7 +407,8 @@ public:
}
/// \brief Run all of the passes in this manager over the IR.
- PreservedAnalyses run(IRUnitT &IR, AnalysisManager<IRUnitT> &AM) {
+ PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
+ ExtraArgTs... ExtraArgs) {
PreservedAnalyses PA = PreservedAnalyses::all();
if (DebugLogging)
@@ -252,16 +419,14 @@ public:
dbgs() << "Running pass: " << Passes[Idx]->name() << " on "
<< IR.getName() << "\n";
- PreservedAnalyses PassPA = Passes[Idx]->run(IR, AM);
+ PreservedAnalyses PassPA = Passes[Idx]->run(IR, AM, ExtraArgs...);
// Update the analysis manager as each pass runs and potentially
- // invalidates analyses. We also update the preserved set of analyses
- // based on what analyses we have already handled the invalidation for
- // here and don't need to invalidate when finished.
- PassPA = AM.invalidate(IR, std::move(PassPA));
+ // invalidates analyses.
+ AM.invalidate(IR, PassPA);
- // Finally, we intersect the final preserved analyses to compute the
- // aggregate preserved set for this pass manager.
+ // Finally, we intersect the preserved analyses to compute the aggregate
+ // preserved set for this pass manager.
PA.intersect(std::move(PassPA));
// FIXME: Historically, the pass managers all called the LLVM context's
@@ -271,6 +436,12 @@ public:
//IR.getContext().yield();
}
+    // Invalidation was handled after each pass in the above loop for the
+ // current unit of IR. Therefore, the remaining analysis results in the
+ // AnalysisManager are preserved. We mark this with a set so that we don't
+ // need to inspect each one individually.
+ PA.preserveSet<AllAnalysesOn<IRUnitT>>();
+
if (DebugLogging)
dbgs() << "Finished " << getTypeName<IRUnitT>() << " pass manager run.\n";
@@ -278,15 +449,15 @@ public:
}
template <typename PassT> void addPass(PassT Pass) {
- typedef detail::PassModel<IRUnitT, PassT> PassModelT;
+ typedef detail::PassModel<IRUnitT, PassT, PreservedAnalyses,
+ AnalysisManagerT, ExtraArgTs...>
+ PassModelT;
Passes.emplace_back(new PassModelT(std::move(Pass)));
}
private:
- typedef detail::PassConcept<IRUnitT> PassConceptT;
-
- PassManager(const PassManager &) = delete;
- PassManager &operator=(const PassManager &) = delete;
+ typedef detail::PassConcept<IRUnitT, AnalysisManagerT, ExtraArgTs...>
+ PassConceptT;
std::vector<std::unique_ptr<PassConceptT>> Passes;
@@ -302,63 +473,190 @@ extern template class PassManager<Function>;
/// \brief Convenience typedef for a pass manager over functions.
typedef PassManager<Function> FunctionPassManager;
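+
+// Usage sketch: building and running a function pass pipeline. MyPass is a
+// placeholder pass type (anything with a name() and a
+// PreservedAnalyses run(Function &, FunctionAnalysisManager &)); F and FAM are
+// a Function and a FunctionAnalysisManager set up elsewhere.
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(MyPass());
+//   PreservedAnalyses PA = FPM.run(F, FAM);
+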
-namespace detail {
-
-/// \brief A CRTP base used to implement analysis managers.
-///
-/// This class template serves as the boiler plate of an analysis manager. Any
-/// analysis manager can be implemented on top of this base class. Any
-/// implementation will be required to provide specific hooks:
-///
-/// - getResultImpl
-/// - getCachedResultImpl
-/// - invalidateImpl
-///
-/// The details of the call pattern are within.
+/// \brief A generic analysis pass manager with lazy running and caching of
+/// results.
///
-/// Note that there is also a generic analysis manager template which implements
-/// the above required functions along with common datastructures used for
-/// managing analyses. This base class is factored so that if you need to
-/// customize the handling of a specific IR unit, you can do so without
-/// replicating *all* of the boilerplate.
-template <typename DerivedT, typename IRUnitT> class AnalysisManagerBase {
- DerivedT *derived_this() { return static_cast<DerivedT *>(this); }
- const DerivedT *derived_this() const {
- return static_cast<const DerivedT *>(this);
+/// This analysis manager can be used for any IR unit where the address of the
+/// IR unit suffices as its identity. It manages the cache of analysis results
+/// for each unit of IR, keyed by that unit's address.
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager {
+public:
+ class Invalidator;
+
+private:
+ // Now that we've defined our invalidator, we can build types for the concept
+ // types.
+ typedef detail::AnalysisResultConcept<IRUnitT, PreservedAnalyses, Invalidator>
+ ResultConceptT;
+ typedef detail::AnalysisPassConcept<IRUnitT, PreservedAnalyses, Invalidator,
+ ExtraArgTs...>
+ PassConceptT;
+
+  /// \brief List of analysis pass IDs and associated result concept pointers.
+  ///
+  /// Requires iterators to be valid across appending new entries and arbitrary
+  /// erases. Provides the analysis ID to enable finding iterators to a given
+  /// entry in the maps below, and provides the storage for the actual result
+  /// concept.
+ typedef std::list<std::pair<AnalysisKey *, std::unique_ptr<ResultConceptT>>>
+ AnalysisResultListT;
+
+ /// \brief Map type from IRUnitT pointer to our custom list type.
+ typedef DenseMap<IRUnitT *, AnalysisResultListT> AnalysisResultListMapT;
+
+ /// \brief Map type from a pair of analysis ID and IRUnitT pointer to an
+ /// iterator into a particular result list which is where the actual result
+ /// is stored.
+ typedef DenseMap<std::pair<AnalysisKey *, IRUnitT *>,
+ typename AnalysisResultListT::iterator>
+ AnalysisResultMapT;
+
+public:
+ /// API to communicate dependencies between analyses during invalidation.
+ ///
+ /// When an analysis result embeds handles to other analysis results, it
+  /// needs to be invalidated both when its own information isn't preserved and
+  /// when any of those embedded analysis results end up invalidated. We pass in
+ /// an \c Invalidator object from the analysis manager in order to let the
+ /// analysis results themselves define the dependency graph on the fly. This
+ /// avoids building an explicit data structure representation of the
+ /// dependencies between analysis results.
+ class Invalidator {
+ public:
+ /// Trigger the invalidation of some other analysis pass if not already
+ /// handled and return whether it will in fact be invalidated.
+ ///
+ /// This is expected to be called from within a given analysis result's \c
+ /// invalidate method to trigger a depth-first walk of all inter-analysis
+ /// dependencies. The same \p IR unit and \p PA passed to that result's \c
+ /// invalidate method should in turn be provided to this routine.
+ ///
+ /// The first time this is called for a given analysis pass, it will
+ /// trigger the corresponding result's \c invalidate method to be called.
+ /// Subsequent calls will use a cache of the results of that initial call.
+ /// It is an error to form cyclic dependencies between analysis results.
+ ///
+ /// This returns true if the given analysis pass's result is invalid and
+    /// any dependencies on it will become invalid as a result.
+ template <typename PassT>
+ bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) {
+ typedef detail::AnalysisResultModel<IRUnitT, PassT,
+ typename PassT::Result,
+ PreservedAnalyses, Invalidator>
+ ResultModelT;
+ return invalidateImpl<ResultModelT>(PassT::ID(), IR, PA);
+ }
+
+ /// A type-erased variant of the above invalidate method with the same core
+ /// API other than passing an analysis ID rather than an analysis type
+ /// parameter.
+ ///
+ /// This is sadly less efficient than the above routine, which leverages
+ /// the type parameter to avoid the type erasure overhead.
+ bool invalidate(AnalysisKey *ID, IRUnitT &IR, const PreservedAnalyses &PA) {
+ return invalidateImpl<>(ID, IR, PA);
+ }
+
+ private:
+ friend class AnalysisManager;
+
+ template <typename ResultT = ResultConceptT>
+ bool invalidateImpl(AnalysisKey *ID, IRUnitT &IR,
+ const PreservedAnalyses &PA) {
+ // If we've already visited this pass, return true if it was invalidated
+ // and false otherwise.
+ auto IMapI = IsResultInvalidated.find(ID);
+ if (IMapI != IsResultInvalidated.end())
+ return IMapI->second;
+
+ // Otherwise look up the result object.
+ auto RI = Results.find({ID, &IR});
+ assert(RI != Results.end() &&
+ "Trying to invalidate a dependent result that isn't in the "
+ "manager's cache is always an error, likely due to a stale result "
+ "handle!");
+
+ auto &Result = static_cast<ResultT &>(*RI->second->second);
+
+ // Insert into the map whether the result should be invalidated and
+ // return that. Note that we cannot re-use IMapI and must do a fresh
+ // insert here as calling the invalidate routine could (recursively)
+ // insert things into the map making any iterator or reference invalid.
+ bool Inserted;
+ std::tie(IMapI, Inserted) =
+ IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, *this)});
+ (void)Inserted;
+ assert(Inserted && "Should not have already inserted this ID, likely "
+ "indicates a dependency cycle!");
+ return IMapI->second;
+ }
+
+ Invalidator(SmallDenseMap<AnalysisKey *, bool, 8> &IsResultInvalidated,
+ const AnalysisResultMapT &Results)
+ : IsResultInvalidated(IsResultInvalidated), Results(Results) {}
+
+ SmallDenseMap<AnalysisKey *, bool, 8> &IsResultInvalidated;
+ const AnalysisResultMapT &Results;
+ };
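+
+  // Sketch of a result participating in this protocol; MyAnalysis and
+  // OtherAnalysis are placeholder analyses. The result stays valid only if it
+  // is itself preserved and everything it depends on remains valid.
+  //
+  //   bool MyAnalysis::Result::invalidate(
+  //       Function &F, const PreservedAnalyses &PA,
+  //       FunctionAnalysisManager::Invalidator &Inv) {
+  //     auto PAC = PA.getChecker<MyAnalysis>();
+  //     if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
+  //       return true;                              // invalidated directly
+  //     return Inv.invalidate<OtherAnalysis>(F, PA); // or via a dependency
+  //   }
+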
+
+ /// \brief Construct an empty analysis manager.
+ ///
+ /// A flag can be passed to indicate that the manager should perform debug
+ /// logging.
+ AnalysisManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
+ AnalysisManager(AnalysisManager &&) = default;
+ AnalysisManager &operator=(AnalysisManager &&) = default;
+
+ /// \brief Returns true if the analysis manager has an empty results cache.
+ bool empty() const {
+ assert(AnalysisResults.empty() == AnalysisResultLists.empty() &&
+ "The storage and index of analysis results disagree on how many "
+ "there are!");
+ return AnalysisResults.empty();
}
- AnalysisManagerBase(const AnalysisManagerBase &) = delete;
- AnalysisManagerBase &operator=(const AnalysisManagerBase &) = delete;
+ /// \brief Clear any results for a single unit of IR.
+ ///
+ /// This doesn't invalidate but directly clears the results. It is useful
+ /// when the IR is being removed and we want to clear out all the memory
+ /// pinned for it.
+ void clear(IRUnitT &IR) {
+ if (DebugLogging)
+ dbgs() << "Clearing all analysis results for: " << IR.getName() << "\n";
-protected:
- typedef detail::AnalysisResultConcept<IRUnitT> ResultConceptT;
- typedef detail::AnalysisPassConcept<IRUnitT> PassConceptT;
+ auto ResultsListI = AnalysisResultLists.find(&IR);
+ if (ResultsListI == AnalysisResultLists.end())
+ return;
+ // Clear the map pointing into the results list.
+ for (auto &IDAndResult : ResultsListI->second)
+ AnalysisResults.erase({IDAndResult.first, &IR});
- // FIXME: Provide template aliases for the models when we're using C++11 in
- // a mode supporting them.
+ // And actually destroy and erase the results associated with this IR.
+ AnalysisResultLists.erase(ResultsListI);
+ }
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- AnalysisManagerBase() {}
- AnalysisManagerBase(AnalysisManagerBase &&Arg)
- : AnalysisPasses(std::move(Arg.AnalysisPasses)) {}
- AnalysisManagerBase &operator=(AnalysisManagerBase &&RHS) {
- AnalysisPasses = std::move(RHS.AnalysisPasses);
- return *this;
+ /// \brief Clear the analysis result cache.
+ ///
+ /// This routine allows cleaning up when the set of IR units itself has
+  /// potentially changed, and thus we can't even look up a result and
+ /// invalidate it directly. Notably, this does *not* call invalidate
+ /// functions as there is nothing to be done for them.
+ void clear() {
+ AnalysisResults.clear();
+ AnalysisResultLists.clear();
}
-public:
/// \brief Get the result of an analysis pass for this module.
///
/// If there is not a valid cached result in the manager already, this will
/// re-run the analysis to produce a valid result.
- template <typename PassT> typename PassT::Result &getResult(IRUnitT &IR) {
+ template <typename PassT>
+ typename PassT::Result &getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs) {
assert(AnalysisPasses.count(PassT::ID()) &&
"This analysis pass was not registered prior to being queried");
-
ResultConceptT &ResultConcept =
- derived_this()->getResultImpl(PassT::ID(), IR);
- typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ getResultImpl(PassT::ID(), IR, ExtraArgs...);
+ typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
+ PreservedAnalyses, Invalidator>
ResultModelT;
return static_cast<ResultModelT &>(ResultConcept).Result;
}
@@ -373,12 +671,12 @@ public:
assert(AnalysisPasses.count(PassT::ID()) &&
"This analysis pass was not registered prior to being queried");
- ResultConceptT *ResultConcept =
- derived_this()->getCachedResultImpl(PassT::ID(), IR);
+ ResultConceptT *ResultConcept = getCachedResultImpl(PassT::ID(), IR);
if (!ResultConcept)
return nullptr;
- typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
+ PreservedAnalyses, Invalidator>
ResultModelT;
return &static_cast<ResultModelT *>(ResultConcept)->Result;
}
@@ -401,9 +699,12 @@ public:
/// interface also lends itself to minimizing the number of times we have to
/// do lookups for analyses or construct complex passes only to throw them
/// away.
- template <typename PassBuilderT> bool registerPass(PassBuilderT PassBuilder) {
+ template <typename PassBuilderT>
+ bool registerPass(PassBuilderT &&PassBuilder) {
typedef decltype(PassBuilder()) PassT;
- typedef detail::AnalysisPassModel<IRUnitT, PassT> PassModelT;
+ typedef detail::AnalysisPassModel<IRUnitT, PassT, PreservedAnalyses,
+ Invalidator, ExtraArgTs...>
+ PassModelT;
auto &PassPtr = AnalysisPasses[PassT::ID()];
if (PassPtr)
@@ -421,125 +722,112 @@ public:
template <typename PassT> void invalidate(IRUnitT &IR) {
assert(AnalysisPasses.count(PassT::ID()) &&
"This analysis pass was not registered prior to being invalidated");
- derived_this()->invalidateImpl(PassT::ID(), IR);
+ invalidateImpl(PassT::ID(), IR);
}
/// \brief Invalidate analyses cached for an IR unit.
///
/// Walk through all of the analyses pertaining to this unit of IR and
/// invalidate them unless they are preserved by the PreservedAnalyses set.
- /// We accept the PreservedAnalyses set by value and update it with each
- /// analyis pass which has been successfully invalidated and thus can be
- /// preserved going forward. The updated set is returned.
- PreservedAnalyses invalidate(IRUnitT &IR, PreservedAnalyses PA) {
- return derived_this()->invalidateImpl(IR, std::move(PA));
- }
-
-protected:
- /// \brief Lookup a registered analysis pass.
- PassConceptT &lookupPass(void *PassID) {
- typename AnalysisPassMapT::iterator PI = AnalysisPasses.find(PassID);
- assert(PI != AnalysisPasses.end() &&
- "Analysis passes must be registered prior to being queried!");
- return *PI->second;
- }
-
- /// \brief Lookup a registered analysis pass.
- const PassConceptT &lookupPass(void *PassID) const {
- typename AnalysisPassMapT::const_iterator PI = AnalysisPasses.find(PassID);
- assert(PI != AnalysisPasses.end() &&
- "Analysis passes must be registered prior to being queried!");
- return *PI->second;
- }
+ void invalidate(IRUnitT &IR, const PreservedAnalyses &PA) {
+ // We're done if all analyses on this IR unit are preserved.
+ if (PA.allAnalysesInSetPreserved<AllAnalysesOn<IRUnitT>>())
+ return;
-private:
- /// \brief Map type from module analysis pass ID to pass concept pointer.
- typedef DenseMap<void *, std::unique_ptr<PassConceptT>> AnalysisPassMapT;
+ if (DebugLogging)
+ dbgs() << "Invalidating all non-preserved analyses for: " << IR.getName()
+ << "\n";
- /// \brief Collection of module analysis passes, indexed by ID.
- AnalysisPassMapT AnalysisPasses;
-};
+ // Track whether each pass's result is invalidated. Memoize the results
+ // using the IsResultInvalidated map.
+ SmallDenseMap<AnalysisKey *, bool, 8> IsResultInvalidated;
+ Invalidator Inv(IsResultInvalidated, AnalysisResults);
+ AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
+ for (auto &AnalysisResultPair : ResultsList) {
+ // This is basically the same thing as Invalidator::invalidate, but we
+ // can't call it here because we're operating on the type-erased result.
+      // Moreover, if we instead called invalidate() directly, it would do an
+      // unnecessary lookup in ResultsList.
+ AnalysisKey *ID = AnalysisResultPair.first;
+ auto &Result = *AnalysisResultPair.second;
+
+ auto IMapI = IsResultInvalidated.find(ID);
+ if (IMapI != IsResultInvalidated.end())
+ // This result was already handled via the Invalidator.
+ continue;
-} // End namespace detail
+ // Try to invalidate the result, giving it the Invalidator so it can
+ // recursively query for any dependencies it has and record the result.
+ // Note that we cannot re-use 'IMapI' here or pre-insert the ID as the
+ // invalidate method may insert things into the map as well, invalidating
+ // any iterator or pointer.
+ bool Inserted =
+ IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, Inv)})
+ .second;
+ (void)Inserted;
+ assert(Inserted && "Should never have already inserted this ID, likely "
+ "indicates a cycle!");
+ }
-/// \brief A generic analysis pass manager with lazy running and caching of
-/// results.
-///
-/// This analysis manager can be used for any IR unit where the address of the
-/// IR unit sufficies as its identity. It manages the cache for a unit of IR via
-/// the address of each unit of IR cached.
-template <typename IRUnitT>
-class AnalysisManager
- : public detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT> {
- friend class detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT>;
- typedef detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT> BaseT;
- typedef typename BaseT::ResultConceptT ResultConceptT;
- typedef typename BaseT::PassConceptT PassConceptT;
+ // Now erase the results that were marked above as invalidated.
+ if (!IsResultInvalidated.empty()) {
+ for (auto I = ResultsList.begin(), E = ResultsList.end(); I != E;) {
+ AnalysisKey *ID = I->first;
+ if (!IsResultInvalidated.lookup(ID)) {
+ ++I;
+ continue;
+ }
-public:
- // Most public APIs are inherited from the CRTP base class.
+ if (DebugLogging)
+ dbgs() << "Invalidating analysis: " << this->lookUpPass(ID).name()
+ << "\n";
- /// \brief Construct an empty analysis manager.
- ///
- /// A flag can be passed to indicate that the manager should perform debug
- /// logging.
- AnalysisManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
+ I = ResultsList.erase(I);
+ AnalysisResults.erase({ID, &IR});
+ }
+ }
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- AnalysisManager(AnalysisManager &&Arg)
- : BaseT(std::move(static_cast<BaseT &>(Arg))),
- AnalysisResults(std::move(Arg.AnalysisResults)),
- DebugLogging(std::move(Arg.DebugLogging)) {}
- AnalysisManager &operator=(AnalysisManager &&RHS) {
- BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
- AnalysisResults = std::move(RHS.AnalysisResults);
- DebugLogging = std::move(RHS.DebugLogging);
- return *this;
+ if (ResultsList.empty())
+ AnalysisResultLists.erase(&IR);
}
- /// \brief Returns true if the analysis manager has an empty results cache.
- bool empty() const {
- assert(AnalysisResults.empty() == AnalysisResultLists.empty() &&
- "The storage and index of analysis results disagree on how many "
- "there are!");
- return AnalysisResults.empty();
+private:
+ /// \brief Look up a registered analysis pass.
+ PassConceptT &lookUpPass(AnalysisKey *ID) {
+ typename AnalysisPassMapT::iterator PI = AnalysisPasses.find(ID);
+ assert(PI != AnalysisPasses.end() &&
+ "Analysis passes must be registered prior to being queried!");
+ return *PI->second;
}
- /// \brief Clear the analysis result cache.
- ///
- /// This routine allows cleaning up when the set of IR units itself has
- /// potentially changed, and thus we can't even look up a a result and
- /// invalidate it directly. Notably, this does *not* call invalidate functions
- /// as there is nothing to be done for them.
- void clear() {
- AnalysisResults.clear();
- AnalysisResultLists.clear();
+ /// \brief Look up a registered analysis pass.
+ const PassConceptT &lookUpPass(AnalysisKey *ID) const {
+ typename AnalysisPassMapT::const_iterator PI = AnalysisPasses.find(ID);
+ assert(PI != AnalysisPasses.end() &&
+ "Analysis passes must be registered prior to being queried!");
+ return *PI->second;
}
-private:
- AnalysisManager(const AnalysisManager &) = delete;
- AnalysisManager &operator=(const AnalysisManager &) = delete;
-
/// \brief Get an analysis result, running the pass if necessary.
- ResultConceptT &getResultImpl(void *PassID, IRUnitT &IR) {
+ ResultConceptT &getResultImpl(AnalysisKey *ID, IRUnitT &IR,
+ ExtraArgTs... ExtraArgs) {
typename AnalysisResultMapT::iterator RI;
bool Inserted;
std::tie(RI, Inserted) = AnalysisResults.insert(std::make_pair(
- std::make_pair(PassID, &IR), typename AnalysisResultListT::iterator()));
+ std::make_pair(ID, &IR), typename AnalysisResultListT::iterator()));
// If we don't have a cached result for this function, look up the pass and
// run it to produce a result, which we then add to the cache.
if (Inserted) {
- auto &P = this->lookupPass(PassID);
+ auto &P = this->lookUpPass(ID);
if (DebugLogging)
dbgs() << "Running analysis: " << P.name() << "\n";
AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
- ResultList.emplace_back(PassID, P.run(IR, *this));
+ ResultList.emplace_back(ID, P.run(IR, *this, ExtraArgs...));
// P.run may have inserted elements into AnalysisResults and invalidated
// RI.
- RI = AnalysisResults.find(std::make_pair(PassID, &IR));
+ RI = AnalysisResults.find({ID, &IR});
assert(RI != AnalysisResults.end() && "we just inserted it!");
RI->second = std::prev(ResultList.end());
@@ -549,84 +837,31 @@ private:
}
/// \brief Get a cached analysis result or return null.
- ResultConceptT *getCachedResultImpl(void *PassID, IRUnitT &IR) const {
+ ResultConceptT *getCachedResultImpl(AnalysisKey *ID, IRUnitT &IR) const {
typename AnalysisResultMapT::const_iterator RI =
- AnalysisResults.find(std::make_pair(PassID, &IR));
+ AnalysisResults.find({ID, &IR});
return RI == AnalysisResults.end() ? nullptr : &*RI->second->second;
}
/// \brief Invalidate a function pass result.
- void invalidateImpl(void *PassID, IRUnitT &IR) {
+ void invalidateImpl(AnalysisKey *ID, IRUnitT &IR) {
typename AnalysisResultMapT::iterator RI =
- AnalysisResults.find(std::make_pair(PassID, &IR));
+ AnalysisResults.find({ID, &IR});
if (RI == AnalysisResults.end())
return;
if (DebugLogging)
- dbgs() << "Invalidating analysis: " << this->lookupPass(PassID).name()
+ dbgs() << "Invalidating analysis: " << this->lookUpPass(ID).name()
<< "\n";
AnalysisResultLists[&IR].erase(RI->second);
AnalysisResults.erase(RI);
}
- /// \brief Invalidate the results for a function..
- PreservedAnalyses invalidateImpl(IRUnitT &IR, PreservedAnalyses PA) {
- // Short circuit for a common case of all analyses being preserved.
- if (PA.areAllPreserved())
- return PA;
-
- if (DebugLogging)
- dbgs() << "Invalidating all non-preserved analyses for: " << IR.getName()
- << "\n";
-
- // Clear all the invalidated results associated specifically with this
- // function.
- SmallVector<void *, 8> InvalidatedPassIDs;
- AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
- for (typename AnalysisResultListT::iterator I = ResultsList.begin(),
- E = ResultsList.end();
- I != E;) {
- void *PassID = I->first;
-
- // Pass the invalidation down to the pass itself to see if it thinks it is
- // necessary. The analysis pass can return false if no action on the part
- // of the analysis manager is required for this invalidation event.
- if (I->second->invalidate(IR, PA)) {
- if (DebugLogging)
- dbgs() << "Invalidating analysis: " << this->lookupPass(PassID).name()
- << "\n";
-
- InvalidatedPassIDs.push_back(I->first);
- I = ResultsList.erase(I);
- } else {
- ++I;
- }
-
- // After handling each pass, we mark it as preserved. Once we've
- // invalidated any stale results, the rest of the system is allowed to
- // start preserving this analysis again.
- PA.preserve(PassID);
- }
- while (!InvalidatedPassIDs.empty())
- AnalysisResults.erase(
- std::make_pair(InvalidatedPassIDs.pop_back_val(), &IR));
- if (ResultsList.empty())
- AnalysisResultLists.erase(&IR);
-
- return PA;
- }
-
- /// \brief List of function analysis pass IDs and associated concept pointers.
- ///
- /// Requires iterators to be valid across appending new entries and arbitrary
- /// erases. Provides both the pass ID and concept pointer such that it is
- /// half of a bijection and provides storage for the actual result concept.
- typedef std::list<std::pair<
- void *, std::unique_ptr<detail::AnalysisResultConcept<IRUnitT>>>>
- AnalysisResultListT;
+ /// \brief Map type from analysis pass ID to pass concept pointer.
+ typedef DenseMap<AnalysisKey *, std::unique_ptr<PassConceptT>>
+ AnalysisPassMapT;
- /// \brief Map type from function pointer to our custom list type.
- typedef DenseMap<IRUnitT *, AnalysisResultListT> AnalysisResultListMapT;
+ /// \brief Collection of analysis passes, indexed by ID.
+ AnalysisPassMapT AnalysisPasses;
/// \brief Map from function to a list of function analysis results.
///
@@ -634,12 +869,6 @@ private:
/// the ultimate storage for a particular cached analysis result.
AnalysisResultListMapT AnalysisResultLists;
- /// \brief Map type from a pair of analysis ID and function pointer to an
- /// iterator into a particular result list.
- typedef DenseMap<std::pair<void *, IRUnitT *>,
- typename AnalysisResultListT::iterator>
- AnalysisResultMapT;
-
/// \brief Map from an analysis ID and function to a particular cached
/// analysis result.
AnalysisResultMapT AnalysisResults;
@@ -668,78 +897,68 @@ typedef AnalysisManager<Function> FunctionAnalysisManager;
/// Note that the proxy's result is a move-only object and represents ownership
/// of the validity of the analyses in the \c FunctionAnalysisManager it
/// provides.
-template <typename AnalysisManagerT, typename IRUnitT>
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
class InnerAnalysisManagerProxy
: public AnalysisInfoMixin<
InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT>> {
public:
class Result {
public:
- explicit Result(AnalysisManagerT &AM) : AM(&AM) {}
- Result(Result &&Arg) : AM(std::move(Arg.AM)) {
+ explicit Result(AnalysisManagerT &InnerAM) : InnerAM(&InnerAM) {}
+ Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)) {
// We have to null out the analysis manager in the moved-from state
// because we are taking ownership of the responsibility to clear the
// analysis state.
- Arg.AM = nullptr;
+ Arg.InnerAM = nullptr;
}
Result &operator=(Result &&RHS) {
- AM = RHS.AM;
+ InnerAM = RHS.InnerAM;
// We have to null out the analysis manager in the moved-from state
// because we are taking ownership of the responsibility to clear the
// analysis state.
- RHS.AM = nullptr;
+ RHS.InnerAM = nullptr;
return *this;
}
~Result() {
- // AM is cleared in a moved from state where there is nothing to do.
- if (!AM)
+ // InnerAM is cleared in a moved from state where there is nothing to do.
+ if (!InnerAM)
return;
// Clear out the analysis manager if we're being destroyed -- it means we
// didn't even see an invalidate call when we got invalidated.
- AM->clear();
+ InnerAM->clear();
}
/// \brief Accessor for the analysis manager.
- AnalysisManagerT &getManager() { return *AM; }
+ AnalysisManagerT &getManager() { return *InnerAM; }
- /// \brief Handler for invalidation of the module.
+ /// \brief Handler for invalidation of the outer IR unit.
///
/// If this analysis itself is preserved, then we assume that the set of \c
- /// Function objects in the \c Module hasn't changed and thus we don't need
- /// to invalidate *all* cached data associated with a \c Function* in the \c
- /// FunctionAnalysisManager.
+ /// IR units that the inner analysis manager controls hasn't changed and
+ /// thus we don't need to invalidate *all* cached data associated with any
+ /// \c IRUnitT* in the \c AnalysisManagerT.
///
/// Regardless of whether this analysis is marked as preserved, all of the
- /// analyses in the \c FunctionAnalysisManager are potentially invalidated
- /// based on the set of preserved analyses.
- bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) {
- // If this proxy isn't marked as preserved, then we can't even invalidate
- // individual function analyses, there may be an invalid set of Function
- // objects in the cache making it impossible to incrementally preserve
- // them. Just clear the entire manager.
- if (!PA.preserved(InnerAnalysisManagerProxy::ID()))
- AM->clear();
-
- // Return false to indicate that this result is still a valid proxy.
- return false;
- }
+ /// analyses in the \c AnalysisManagerT are potentially invalidated (for
+ /// the relevant inner set of their IR units) based on the set of preserved
+ /// analyses.
+ ///
+ /// Because this needs to understand the mapping from one IR unit to an
+ /// inner IR unit, this method isn't defined in the primary template.
+ /// Instead, each specialization of this template will need to provide an
+ /// explicit specialization of this method to handle that particular pair
+ /// of IR unit and inner AnalysisManagerT.
+ bool invalidate(
+ IRUnitT &IR, const PreservedAnalyses &PA,
+ typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv);
private:
- AnalysisManagerT *AM;
+ AnalysisManagerT *InnerAM;
};
- explicit InnerAnalysisManagerProxy(AnalysisManagerT &AM) : AM(&AM) {}
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- InnerAnalysisManagerProxy(const InnerAnalysisManagerProxy &Arg)
- : AM(Arg.AM) {}
- InnerAnalysisManagerProxy(InnerAnalysisManagerProxy &&Arg)
- : AM(std::move(Arg.AM)) {}
- InnerAnalysisManagerProxy &operator=(InnerAnalysisManagerProxy RHS) {
- std::swap(AM, RHS.AM);
- return *this;
- }
+ explicit InnerAnalysisManagerProxy(AnalysisManagerT &InnerAM)
+ : InnerAM(&InnerAM) {}
/// \brief Run the analysis pass and create our proxy result object.
///
@@ -750,25 +969,39 @@ public:
/// In debug builds, it will also assert that the analysis manager is empty
/// as no queries should arrive at the function analysis manager prior to
/// this analysis being requested.
- Result run(IRUnitT &IR, AnalysisManager<IRUnitT> &) { return Result(*AM); }
+ Result run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
+ ExtraArgTs...) {
+ return Result(*InnerAM);
+ }
private:
friend AnalysisInfoMixin<
InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT>>;
- static char PassID;
+ static AnalysisKey Key;
- AnalysisManagerT *AM;
+ AnalysisManagerT *InnerAM;
};
-template <typename AnalysisManagerT, typename IRUnitT>
-char InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT>::PassID;
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
+AnalysisKey
+ InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>::Key;
-extern template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
- Module>;
/// Provide the \c FunctionAnalysisManager to \c Module proxy.
typedef InnerAnalysisManagerProxy<FunctionAnalysisManager, Module>
FunctionAnalysisManagerModuleProxy;
+/// Specialization of the invalidate method for the \c
+/// FunctionAnalysisManagerModuleProxy's result.
+template <>
+bool FunctionAnalysisManagerModuleProxy::Result::invalidate(
+ Module &M, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &Inv);
+
+// Ensure the \c FunctionAnalysisManagerModuleProxy is provided as an extern
+// template.
+extern template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
+ Module>;
+
/// \brief A function analysis which acts as a proxy for a module analysis
/// manager.
///
@@ -778,10 +1011,14 @@ typedef InnerAnalysisManagerProxy<FunctionAnalysisManager, Module>
/// cannot request a module analysis to actually run. Instead, the user must
/// rely on the \c getCachedResult API.
///
-/// This proxy *doesn't* manage the invalidation in any way. That is handled by
-/// the recursive return path of each layer of the pass manager and the
-/// returned PreservedAnalysis set.
-template <typename AnalysisManagerT, typename IRUnitT>
+/// The invalidation provided by this proxy involves tracking when an
+/// invalidation event in the outer analysis manager needs to trigger an
+/// invalidation of a particular analysis on this IR unit.
+///
+/// Because outer analyses aren't invalidated while these IR units are being
+/// processed, we have to register and handle these as deferred invalidation
+/// events.
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
class OuterAnalysisManagerProxy
: public AnalysisInfoMixin<
OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT>> {
@@ -790,51 +1027,71 @@ public:
class Result {
public:
explicit Result(const AnalysisManagerT &AM) : AM(&AM) {}
- // We have to explicitly define all the special member functions because
- // MSVC refuses to generate them.
- Result(const Result &Arg) : AM(Arg.AM) {}
- Result(Result &&Arg) : AM(std::move(Arg.AM)) {}
- Result &operator=(Result RHS) {
- std::swap(AM, RHS.AM);
- return *this;
- }
const AnalysisManagerT &getManager() const { return *AM; }
/// \brief Handle invalidation by ignoring it, this pass is immutable.
- bool invalidate(IRUnitT &) { return false; }
+ bool invalidate(
+ IRUnitT &, const PreservedAnalyses &,
+ typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &) {
+ return false;
+ }
+
+ /// Register a deferred invalidation event for when the outer analysis
+ /// manager processes its invalidations.
+ template <typename OuterAnalysisT, typename InvalidatedAnalysisT>
+ void registerOuterAnalysisInvalidation() {
+ AnalysisKey *OuterID = OuterAnalysisT::ID();
+ AnalysisKey *InvalidatedID = InvalidatedAnalysisT::ID();
+
+ auto &InvalidatedIDList = OuterAnalysisInvalidationMap[OuterID];
+ // Note, this is a linear scan. If we end up with large numbers of
+ // analyses that all trigger invalidation on the same outer analysis,
+ // this entire system should be changed to some other deterministic
+ // data structure such as a `SetVector` of a pair of pointers.
+ auto InvalidatedIt = std::find(InvalidatedIDList.begin(),
+ InvalidatedIDList.end(), InvalidatedID);
+ if (InvalidatedIt == InvalidatedIDList.end())
+ InvalidatedIDList.push_back(InvalidatedID);
+ }
+
+ /// Access the map from outer analyses to the analyses that require
+ /// deferred invalidation.
+ const SmallDenseMap<AnalysisKey *, TinyPtrVector<AnalysisKey *>, 2> &
+ getOuterInvalidations() const {
+ return OuterAnalysisInvalidationMap;
+ }
private:
const AnalysisManagerT *AM;
+
+ /// A map from an outer analysis ID to the set of this IR-unit's analyses
+ /// which need to be invalidated.
+ SmallDenseMap<AnalysisKey *, TinyPtrVector<AnalysisKey *>, 2>
+ OuterAnalysisInvalidationMap;
};
OuterAnalysisManagerProxy(const AnalysisManagerT &AM) : AM(&AM) {}
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- OuterAnalysisManagerProxy(const OuterAnalysisManagerProxy &Arg)
- : AM(Arg.AM) {}
- OuterAnalysisManagerProxy(OuterAnalysisManagerProxy &&Arg)
- : AM(std::move(Arg.AM)) {}
- OuterAnalysisManagerProxy &operator=(OuterAnalysisManagerProxy RHS) {
- std::swap(AM, RHS.AM);
- return *this;
- }
/// \brief Run the analysis pass and create our proxy result object.
/// Nothing to see here, it just forwards the \c AM reference into the
/// result.
- Result run(IRUnitT &, AnalysisManager<IRUnitT> &) { return Result(*AM); }
+ Result run(IRUnitT &, AnalysisManager<IRUnitT, ExtraArgTs...> &,
+ ExtraArgTs...) {
+ return Result(*AM);
+ }
private:
friend AnalysisInfoMixin<
OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT>>;
- static char PassID;
+ static AnalysisKey Key;
const AnalysisManagerT *AM;
};
-template <typename AnalysisManagerT, typename IRUnitT>
-char OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT>::PassID;
+template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
+AnalysisKey
+ OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>::Key;
extern template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
Function>;
@@ -870,21 +1127,6 @@ class ModuleToFunctionPassAdaptor
public:
explicit ModuleToFunctionPassAdaptor(FunctionPassT Pass)
: Pass(std::move(Pass)) {}
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- ModuleToFunctionPassAdaptor(const ModuleToFunctionPassAdaptor &Arg)
- : Pass(Arg.Pass) {}
- ModuleToFunctionPassAdaptor(ModuleToFunctionPassAdaptor &&Arg)
- : Pass(std::move(Arg.Pass)) {}
- friend void swap(ModuleToFunctionPassAdaptor &LHS,
- ModuleToFunctionPassAdaptor &RHS) {
- using std::swap;
- swap(LHS.Pass, RHS.Pass);
- }
- ModuleToFunctionPassAdaptor &operator=(ModuleToFunctionPassAdaptor RHS) {
- swap(*this, RHS);
- return *this;
- }
/// \brief Runs the function pass across every function in the module.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
@@ -901,20 +1143,19 @@ public:
// We know that the function pass couldn't have invalidated any other
// function's analyses (that's the contract of a function pass), so
- // directly handle the function analysis manager's invalidation here and
- // update our preserved set to reflect that these have already been
- // handled.
- PassPA = FAM.invalidate(F, std::move(PassPA));
+ // directly handle the function analysis manager's invalidation here.
+ FAM.invalidate(F, PassPA);
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
}
- // By definition we preserve the proxy. This precludes *any* invalidation
- // of function analyses by the proxy, but that's OK because we've taken
- // care to invalidate analyses in the function analysis manager
- // incrementally above.
+ // By definition we preserve the proxy. We also preserve all analyses on
+ // Function units. This precludes *any* invalidation of function analyses
+ // by the proxy, but that's OK because we've taken care to invalidate
+ // analyses in the function analysis manager incrementally above.
+ PA.preserveSet<AllAnalysesOn<Function>>();
PA.preserve<FunctionAnalysisManagerModuleProxy>();
return PA;
}
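// --- Illustrative sketch (editor's addition, not part of the patch) ---
// A module pass that only mutates IR below the Function level can now say so
// wholesale, mirroring what the adaptor above does. `NoopModulePass` is a
// hypothetical example; assumes "llvm/IR/PassManager.h" and namespace llvm.
struct NoopModulePass : PassInfoMixin<NoopModulePass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
    PreservedAnalyses PA;                              // preserves nothing yet
    PA.preserveSet<AllAnalysesOn<Function>>();         // keep all function analyses
    PA.preserve<FunctionAnalysisManagerModuleProxy>(); // and the proxy itself
    return PA;
  }
};
// --- End sketch ---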
@@ -933,19 +1174,29 @@ createModuleToFunctionPassAdaptor(FunctionPassT Pass) {
/// \brief A template utility pass to force an analysis result to be available.
///
-/// This is a no-op pass which simply forces a specific analysis pass's result
-/// to be available when it is run.
-template <typename AnalysisT>
-struct RequireAnalysisPass : PassInfoMixin<RequireAnalysisPass<AnalysisT>> {
+/// If there are extra arguments at the pass's run level there may also be
+/// extra arguments to the analysis manager's \c getResult routine. We can't
+/// guess how to effectively map the arguments from one to the other, and so
+/// this specialization just ignores them.
+///
+/// Specific patterns of run-method extra arguments and analysis manager extra
+/// arguments will have to be defined as appropriate specializations.
+template <typename AnalysisT, typename IRUnitT,
+ typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+ typename... ExtraArgTs>
+struct RequireAnalysisPass
+ : PassInfoMixin<RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
+ ExtraArgTs...>> {
/// \brief Run this pass over some unit of IR.
///
/// This pass can be run over any unit of IR and use any analysis manager
/// provided they satisfy the basic API requirements. When this pass is
/// created, these methods can be instantiated to satisfy whatever the
/// context requires.
- template <typename IRUnitT>
- PreservedAnalyses run(IRUnitT &Arg, AnalysisManager<IRUnitT> &AM) {
- (void)AM.template getResult<AnalysisT>(Arg);
+ PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM,
+ ExtraArgTs &&... Args) {
+ (void)AM.template getResult<AnalysisT>(Arg,
+ std::forward<ExtraArgTs>(Args)...);
return PreservedAnalyses::all();
}
@@ -965,13 +1216,11 @@ struct InvalidateAnalysisPass
/// provided they satisfy the basic API requirements. When this pass is
/// created, these methods can be instantiated to satisfy whatever the
/// context requires.
- template <typename IRUnitT>
- PreservedAnalyses run(IRUnitT &Arg, AnalysisManager<IRUnitT> &AM) {
- // We have to directly invalidate the analysis result as we can't
- // enumerate all other analyses and use the preserved set to control it.
- AM.template invalidate<AnalysisT>(Arg);
-
- return PreservedAnalyses::all();
+ template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
+ PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM, ExtraArgTs &&...) {
+ auto PA = PreservedAnalyses::all();
+ PA.abandon<AnalysisT>();
+ return PA;
}
};
@@ -981,12 +1230,39 @@ struct InvalidateAnalysisPass
/// analysis passes to be re-run to produce fresh results if any are needed.
struct InvalidateAllAnalysesPass : PassInfoMixin<InvalidateAllAnalysesPass> {
/// \brief Run this pass over some unit of IR.
- template <typename IRUnitT>
- PreservedAnalyses run(IRUnitT &, AnalysisManager<IRUnitT> &) {
+ template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
+ PreservedAnalyses run(IRUnitT &, AnalysisManagerT &, ExtraArgTs &&...) {
return PreservedAnalyses::none();
}
};
+/// A utility pass template that simply runs another pass multiple times.
+///
+/// This can be useful when debugging or testing passes. It also serves as an
+/// example of how to extend the pass manager in ways beyond composition.
+template <typename PassT>
+class RepeatedPass : public PassInfoMixin<RepeatedPass<PassT>> {
+public:
+ RepeatedPass(int Count, PassT P) : Count(Count), P(std::move(P)) {}
+
+ template <typename IRUnitT, typename AnalysisManagerT, typename... Ts>
+ PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM, Ts &&... Args) {
+ auto PA = PreservedAnalyses::all();
+ for (int i = 0; i < Count; ++i)
+ PA.intersect(P.run(Arg, AM, std::forward<Ts>(Args)...));
+ return PA;
+ }
+
+private:
+ int Count;
+ PassT P;
+};
+
+template <typename PassT>
+RepeatedPass<PassT> createRepeatedPass(int Count, PassT P) {
+ return RepeatedPass<PassT>(Count, std::move(P));
+}
+
}
#endif
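// --- Illustrative sketch (editor's addition, not part of the patch) ---
// Using the new RepeatedPass utility: wrap a pass so it runs three times in a
// row; RepeatedPass::run intersects the PreservedAnalyses of each iteration.
// `CountingPass` is a hypothetical pass; assumes "llvm/IR/PassManager.h".
using namespace llvm;

struct CountingPass : PassInfoMixin<CountingPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    return PreservedAnalyses::all();
  }
};

void buildPipeline(FunctionPassManager &FPM) {
  FPM.addPass(createRepeatedPass(3, CountingPass()));
}
// --- End sketch ---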
diff --git a/include/llvm/IR/PassManagerInternal.h b/include/llvm/IR/PassManagerInternal.h
index 4351b5888283..02f21675fa9d 100644
--- a/include/llvm/IR/PassManagerInternal.h
+++ b/include/llvm/IR/PassManagerInternal.h
@@ -20,10 +20,14 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+#include <memory>
+#include <utility>
namespace llvm {
-template <typename IRUnitT> class AnalysisManager;
+template <typename IRUnitT> class AllAnalysesOn;
+template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
+class Invalidator;
class PreservedAnalyses;
/// \brief Implementation details of the pass manager interfaces.
@@ -31,16 +35,18 @@ namespace detail {
/// \brief Template for the abstract base class used to dispatch
/// polymorphically over pass objects.
-template <typename IRUnitT> struct PassConcept {
+template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
+struct PassConcept {
// Boiler plate necessary for the container of derived classes.
- virtual ~PassConcept() {}
+ virtual ~PassConcept() = default;
/// \brief The polymorphic API which runs the pass over a given IR entity.
///
/// Note that the actual pass object can omit the analysis manager argument if
/// desired. Note also that the analysis manager may be null if there is no
/// analysis manager in the pass pipeline.
- virtual PreservedAnalyses run(IRUnitT &IR, AnalysisManager<IRUnitT> &AM) = 0;
+ virtual PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
+ ExtraArgTs... ExtraArgs) = 0;
/// \brief Polymorphic method to access the name of a pass.
virtual StringRef name() = 0;
@@ -50,28 +56,33 @@ template <typename IRUnitT> struct PassConcept {
///
/// Can be instantiated for any object which provides a \c run method accepting
/// an \c IRUnitT& and an \c AnalysisManager<IRUnitT>&. It requires the pass to
-/// be a copyable object. When the
-template <typename IRUnitT, typename PassT,
- typename PreservedAnalysesT = PreservedAnalyses>
-struct PassModel : PassConcept<IRUnitT> {
+/// be a copyable object.
+template <typename IRUnitT, typename PassT, typename PreservedAnalysesT,
+ typename AnalysisManagerT, typename... ExtraArgTs>
+struct PassModel : PassConcept<IRUnitT, AnalysisManagerT, ExtraArgTs...> {
explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+
friend void swap(PassModel &LHS, PassModel &RHS) {
using std::swap;
swap(LHS.Pass, RHS.Pass);
}
+
PassModel &operator=(PassModel RHS) {
swap(*this, RHS);
return *this;
}
- PreservedAnalysesT run(IRUnitT &IR, AnalysisManager<IRUnitT> &AM) override {
- return Pass.run(IR, AM);
+ PreservedAnalysesT run(IRUnitT &IR, AnalysisManagerT &AM,
+ ExtraArgTs... ExtraArgs) override {
+ return Pass.run(IR, AM, ExtraArgs...);
}
+
StringRef name() override { return PassT::name(); }
+
PassT Pass;
};
@@ -79,38 +90,63 @@ struct PassModel : PassConcept<IRUnitT> {
///
/// This concept is parameterized over the IR unit that this result pertains
/// to.
-template <typename IRUnitT> struct AnalysisResultConcept {
- virtual ~AnalysisResultConcept() {}
+template <typename IRUnitT, typename PreservedAnalysesT, typename InvalidatorT>
+struct AnalysisResultConcept {
+ virtual ~AnalysisResultConcept() = default;
/// \brief Method to try and mark a result as invalid.
///
/// When the outer analysis manager detects a change in some underlying
/// unit of the IR, it will call this method on all of the results cached.
///
- /// This method also receives a set of preserved analyses which can be used
- /// to avoid invalidation because the pass which changed the underlying IR
- /// took care to update or preserve the analysis result in some way.
+ /// \p PA is a set of preserved analyses which can be used to avoid
+ /// invalidation because the pass which changed the underlying IR took care
+ /// to update or preserve the analysis result in some way.
+ ///
+ /// \p Inv is typically a \c AnalysisManager::Invalidator object that can be
+ /// used by a particular analysis result to discover if other analyses
+ /// results are also invalidated in the event that this result depends on
+ /// them. See the documentation in the \c AnalysisManager for more details.
///
/// \returns true if the result is indeed invalid (the default).
- virtual bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) = 0;
+ virtual bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA,
+ InvalidatorT &Inv) = 0;
};
/// \brief SFINAE metafunction for computing whether \c ResultT provides an
/// \c invalidate member function.
template <typename IRUnitT, typename ResultT> class ResultHasInvalidateMethod {
- typedef char SmallType;
- struct BigType {
+ typedef char EnabledType;
+ struct DisabledType {
char a, b;
};
- template <typename T, bool (T::*)(IRUnitT &, const PreservedAnalyses &)>
- struct Checker;
-
- template <typename T> static SmallType f(Checker<T, &T::invalidate> *);
- template <typename T> static BigType f(...);
+ // Purely to help out MSVC which fails to disable the below specialization,
+ // explicitly enable using the result type's invalidate routine if we can
+ // successfully call that routine.
+ template <typename T> struct Nonce { typedef EnabledType Type; };
+ template <typename T>
+ static typename Nonce<decltype(std::declval<T>().invalidate(
+ std::declval<IRUnitT &>(), std::declval<PreservedAnalyses>()))>::Type
+ check(rank<2>);
+
+ // First we define an overload that can only be taken if there is no
+ // invalidate member. We do this by taking the address of an invalidate
+ // member in an adjacent base class of a derived class. This would be
+ // ambiguous if there were an invalidate member in the result type.
+ template <typename T, typename U> static DisabledType NonceFunction(T U::*);
+ struct CheckerBase { int invalidate; };
+ template <typename T> struct Checker : CheckerBase, T {};
+ template <typename T>
+ static decltype(NonceFunction(&Checker<T>::invalidate)) check(rank<1>);
+
+ // Now we have the fallback that will only be reached when there is an
+ // invalidate member, and enables the trait.
+ template <typename T>
+ static EnabledType check(rank<0>);
public:
- enum { Value = sizeof(f<ResultT>(nullptr)) == sizeof(SmallType) };
+ enum { Value = sizeof(check<ResultT>(rank<2>())) == sizeof(EnabledType) };
};
/// \brief Wrapper to model the analysis result concept.
@@ -120,7 +156,7 @@ public:
/// an invalidation handler. It is only selected when the invalidation handler
/// is not part of the ResultT's interface.
template <typename IRUnitT, typename PassT, typename ResultT,
- typename PreservedAnalysesT = PreservedAnalyses,
+ typename PreservedAnalysesT, typename InvalidatorT,
bool HasInvalidateHandler =
ResultHasInvalidateMethod<IRUnitT, ResultT>::Value>
struct AnalysisResultModel;
@@ -128,19 +164,22 @@ struct AnalysisResultModel;
/// \brief Specialization of \c AnalysisResultModel which provides the default
/// invalidate functionality.
template <typename IRUnitT, typename PassT, typename ResultT,
- typename PreservedAnalysesT>
-struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, false>
- : AnalysisResultConcept<IRUnitT> {
+ typename PreservedAnalysesT, typename InvalidatorT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT,
+ InvalidatorT, false>
+ : AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT> {
explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
AnalysisResultModel(AnalysisResultModel &&Arg)
: Result(std::move(Arg.Result)) {}
+
friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
using std::swap;
swap(LHS.Result, RHS.Result);
}
+
AnalysisResultModel &operator=(AnalysisResultModel RHS) {
swap(*this, RHS);
return *this;
@@ -151,8 +190,11 @@ struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, false>
// FIXME: We should actually use two different concepts for analysis results
// rather than two different models, and avoid the indirect function call for
// ones that use the trivial behavior.
- bool invalidate(IRUnitT &, const PreservedAnalysesT &PA) override {
- return !PA.preserved(PassT::ID());
+ bool invalidate(IRUnitT &, const PreservedAnalysesT &PA,
+ InvalidatorT &) override {
+ auto PAC = PA.template getChecker<PassT>();
+ return !PAC.preserved() &&
+ !PAC.template preservedSet<AllAnalysesOn<IRUnitT>>();
}
ResultT Result;
@@ -161,27 +203,31 @@ struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, false>
/// \brief Specialization of \c AnalysisResultModel which delegates invalidate
/// handling to \c ResultT.
template <typename IRUnitT, typename PassT, typename ResultT,
- typename PreservedAnalysesT>
-struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, true>
- : AnalysisResultConcept<IRUnitT> {
+ typename PreservedAnalysesT, typename InvalidatorT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT,
+ InvalidatorT, true>
+ : AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT> {
explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
AnalysisResultModel(AnalysisResultModel &&Arg)
: Result(std::move(Arg.Result)) {}
+
friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
using std::swap;
swap(LHS.Result, RHS.Result);
}
+
AnalysisResultModel &operator=(AnalysisResultModel RHS) {
swap(*this, RHS);
return *this;
}
/// \brief The model delegates to the \c ResultT method.
- bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA) override {
- return Result.invalidate(IR, PA);
+ bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA,
+ InvalidatorT &Inv) override {
+ return Result.invalidate(IR, PA, Inv);
}
ResultT Result;
@@ -191,14 +237,18 @@ struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, true>
///
/// This concept is parameterized over the IR unit that it can run over and
/// produce an analysis result.
-template <typename IRUnitT> struct AnalysisPassConcept {
- virtual ~AnalysisPassConcept() {}
+template <typename IRUnitT, typename PreservedAnalysesT, typename InvalidatorT,
+ typename... ExtraArgTs>
+struct AnalysisPassConcept {
+ virtual ~AnalysisPassConcept() = default;
/// \brief Method to run this analysis over a unit of IR.
/// \returns A unique_ptr to the analysis result object to be queried by
/// users.
- virtual std::unique_ptr<AnalysisResultConcept<IRUnitT>>
- run(IRUnitT &IR, AnalysisManager<IRUnitT> &AM) = 0;
+ virtual std::unique_ptr<
+ AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
+ run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
+ ExtraArgTs... ExtraArgs) = 0;
/// \brief Polymorphic method to access the name of a pass.
virtual StringRef name() = 0;
@@ -209,32 +259,39 @@ template <typename IRUnitT> struct AnalysisPassConcept {
/// Can wrap any type which implements a suitable \c run method. The method
/// must accept an \c IRUnitT& and an \c AnalysisManager<IRUnitT>& as arguments
/// and produce an object which can be wrapped in a \c AnalysisResultModel.
-template <typename IRUnitT, typename PassT>
-struct AnalysisPassModel : AnalysisPassConcept<IRUnitT> {
+template <typename IRUnitT, typename PassT, typename PreservedAnalysesT,
+ typename InvalidatorT, typename... ExtraArgTs>
+struct AnalysisPassModel : AnalysisPassConcept<IRUnitT, PreservedAnalysesT,
+ InvalidatorT, ExtraArgTs...> {
explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+
friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
using std::swap;
swap(LHS.Pass, RHS.Pass);
}
+
AnalysisPassModel &operator=(AnalysisPassModel RHS) {
swap(*this, RHS);
return *this;
}
// FIXME: Replace PassT::Result with type traits when we use C++11.
- typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
+ PreservedAnalysesT, InvalidatorT>
ResultModelT;
/// \brief The model delegates to the \c PassT::run method.
///
/// The return is wrapped in an \c AnalysisResultModel.
- std::unique_ptr<AnalysisResultConcept<IRUnitT>>
- run(IRUnitT &IR, AnalysisManager<IRUnitT> &AM) override {
- return make_unique<ResultModelT>(Pass.run(IR, AM));
+ std::unique_ptr<
+ AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
+ run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
+ ExtraArgTs... ExtraArgs) override {
+ return make_unique<ResultModelT>(Pass.run(IR, AM, ExtraArgs...));
}
/// \brief The model delegates to a static \c PassT::name method.
@@ -245,7 +302,8 @@ struct AnalysisPassModel : AnalysisPassConcept<IRUnitT> {
PassT Pass;
};
-} // End namespace detail
-}
+} // end namespace detail
+
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_PASSMANAGERINTERNAL_H
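// --- Illustrative sketch (editor's addition, not part of the patch) ---
// A result type that opts into custom invalidation now exposes the
// three-argument signature which AnalysisResultModel forwards: the IR unit,
// the PreservedAnalyses set, and the new Invalidator. `CallCountAnalysis` is
// hypothetical; assumes "llvm/IR/PassManager.h" and namespace llvm.
using namespace llvm;

struct CallCountAnalysis : AnalysisInfoMixin<CallCountAnalysis> {
  struct Result {
    unsigned Count = 0;
    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &Inv) {
      // Stay valid when this analysis, or all Function analyses, are marked
      // preserved; otherwise report the cached result as invalid.
      auto PAC = PA.getChecker<CallCountAnalysis>();
      return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>());
    }
  };
  Result run(Function &F, FunctionAnalysisManager &AM) { return Result(); }
  static AnalysisKey Key; // AnalysisInfoMixin now keys off an AnalysisKey
};
AnalysisKey CallCountAnalysis::Key;
// --- End sketch ---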
diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h
index 7da9afcf9463..a30fc97e98ef 100644
--- a/include/llvm/IR/PatternMatch.h
+++ b/include/llvm/IR/PatternMatch.h
@@ -294,10 +294,10 @@ template <typename Class> struct bind_ty {
/// \brief Match a value, capturing it if we match.
inline bind_ty<Value> m_Value(Value *&V) { return V; }
+inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
/// \brief Match an instruction, capturing it if we match.
inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
-
/// \brief Match a binary operator, capturing it if we match.
inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
@@ -682,7 +682,7 @@ template <typename SubPattern_t> struct Exact_match {
Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
template <typename OpTy> bool match(OpTy *V) {
- if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(V))
+ if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
return PEO->isExact() && SubPattern.match(V);
return false;
}
@@ -706,7 +706,7 @@ struct CmpClass_match {
: Predicate(Pred), L(LHS), R(RHS) {}
template <typename OpTy> bool match(OpTy *V) {
- if (Class *I = dyn_cast<Class>(V))
+ if (auto *I = dyn_cast<Class>(V))
if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
Predicate = I->getPredicate();
return true;
@@ -1349,6 +1349,35 @@ m_c_Xor(const LHS &L, const RHS &R) {
return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
}
+/// Matches an SMin with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>,
+ MaxMin_match<ICmpInst, RHS, LHS, smin_pred_ty>>
+m_c_SMin(const LHS &L, const RHS &R) {
+ return m_CombineOr(m_SMin(L, R), m_SMin(R, L));
+}
+/// Matches an SMax with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>,
+ MaxMin_match<ICmpInst, RHS, LHS, smax_pred_ty>>
+m_c_SMax(const LHS &L, const RHS &R) {
+ return m_CombineOr(m_SMax(L, R), m_SMax(R, L));
+}
+/// Matches a UMin with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>,
+ MaxMin_match<ICmpInst, RHS, LHS, umin_pred_ty>>
+m_c_UMin(const LHS &L, const RHS &R) {
+ return m_CombineOr(m_UMin(L, R), m_UMin(R, L));
+}
+/// Matches a UMax with LHS and RHS in either order.
+template <typename LHS, typename RHS>
+inline match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>,
+ MaxMin_match<ICmpInst, RHS, LHS, umax_pred_ty>>
+m_c_UMax(const LHS &L, const RHS &R) {
+ return m_CombineOr(m_UMax(L, R), m_UMax(R, L));
+}
+
} // end namespace PatternMatch
} // end namespace llvm
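// --- Illustrative sketch (editor's addition, not part of the patch) ---
// The commutative min/max matchers accept the compared operands in either
// order. `detectUMin` is a hypothetical helper; A and B capture the operands.
static bool detectUMin(llvm::Value *V, llvm::Value *&A, llvm::Value *&B) {
  using namespace llvm::PatternMatch;
  // m_c_UMin(L, R) is m_UMin(L, R) or-combined with m_UMin(R, L).
  return match(V, m_c_UMin(m_Value(A), m_Value(B)));
}
// --- End sketch ---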
diff --git a/include/llvm/IR/Statepoint.h b/include/llvm/IR/Statepoint.h
index 5cd7fe1b576c..916faa4b327e 100644
--- a/include/llvm/IR/Statepoint.h
+++ b/include/llvm/IR/Statepoint.h
@@ -1,4 +1,4 @@
-//===-- llvm/IR/Statepoint.h - gc.statepoint utilities ------ --*- C++ -*-===//
+//===-- llvm/IR/Statepoint.h - gc.statepoint utilities ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,6 +19,7 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
@@ -26,21 +27,33 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
namespace llvm {
+
/// The statepoint intrinsic accepts a set of flags as its third argument.
/// Valid values come out of this set.
enum class StatepointFlags {
None = 0,
GCTransition = 1, ///< Indicates that this statepoint is a transition from
///< GC-aware code to code that is not GC-aware.
-
- MaskAll = GCTransition ///< A bitmask that includes all valid flags.
+ /// Mark the deopt arguments associated with the statepoint as only being
+ /// "live-in". By default, deopt arguments are "live-through". "live-through"
+ /// requires that the value be live on entry, on exit, and at any point
+ /// during the call. "live-in" only requires the value be available at the
+ /// start of the call. In particular, "live-in" values can be placed in
+ /// unused argument registers or other non-callee saved registers.
+ DeoptLiveIn = 2,
+
+ MaskAll = 3 ///< A bitmask that includes all valid flags.
};
class GCRelocateInst;
class GCResultInst;
-class ImmutableStatepoint;
bool isStatepoint(ImmutableCallSite CS);
bool isStatepoint(const Value *V);
@@ -59,8 +72,6 @@ template <typename FunTy, typename InstructionTy, typename ValueTy,
typename CallSiteTy>
class StatepointBase {
CallSiteTy StatepointCS;
- void *operator new(size_t, unsigned) = delete;
- void *operator new(size_t s) = delete;
protected:
explicit StatepointBase(InstructionTy *I) {
@@ -69,6 +80,7 @@ protected:
assert(StatepointCS && "isStatepoint implies CallSite");
}
}
+
explicit StatepointBase(CallSiteTy CS) {
if (isStatepoint(CS))
StatepointCS = CS;
@@ -86,6 +98,9 @@ public:
CallArgsBeginPos = 5,
};
+ void *operator new(size_t, unsigned) = delete;
+ void *operator new(size_t s) = delete;
+
explicit operator bool() const {
// We do not assign non-statepoint CallSites to StatepointCS.
return (bool)StatepointCS;
@@ -444,6 +459,7 @@ StatepointDirectives parseStatepointDirectivesFromAttrs(AttributeSet AS);
/// Return \c true if the \p Attr is an attribute that is a statepoint
/// directive.
bool isStatepointDirectiveAttr(Attribute Attr);
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_IR_STATEPOINT_H
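// --- Illustrative sketch (editor's addition, not part of the patch) ---
// Checking a raw statepoint flags word against the widened MaskAll and the
// new DeoptLiveIn bit. `Flags` stands in for the statepoint's flags operand.
static bool flagsAreValid(uint64_t Flags) {
  return (Flags & ~static_cast<uint64_t>(llvm::StatepointFlags::MaskAll)) == 0;
}
static bool deoptArgsAreLiveIn(uint64_t Flags) {
  return (Flags & static_cast<uint64_t>(llvm::StatepointFlags::DeoptLiveIn)) != 0;
}
// --- End sketch ---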
diff --git a/include/llvm/IR/SymbolTableListTraits.h b/include/llvm/IR/SymbolTableListTraits.h
index 60e04e2f9eca..5c6d58affd7a 100644
--- a/include/llvm/IR/SymbolTableListTraits.h
+++ b/include/llvm/IR/SymbolTableListTraits.h
@@ -30,14 +30,6 @@
namespace llvm {
class ValueSymbolTable;
-template <typename NodeTy> class ilist_iterator;
-template <typename NodeTy, typename Traits> class iplist;
-template <typename Ty> struct ilist_traits;
-
-template <typename NodeTy>
-struct SymbolTableListSentinelTraits
- : public ilist_embedded_sentinel_traits<NodeTy> {};
-
/// Template metafunction to get the parent type for a symbol table list.
///
/// Implementations create a typedef called \c type so that we only need a
@@ -68,11 +60,9 @@ template <typename NodeTy> class SymbolTableList;
// ItemParentClass - The type of object that owns the list, e.g. BasicBlock.
//
template <typename ValueSubClass>
-class SymbolTableListTraits
- : public ilist_nextprev_traits<ValueSubClass>,
- public SymbolTableListSentinelTraits<ValueSubClass>,
- public ilist_node_traits<ValueSubClass> {
+class SymbolTableListTraits : public ilist_alloc_traits<ValueSubClass> {
typedef SymbolTableList<ValueSubClass> ListTy;
+ typedef typename simple_ilist<ValueSubClass>::iterator iterator;
typedef
typename SymbolTableListParentType<ValueSubClass>::type ItemParentClass;
@@ -101,10 +91,9 @@ private:
public:
void addNodeToList(ValueSubClass *V);
void removeNodeFromList(ValueSubClass *V);
- void transferNodesFromList(SymbolTableListTraits &L2,
- ilist_iterator<ValueSubClass> first,
- ilist_iterator<ValueSubClass> last);
-//private:
+ void transferNodesFromList(SymbolTableListTraits &L2, iterator first,
+ iterator last);
+ // private:
template<typename TPtr>
void setSymTabObject(TPtr *, TPtr);
static ValueSymbolTable *toPtr(ValueSymbolTable *P) { return P; }
@@ -116,8 +105,9 @@ public:
/// When nodes are inserted into and removed from this list, the associated
/// symbol table will be automatically updated. Similarly, parent links get
/// updated automatically.
-template <typename NodeTy>
-class SymbolTableList : public iplist<NodeTy, SymbolTableListTraits<NodeTy>> {};
+template <class T>
+class SymbolTableList
+ : public iplist_impl<simple_ilist<T>, SymbolTableListTraits<T>> {};
} // End llvm namespace
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index ef7ad733f47a..778ee06169f1 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -81,6 +81,8 @@ private:
TypeID ID : 8; // The current base type of this type.
unsigned SubclassData : 24; // Space for subclasses to store data.
+ // Note that this should be synchronized with the
+ // MAX_INT_BITS value in the IntegerType class.
protected:
friend class LLVMContextImpl;
@@ -108,7 +110,7 @@ protected:
Type * const *ContainedTys;
static bool isSequentialType(TypeID TyID) {
- return TyID == ArrayTyID || TyID == PointerTyID || TyID == VectorTyID;
+ return TyID == ArrayTyID || TyID == VectorTyID;
}
public:
@@ -164,12 +166,12 @@ public:
const fltSemantics &getFltSemantics() const {
switch (getTypeID()) {
- case HalfTyID: return APFloat::IEEEhalf;
- case FloatTyID: return APFloat::IEEEsingle;
- case DoubleTyID: return APFloat::IEEEdouble;
- case X86_FP80TyID: return APFloat::x87DoubleExtended;
- case FP128TyID: return APFloat::IEEEquad;
- case PPC_FP128TyID: return APFloat::PPCDoubleDouble;
+ case HalfTyID: return APFloat::IEEEhalf();
+ case FloatTyID: return APFloat::IEEEsingle();
+ case DoubleTyID: return APFloat::IEEEdouble();
+ case X86_FP80TyID: return APFloat::x87DoubleExtended();
+ case FP128TyID: return APFloat::IEEEquad();
+ case PPC_FP128TyID: return APFloat::PPCDoubleDouble();
default: llvm_unreachable("Invalid floating type");
}
}
@@ -342,12 +344,21 @@ public:
}
inline uint64_t getArrayNumElements() const;
- Type *getArrayElementType() const { return getSequentialElementType(); }
+ Type *getArrayElementType() const {
+ assert(getTypeID() == ArrayTyID);
+ return ContainedTys[0];
+ }
inline unsigned getVectorNumElements() const;
- Type *getVectorElementType() const { return getSequentialElementType(); }
+ Type *getVectorElementType() const {
+ assert(getTypeID() == VectorTyID);
+ return ContainedTys[0];
+ }
- Type *getPointerElementType() const { return getSequentialElementType(); }
+ Type *getPointerElementType() const {
+ assert(getTypeID() == PointerTyID);
+ return ContainedTys[0];
+ }
/// Get the address space of this pointer or pointer vector type.
inline unsigned getPointerAddressSpace() const;
@@ -429,29 +440,21 @@ template <> struct isa_impl<PointerType, Type> {
// graph of sub types.
template <> struct GraphTraits<Type *> {
- typedef Type NodeType;
+ typedef Type *NodeRef;
typedef Type::subtype_iterator ChildIteratorType;
- static inline NodeType *getEntryNode(Type *T) { return T; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->subtype_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->subtype_end();
- }
+ static NodeRef getEntryNode(Type *T) { return T; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->subtype_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->subtype_end(); }
};
template <> struct GraphTraits<const Type*> {
- typedef const Type NodeType;
+ typedef const Type *NodeRef;
typedef Type::subtype_iterator ChildIteratorType;
- static inline NodeType *getEntryNode(NodeType *T) { return T; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->subtype_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->subtype_end();
- }
+ static NodeRef getEntryNode(NodeRef T) { return T; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->subtype_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->subtype_end(); }
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
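// --- Illustrative sketch (editor's addition, not part of the patch) ---
// The fltSemantics singletons are now reached through accessor functions, so
// callers spell them as calls; Type::getFltSemantics() itself is unchanged.
static const llvm::fltSemantics &doubleSemantics() {
  return llvm::APFloat::IEEEdouble(); // previously: APFloat::IEEEdouble
}
static llvm::APFloat zeroOfType(llvm::Type *FPTy) {
  return llvm::APFloat::getZero(FPTy->getFltSemantics());
}
// --- End sketch ---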
diff --git a/include/llvm/IR/Use.h b/include/llvm/IR/Use.h
index e62eab56b1f1..ff6b2e1f1e22 100644
--- a/include/llvm/IR/Use.h
+++ b/include/llvm/IR/Use.h
@@ -27,7 +27,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/CBindingWrapping.h"
-#include <cstddef>
+#include "llvm-c/Types.h"
namespace llvm {
@@ -36,16 +36,6 @@ class User;
class Use;
template <typename> struct simplify_type;
-// Use** is only 4-byte aligned.
-template <> class PointerLikeTypeTraits<Use **> {
-public:
- static inline void *getAsVoidPointer(Use **P) { return P; }
- static inline Use **getFromVoidPointer(void *P) {
- return static_cast<Use **>(P);
- }
- enum { NumLowBitsAvailable = 2 };
-};
-
/// \brief A Use represents the edge between a Value definition and its users.
///
/// This is notionally a two-dimensional linked list. It supports traversing
@@ -65,6 +55,8 @@ public:
/// time complexity.
class Use {
public:
+ Use(const Use &U) = delete;
+
/// \brief Provide a fast substitute to std::swap<Use>
/// that also works with less standard-compliant compilers
void swap(Use &RHS);
@@ -74,8 +66,6 @@ public:
typedef PointerIntPair<User *, 1, unsigned> UserRef;
private:
- Use(const Use &U) = delete;
-
/// Destructor - Only for zap()
~Use() {
if (Val)
@@ -128,6 +118,7 @@ private:
PointerIntPair<Use **, 2, PrevPtrTag> Prev;
void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); }
+
void addToList(Use **List) {
Next = *List;
if (Next)
@@ -135,6 +126,7 @@ private:
setPrev(List);
*List = this;
}
+
void removeFromList() {
Use **StrippedPrev = Prev.getPointer();
*StrippedPrev = Next;
@@ -159,6 +151,6 @@ template <> struct simplify_type<const Use> {
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseRef)
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_USE_H
diff --git a/include/llvm/IR/UseListOrder.h b/include/llvm/IR/UseListOrder.h
index b86425b6a697..efff208295b6 100644
--- a/include/llvm/IR/UseListOrder.h
+++ b/include/llvm/IR/UseListOrder.h
@@ -34,18 +34,8 @@ struct UseListOrder {
: V(V), F(F), Shuffle(ShuffleSize) {}
UseListOrder() : V(nullptr), F(nullptr) {}
- UseListOrder(UseListOrder &&X)
- : V(X.V), F(X.F), Shuffle(std::move(X.Shuffle)) {}
- UseListOrder &operator=(UseListOrder &&X) {
- V = X.V;
- F = X.F;
- Shuffle = std::move(X.Shuffle);
- return *this;
- }
-
-private:
- UseListOrder(const UseListOrder &X) = delete;
- UseListOrder &operator=(const UseListOrder &X) = delete;
+ UseListOrder(UseListOrder &&) = default;
+ UseListOrder &operator=(UseListOrder &&) = default;
};
typedef std::vector<UseListOrder> UseListOrderStack;
diff --git a/include/llvm/IR/User.h b/include/llvm/IR/User.h
index 4d6b30cd1124..e6fe97484580 100644
--- a/include/llvm/IR/User.h
+++ b/include/llvm/IR/User.h
@@ -21,9 +21,15 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
-#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
namespace llvm {
@@ -37,9 +43,9 @@ template <class>
struct OperandTraits;
class User : public Value {
- User(const User &) = delete;
template <unsigned>
friend struct HungoffOperandTraits;
+
virtual void anchor();
LLVM_ATTRIBUTE_ALWAYS_INLINE inline static void *
@@ -88,8 +94,9 @@ protected:
void growHungoffUses(unsigned N, bool IsPhi = false);
public:
- ~User() override {
- }
+ User(const User &) = delete;
+ ~User() override = default;
+
/// \brief Free memory allocated for User and Use objects.
void operator delete(void *Usr);
/// \brief Placement delete - required by std, but never called.
@@ -100,6 +107,7 @@ public:
void operator delete(void*, unsigned, bool) {
llvm_unreachable("Constructor throws?");
}
+
protected:
template <int Idx, typename U> static Use &OpFrom(const U *that) {
return Idx < 0
@@ -112,6 +120,7 @@ protected:
template <int Idx> const Use &Op() const {
return OpFrom<Idx>(this);
}
+
private:
Use *&getHungOffOperands() { return *(reinterpret_cast<Use **>(this) - 1); }
@@ -124,6 +133,7 @@ private:
"Setting operand list only required for hung off uses");
getHungOffOperands() = NewList;
}
+
public:
Use *getOperandList() {
return HasHungOffUses ? getHungOffOperands() : getIntrusiveOperands();
@@ -131,10 +141,12 @@ public:
const Use *getOperandList() const {
return const_cast<User *>(this)->getOperandList();
}
+
Value *getOperand(unsigned i) const {
assert(i < NumUserOperands && "getOperand() out of range!");
return getOperandList()[i];
}
+
void setOperand(unsigned i, Value *Val) {
assert(i < NumUserOperands && "setOperand() out of range!");
assert((!isa<Constant>((const Value*)this) ||
@@ -142,6 +154,7 @@ public:
"Cannot mutate a constant with setOperand!");
getOperandList()[i] = Val;
}
+
const Use &getOperandUse(unsigned i) const {
assert(i < NumUserOperands && "getOperandUse() out of range!");
return getOperandList()[i];
@@ -250,9 +263,9 @@ public:
}
};
// Either Use objects, or a Use pointer can be prepended to User.
-static_assert(AlignOf<Use>::Alignment >= AlignOf<User>::Alignment,
+static_assert(alignof(Use) >= alignof(User),
"Alignment is insufficient after objects prepended to User");
-static_assert(AlignOf<Use *>::Alignment >= AlignOf<User>::Alignment,
+static_assert(alignof(Use *) >= alignof(User),
"Alignment is insufficient after objects prepended to User");
template<> struct simplify_type<User::op_iterator> {
@@ -268,6 +281,6 @@ template<> struct simplify_type<User::const_op_iterator> {
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_USER_H
diff --git a/include/llvm/IR/Value.h b/include/llvm/IR/Value.h
index f3a342dadf73..bdafbbf58cc4 100644
--- a/include/llvm/IR/Value.h
+++ b/include/llvm/IR/Value.h
@@ -18,12 +18,14 @@
#include "llvm/IR/Use.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
+#include "llvm-c/Types.h"
+#include <cassert>
+#include <iterator>
namespace llvm {
class APInt;
class Argument;
-class AssemblyAnnotationWriter;
class BasicBlock;
class Constant;
class ConstantData;
@@ -41,12 +43,10 @@ class Instruction;
class LLVMContext;
class Module;
class ModuleSlotTracker;
+class raw_ostream;
class StringRef;
class Twine;
class Type;
-class ValueHandleBase;
-class ValueSymbolTable;
-class raw_ostream;
template<typename ValueTy> class StringMapEntry;
typedef StringMapEntry<Value*> ValueName;
@@ -77,6 +77,7 @@ class Value {
const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
+
protected:
/// \brief Hold subclass data that can be dropped.
///
@@ -134,6 +135,7 @@ private:
U = U->getNext();
return *this;
}
+
use_iterator_impl operator++(int) { // Postincrement
auto tmp = *this;
++*this;
@@ -160,7 +162,7 @@ private:
friend class Value;
public:
- user_iterator_impl() {}
+ user_iterator_impl() = default;
bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
@@ -172,6 +174,7 @@ private:
++UI;
return *this;
}
+
user_iterator_impl operator++(int) { // Postincrement
auto tmp = *this;
++*this;
@@ -192,12 +195,12 @@ private:
Use &getUse() const { return *UI; }
};
- void operator=(const Value &) = delete;
- Value(const Value &) = delete;
-
protected:
Value(Type *Ty, unsigned scid);
+
public:
+ Value(const Value &) = delete;
+ void operator=(const Value &) = delete;
virtual ~Value();
/// \brief Support for debugging, callable in GDB: V->dump()
@@ -236,13 +239,15 @@ public:
private:
void destroyValueName();
+ void doRAUW(Value *New, bool NoMetadata);
void setNameImpl(const Twine &Name);
public:
/// \brief Return a constant reference to the value's name.
///
- /// This is cheap and guaranteed to return the same reference as long as the
- /// value is not modified.
+ /// This is guaranteed to return the same reference as long as the value is not
+ /// modified. If the value has a name, this does a hashtable lookup, so it's
+ /// not free.
StringRef getName() const;
/// \brief Change the name of the value.
@@ -252,7 +257,6 @@ public:
/// \param Name The new name; or "" if the value's name should be removed.
void setName(const Twine &Name);
-
/// \brief Transfer the name from V to this value.
///
/// After taking V's name, sets V's name to empty.
@@ -267,6 +271,12 @@ public:
/// guaranteed to be empty.
void replaceAllUsesWith(Value *V);
+ /// \brief Change non-metadata uses of this to point to a new Value.
+ ///
+ /// Go through the uses list for this definition and make each use point to
+ /// "V" instead of "this". This function skips metadata entries in the list.
+ void replaceNonMetadataUsesWith(Value *V);
+
/// replaceUsesOutsideBlock - Go through the uses list for this definition and
/// make each use point to "V" instead of "this" when the use is outside the
/// block. 'This's use list is expected to have at least one element.
@@ -442,17 +452,18 @@ public:
return SubclassOptionalData == V->SubclassOptionalData;
}
- /// \brief Clear any optional flags not set in the given Value.
- void intersectOptionalDataWith(const Value *V) {
- SubclassOptionalData &= V->SubclassOptionalData;
- }
-
/// \brief Return true if there is a value handle associated with this value.
bool hasValueHandle() const { return HasValueHandle; }
/// \brief Return true if there is metadata referencing this value.
bool isUsedByMetadata() const { return IsUsedByMD; }
+ /// \brief Return true if this value is a swifterror value.
+ ///
+ /// swifterror values can be either a function argument or an alloca with a
+ /// swifterror attribute.
+ bool isSwiftError() const;
+
/// \brief Strip off pointer casts, all-zero GEPs, and aliases.
///
/// Returns the original uncasted value. If this is called on a non-pointer
@@ -783,18 +794,6 @@ template <> struct isa_impl<GlobalObject, Value> {
}
};
-// Value* is only 4-byte aligned.
-template<>
-class PointerLikeTypeTraits<Value*> {
- typedef Value* PT;
-public:
- static inline void *getAsVoidPointer(PT P) { return P; }
- static inline PT getFromVoidPointer(void *P) {
- return static_cast<PT>(P);
- }
- enum { NumLowBitsAvailable = 2 };
-};
-
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)
@@ -805,9 +804,9 @@ inline Value **unwrap(LLVMValueRef *Vals) {
template<typename T>
inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
-#ifdef DEBUG
+#ifndef NDEBUG
for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
- cast<T>(*I);
+ unwrap<T>(*I); // For side effect of calling assert on invalid usage.
#endif
(void)Length;
return reinterpret_cast<T**>(Vals);
@@ -817,6 +816,6 @@ inline LLVMValueRef *wrap(const Value **Vals) {
return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_VALUE_H
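// --- Illustrative sketch (editor's addition, not part of the patch) ---
// replaceNonMetadataUsesWith() redirects IR uses while metadata operands keep
// pointing at the old value; `Old` and `New` stand in for values of matching
// type.
static void redirectIRUsesOnly(llvm::Value *Old, llvm::Value *New) {
  // Unlike replaceAllUsesWith(), uses from metadata are skipped here.
  Old->replaceNonMetadataUsesWith(New);
}
// --- End sketch ---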
diff --git a/include/llvm/IR/ValueHandle.h b/include/llvm/IR/ValueHandle.h
index 3c2805913ef5..a4d4893a9bc9 100644
--- a/include/llvm/IR/ValueHandle.h
+++ b/include/llvm/IR/ValueHandle.h
@@ -22,17 +22,6 @@ namespace llvm {
class ValueHandleBase;
template<typename From> struct simplify_type;
-// ValueHandleBase** is only 4-byte aligned.
-template<>
-class PointerLikeTypeTraits<ValueHandleBase**> {
-public:
- static inline void *getAsVoidPointer(ValueHandleBase** P) { return P; }
- static inline ValueHandleBase **getFromVoidPointer(void *P) {
- return static_cast<ValueHandleBase**>(P);
- }
- enum { NumLowBitsAvailable = 2 };
-};
-
/// \brief This is the common base class of value handles.
///
/// ValueHandle's are smart pointers to Value's that have special behavior when
diff --git a/include/llvm/IR/ValueMap.h b/include/llvm/IR/ValueMap.h
index 85379ad468c4..9648e1989f94 100644
--- a/include/llvm/IR/ValueMap.h
+++ b/include/llvm/IR/ValueMap.h
@@ -27,14 +27,20 @@
#define LLVM_IR_VALUEMAP_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/TrackingMDRef.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/UniqueLock.h"
-#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
#include <iterator>
-#include <memory>
+#include <type_traits>
+#include <utility>
namespace llvm {
@@ -77,11 +83,12 @@ struct ValueMapConfig {
};
/// See the file comment.
-template<typename KeyT, typename ValueT, typename Config =ValueMapConfig<KeyT> >
+template<typename KeyT, typename ValueT, typename Config =ValueMapConfig<KeyT>>
class ValueMap {
friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
+
typedef ValueMapCallbackVH<KeyT, ValueT, Config> ValueMapCVH;
- typedef DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH> > MapT;
+ typedef DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH>> MapT;
typedef DenseMap<const Metadata *, TrackingMDRef> MDMapT;
typedef typename Config::ExtraData ExtraData;
MapT Map;
@@ -90,8 +97,6 @@ class ValueMap {
bool MayMapMetadata = true;
- ValueMap(const ValueMap&) = delete;
- ValueMap& operator=(const ValueMap&) = delete;
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
@@ -102,6 +107,8 @@ public:
: Map(NumInitBuckets), Data() {}
explicit ValueMap(const ExtraData &Data, unsigned NumInitBuckets = 64)
: Map(NumInitBuckets), Data(Data) {}
+ ValueMap(const ValueMap &) = delete;
+ ValueMap &operator=(const ValueMap &) = delete;
bool hasMD() const { return bool(MDMap); }
MDMapT &MD() {
@@ -183,7 +190,6 @@ public:
insert(*I);
}
-
bool erase(const KeyT &Val) {
typename MapT::iterator I = Map.find_as(Val);
if (I == Map.end())
@@ -237,6 +243,7 @@ template <typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH final : public CallbackVH {
friend class ValueMap<KeyT, ValueT, Config>;
friend struct DenseMapInfo<ValueMapCallbackVH>;
+
typedef ValueMap<KeyT, ValueT, Config> ValueMapT;
typedef typename std::remove_pointer<KeyT>::type KeySansPointerT;
@@ -262,6 +269,7 @@ public:
Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
Copy.Map->Map.erase(Copy); // Definitely destroys *this.
}
+
void allUsesReplacedWith(Value *new_key) override {
assert(isa<KeySansPointerT>(new_key) &&
"Invalid RAUW on key of ValueMap<>");
@@ -289,30 +297,34 @@ public:
};
template<typename KeyT, typename ValueT, typename Config>
-struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
+struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> {
typedef ValueMapCallbackVH<KeyT, ValueT, Config> VH;
static inline VH getEmptyKey() {
return VH(DenseMapInfo<Value *>::getEmptyKey());
}
+
static inline VH getTombstoneKey() {
return VH(DenseMapInfo<Value *>::getTombstoneKey());
}
+
static unsigned getHashValue(const VH &Val) {
return DenseMapInfo<KeyT>::getHashValue(Val.Unwrap());
}
+
static unsigned getHashValue(const KeyT &Val) {
return DenseMapInfo<KeyT>::getHashValue(Val);
}
+
static bool isEqual(const VH &LHS, const VH &RHS) {
return LHS == RHS;
}
+
static bool isEqual(const KeyT &LHS, const VH &RHS) {
return LHS == RHS.getValPtr();
}
};
-
template<typename DenseMapT, typename KeyT>
class ValueMapIterator :
public std::iterator<std::forward_iterator_tag,
@@ -320,10 +332,11 @@ class ValueMapIterator :
ptrdiff_t> {
typedef typename DenseMapT::iterator BaseT;
typedef typename DenseMapT::mapped_type ValueT;
+
BaseT I;
+
public:
ValueMapIterator() : I() {}
-
ValueMapIterator(BaseT I) : I(I) {}
BaseT base() const { return I; }
@@ -369,7 +382,9 @@ class ValueMapConstIterator :
ptrdiff_t> {
typedef typename DenseMapT::const_iterator BaseT;
typedef typename DenseMapT::mapped_type ValueT;
+
BaseT I;
+
public:
ValueMapConstIterator() : I() {}
ValueMapConstIterator(BaseT I) : I(I) {}
@@ -414,4 +429,4 @@ public:
} // end namespace llvm
-#endif
+#endif // LLVM_IR_VALUEMAP_H
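
A short usage sketch for ValueMap (illustrative, not from the patch): entries track their key, so they survive RAUW and are dropped when the key Value is deleted, per the default ValueMapConfig.

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstIterator.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/ValueMap.h"

    // Number each instruction; the map stays consistent even if instructions
    // are later RAUW'd or erased.
    static void numberInstructions(llvm::Function &F) {
      llvm::ValueMap<llvm::Instruction *, unsigned> Order;
      unsigned N = 0;
      for (llvm::Instruction &I : llvm::instructions(F))
        Order[&I] = N++;
    }
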
diff --git a/include/llvm/IR/Verifier.h b/include/llvm/IR/Verifier.h
index fdb6ce400a8d..71f727c3d4fc 100644
--- a/include/llvm/IR/Verifier.h
+++ b/include/llvm/IR/Verifier.h
@@ -30,6 +30,49 @@ class FunctionPass;
class ModulePass;
class Module;
class raw_ostream;
+struct VerifierSupport;
+
+/// Verify that the TBAA metadata attached to instructions is valid.
+class TBAAVerifier {
+ VerifierSupport *Diagnostic = nullptr;
+
+ /// Helper to diagnose a failure.
+ template <typename... Tys> void CheckFailed(Tys &&... Args);
+
+ /// Cache of TBAA base nodes that have already been visited. This cache maps
+ /// a node that has been visited to a pair (IsInvalid, BitWidth) where
+ ///
+ /// \c IsInvalid is true iff the node is invalid.
+ /// \c BitWidth, if non-zero, is the bitwidth of the integer used to denote
+ /// the offset of the access. If zero, only a zero offset is allowed.
+ ///
+ /// \c BitWidth has no meaning if \c IsInvalid is true.
+ typedef std::pair<bool, unsigned> TBAABaseNodeSummary;
+ DenseMap<const MDNode *, TBAABaseNodeSummary> TBAABaseNodes;
+
+ /// Maps an alleged scalar TBAA node to a boolean that is true if the said
+ /// TBAA node is a valid scalar TBAA node or false otherwise.
+ DenseMap<const MDNode *, bool> TBAAScalarNodes;
+
+ /// \name Helper functions used by \c visitTBAAMetadata.
+ /// @{
+ MDNode *getFieldNodeFromTBAABaseNode(Instruction &I, const MDNode *BaseNode,
+ APInt &Offset);
+ TBAAVerifier::TBAABaseNodeSummary verifyTBAABaseNode(Instruction &I,
+ const MDNode *BaseNode);
+ TBAABaseNodeSummary verifyTBAABaseNodeImpl(Instruction &I,
+ const MDNode *BaseNode);
+
+ bool isValidScalarTBAANode(const MDNode *MD);
+ /// @}
+
+public:
+ TBAAVerifier(VerifierSupport *Diagnostic = nullptr)
+ : Diagnostic(Diagnostic) {}
+ /// Visit an instruction and return true if it is valid; return false if
+ /// invalid TBAA metadata is attached.
+ bool visitTBAAMetadata(Instruction &I, const MDNode *MD);
+};
/// \brief Check a function for errors, useful for use when debugging a
/// pass.
@@ -58,13 +101,12 @@ FunctionPass *createVerifierPass(bool FatalErrors = true);
/// and debug info errors.
class VerifierAnalysis : public AnalysisInfoMixin<VerifierAnalysis> {
friend AnalysisInfoMixin<VerifierAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
struct Result {
bool IRBroken, DebugInfoBroken;
};
- static void *ID() { return (void *)&PassID; }
Result run(Module &M, ModuleAnalysisManager &);
Result run(Function &F, FunctionAnalysisManager &);
};
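
A sketch of driving the new TBAAVerifier by hand (illustrative; that a null VerifierSupport merely suppresses diagnostics is an assumption based on the defaulted constructor argument):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstIterator.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Verifier.h"

    // Returns true if every !tbaa attachment in F passes the TBAA checks.
    static bool hasValidTBAA(llvm::Function &F) {
      llvm::TBAAVerifier TV; // no VerifierSupport: assumed to stay silent
      for (llvm::Instruction &I : llvm::instructions(F))
        if (llvm::MDNode *MD = I.getMetadata(llvm::LLVMContext::MD_tbaa))
          if (!TV.visitTBAAMetadata(I, MD))
            return false;
      return true;
    }
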
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index 90ff82fe86d4..a34ebaf18a03 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -46,6 +46,9 @@ void initializeInstrumentation(PassRegistry&);
/// Initialize all passes linked into the Analysis library.
void initializeAnalysis(PassRegistry&);
+/// Initialize all passes linked into the Coroutines library.
+void initializeCoroutines(PassRegistry&);
+
/// Initialize all passes linked into the CodeGen library.
void initializeCodeGen(PassRegistry&);
@@ -63,7 +66,7 @@ void initializeAddressSanitizerModulePass(PassRegistry&);
void initializeAddressSanitizerPass(PassRegistry&);
void initializeAliasSetPrinterPass(PassRegistry&);
void initializeAlignmentFromAssumptionsPass(PassRegistry&);
-void initializeAlwaysInlinerPass(PassRegistry&);
+void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
void initializeArgPromotionPass(PassRegistry&);
void initializeAssumptionCacheTrackerPass(PassRegistry &);
void initializeAtomicExpandPass(PassRegistry&);
@@ -76,12 +79,13 @@ void initializeBlockFrequencyInfoWrapperPassPass(PassRegistry&);
void initializeBoundsCheckingPass(PassRegistry&);
void initializeBranchFolderPassPass(PassRegistry&);
void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&);
+void initializeBranchRelaxationPass(PassRegistry&);
void initializeBreakCriticalEdgesPass(PassRegistry&);
-void initializeCFGOnlyPrinterPass(PassRegistry&);
-void initializeCFGOnlyViewerPass(PassRegistry&);
-void initializeCFGPrinterPass(PassRegistry&);
+void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&);
+void initializeCFGPrinterLegacyPassPass(PassRegistry&);
+void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&);
void initializeCFGSimplifyPassPass(PassRegistry&);
-void initializeCFGViewerPass(PassRegistry&);
+void initializeCFGViewerLegacyPassPass(PassRegistry&);
void initializeCFLAndersAAWrapperPassPass(PassRegistry&);
void initializeCFLSteensAAWrapperPassPass(PassRegistry&);
void initializeCallGraphDOTPrinterPass(PassRegistry&);
@@ -89,6 +93,7 @@ void initializeCallGraphPrinterLegacyPassPass(PassRegistry&);
void initializeCallGraphViewerPass(PassRegistry&);
void initializeCallGraphWrapperPassPass(PassRegistry &);
void initializeCodeGenPreparePass(PassRegistry&);
+void initializeCountingFunctionInserterPass(PassRegistry&);
void initializeConstantHoistingLegacyPassPass(PassRegistry&);
void initializeConstantMergeLegacyPassPass(PassRegistry &);
void initializeConstantPropagationPass(PassRegistry&);
@@ -116,10 +121,12 @@ void initializeDominanceFrontierWrapperPassPass(PassRegistry&);
void initializeDominatorTreeWrapperPassPass(PassRegistry&);
void initializeDwarfEHPreparePass(PassRegistry&);
void initializeEarlyCSELegacyPassPass(PassRegistry &);
+void initializeEarlyCSEMemSSALegacyPassPass(PassRegistry &);
void initializeEarlyIfConverterPass(PassRegistry&);
void initializeEdgeBundlesPass(PassRegistry&);
void initializeEfficiencySanitizerPass(PassRegistry&);
void initializeEliminateAvailableExternallyLegacyPassPass(PassRegistry &);
+void initializeRAGreedyPass(PassRegistry&);
void initializeGVNHoistLegacyPassPass(PassRegistry &);
void initializeExpandISelPseudosPass(PassRegistry&);
void initializeExpandPostRAPass(PassRegistry&);
@@ -130,7 +137,7 @@ void initializeFloat2IntLegacyPassPass(PassRegistry&);
void initializeForceFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeForwardControlFlowIntegrityPass(PassRegistry&);
void initializeFuncletLayoutPass(PassRegistry &);
-void initializeFunctionImportPassPass(PassRegistry &);
+void initializeFunctionImportLegacyPassPass(PassRegistry &);
void initializeGCMachineCodeAnalysisPass(PassRegistry&);
void initializeGCModuleInfoPass(PassRegistry&);
void initializeGCOVProfilerLegacyPassPass(PassRegistry&);
@@ -138,6 +145,7 @@ void initializeGVNLegacyPassPass(PassRegistry&);
void initializeGlobalDCELegacyPassPass(PassRegistry&);
void initializeGlobalMergePass(PassRegistry&);
void initializeGlobalOptLegacyPassPass(PassRegistry&);
+void initializeGlobalSplitPass(PassRegistry&);
void initializeGlobalsAAWrapperPassPass(PassRegistry&);
void initializeGuardWideningLegacyPassPass(PassRegistry&);
void initializeIPCPPass(PassRegistry&);
@@ -155,14 +163,20 @@ void initializeInstNamerPass(PassRegistry&);
void initializeInstSimplifierPass(PassRegistry&);
void initializeInstrProfilingLegacyPassPass(PassRegistry &);
void initializeInstructionCombiningPassPass(PassRegistry&);
+void initializeInstructionSelectPass(PassRegistry &);
void initializeInterleavedAccessPass(PassRegistry &);
void initializeInternalizeLegacyPassPass(PassRegistry&);
void initializeIntervalPartitionPass(PassRegistry&);
void initializeJumpThreadingPass(PassRegistry&);
-void initializeLCSSAWrapperPassPass(PassRegistry &);
+void initializeLCSSAWrapperPassPass(PassRegistry&);
+void initializeLCSSAVerificationPassPass(PassRegistry&);
void initializeLegacyLICMPassPass(PassRegistry&);
+void initializeLegacyLoopSinkPassPass(PassRegistry&);
+void initializeLazyBranchProbabilityInfoPassPass(PassRegistry&);
void initializeLazyBlockFrequencyInfoPassPass(PassRegistry&);
void initializeLazyValueInfoWrapperPassPass(PassRegistry&);
+void initializeLegalizerPass(PassRegistry&);
+void initializeLibCallsShrinkWrapLegacyPassPass(PassRegistry&);
void initializeLintPass(PassRegistry&);
void initializeLiveDebugValuesPass(PassRegistry&);
void initializeLiveDebugVariablesPass(PassRegistry&);
@@ -175,7 +189,7 @@ void initializeLoaderPassPass(PassRegistry&);
void initializeLoadStoreVectorizerPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLoopAccessLegacyAnalysisPass(PassRegistry&);
-void initializeLoopDataPrefetchPass(PassRegistry&);
+void initializeLoopDataPrefetchLegacyPassPass(PassRegistry &);
void initializeLoopDeletionLegacyPassPass(PassRegistry&);
void initializeLoopDistributeLegacyPass(PassRegistry&);
void initializeLoopExtractorPass(PassRegistry&);
@@ -198,9 +212,9 @@ void initializeLoopVersioningPassPass(PassRegistry &);
void initializeLowerAtomicLegacyPassPass(PassRegistry &);
void initializeLowerEmuTLSPass(PassRegistry&);
void initializeLowerExpectIntrinsicPass(PassRegistry&);
-void initializeLowerGuardIntrinsicPass(PassRegistry&);
+void initializeLowerGuardIntrinsicLegacyPassPass(PassRegistry&);
void initializeLowerIntrinsicsPass(PassRegistry&);
-void initializeLowerInvokePass(PassRegistry&);
+void initializeLowerInvokeLegacyPassPass(PassRegistry&);
void initializeLowerSwitchPass(PassRegistry&);
void initializeLowerTypeTestsPass(PassRegistry&);
void initializeMIRPrintingPassPass(PassRegistry&);
@@ -217,6 +231,7 @@ void initializeMachineFunctionPrinterPassPass(PassRegistry&);
void initializeMachineLICMPass(PassRegistry&);
void initializeMachineLoopInfoPass(PassRegistry&);
void initializeMachineModuleInfoPass(PassRegistry&);
+void initializeMachinePipelinerPass(PassRegistry&);
void initializeMachinePostDominatorTreePass(PassRegistry&);
void initializeMachineRegionInfoPassPass(PassRegistry&);
void initializeMachineSchedulerPass(PassRegistry&);
@@ -235,9 +250,9 @@ void initializeMergedLoadStoreMotionLegacyPassPass(PassRegistry &);
void initializeMetaRenamerPass(PassRegistry&);
void initializeModuleDebugInfoPrinterPass(PassRegistry&);
void initializeModuleSummaryIndexWrapperPassPass(PassRegistry &);
-void initializeNameAnonFunctionPass(PassRegistry &);
-void initializeNaryReassociatePass(PassRegistry&);
-void initializeNoAAPass(PassRegistry&);
+void initializeNameAnonGlobalLegacyPassPass(PassRegistry &);
+void initializeNaryReassociateLegacyPassPass(PassRegistry &);
+void initializeNewGVNPass(PassRegistry&);
void initializeObjCARCAAWrapperPassPass(PassRegistry&);
void initializeObjCARCAPElimPass(PassRegistry&);
void initializeObjCARCContractPass(PassRegistry&);
@@ -284,10 +299,12 @@ void initializeRegionOnlyViewerPass(PassRegistry&);
void initializeRegionPrinterPass(PassRegistry&);
void initializeRegionViewerPass(PassRegistry&);
void initializeRegisterCoalescerPass(PassRegistry&);
+void initializeStripGCRelocatesPass(PassRegistry&);
void initializeRenameIndependentSubregsPass(PassRegistry&);
+void initializeResetMachineFunctionPass(PassRegistry &);
void initializeReversePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeRewriteStatepointsForGCPass(PassRegistry&);
-void initializeRewriteSymbolsPass(PassRegistry&);
+void initializeRewriteSymbolsLegacyPassPass(PassRegistry&);
void initializeSCCPLegacyPassPass(PassRegistry &);
void initializeSCEVAAWrapperPassPass(PassRegistry&);
void initializeSLPVectorizerPass(PassRegistry&);
@@ -306,7 +323,7 @@ void initializeSingleLoopExtractorPass(PassRegistry&);
void initializeSinkingLegacyPassPass(PassRegistry&);
void initializeSjLjEHPreparePass(PassRegistry&);
void initializeSlotIndexesPass(PassRegistry&);
-void initializeSpeculativeExecutionPass(PassRegistry&);
+void initializeSpeculativeExecutionLegacyPassPass(PassRegistry&);
void initializeSpillPlacementPass(PassRegistry&);
void initializeStackColoringPass(PassRegistry&);
void initializeStackMapLivenessPass(PassRegistry&);
@@ -317,6 +334,7 @@ void initializeStripDeadDebugInfoPass(PassRegistry&);
void initializeStripDeadPrototypesLegacyPassPass(PassRegistry&);
void initializeStripDebugDeclarePass(PassRegistry&);
void initializeStripNonDebugSymbolsPass(PassRegistry&);
+void initializeStripNonLineTableDebugInfoPass(PassRegistry&);
void initializeStripSymbolsPass(PassRegistry&);
void initializeStructurizeCFGPass(PassRegistry&);
void initializeTailCallElimPass(PassRegistry&);
@@ -337,6 +355,7 @@ void initializeVirtRegRewriterPass(PassRegistry&);
void initializeWholeProgramDevirtPass(PassRegistry &);
void initializeWinEHPreparePass(PassRegistry&);
void initializeWriteBitcodePassPass(PassRegistry &);
+void initializeWriteThinLTOBitcodePass(PassRegistry &);
void initializeXRayInstrumentationPass(PassRegistry &);
}
diff --git a/include/llvm/LTO/Caching.h b/include/llvm/LTO/Caching.h
new file mode 100644
index 000000000000..3b96bd1dc301
--- /dev/null
+++ b/include/llvm/LTO/Caching.h
@@ -0,0 +1,37 @@
+//===- Caching.h - LLVM Link Time Optimizer Cache Handling ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the localCache function, which allows clients to add a
+// filesystem cache to ThinLTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_CACHING_H
+#define LLVM_LTO_CACHING_H
+
+#include "llvm/LTO/LTO.h"
+#include <string>
+
+namespace llvm {
+namespace lto {
+
+/// This type defines the callback to add a pre-existing native object file
+/// (e.g. in a cache).
+///
+/// File callbacks must be thread safe.
+typedef std::function<void(unsigned Task, StringRef Path)> AddFileFn;
+
+/// Create a local file system cache which uses the given cache directory and
+/// file callback.
+NativeObjectCache localCache(std::string CacheDirectoryPath, AddFileFn AddFile);
+
+} // namespace lto
+} // namespace llvm
+
+#endif
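
A sketch of plugging the cache into a link (illustrative; the directory name and the callback body are placeholders):

    #include "llvm/LTO/Caching.h"

    // Build a NativeObjectCache backed by a local directory. On a cache hit
    // the callback receives the path of the pre-built object for that task.
    static llvm::lto::NativeObjectCache makeLocalCache() {
      return llvm::lto::localCache("lto.cache",
                                   [](unsigned Task, llvm::StringRef Path) {
                                     // A linker would record Path as Task's
                                     // output object here.
                                   });
    }
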
diff --git a/include/llvm/LTO/Config.h b/include/llvm/LTO/Config.h
new file mode 100644
index 000000000000..3aa48c9f7c28
--- /dev/null
+++ b/include/llvm/LTO/Config.h
@@ -0,0 +1,181 @@
+//===- Config.h - LLVM Link Time Optimizer Configuration ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the lto::Config data structure, which allows clients to
+// configure LTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_CONFIG_H
+#define LLVM_LTO_CONFIG_H
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetOptions.h"
+
+#include <functional>
+
+namespace llvm {
+
+class Error;
+class Module;
+class ModuleSummaryIndex;
+class raw_pwrite_stream;
+
+namespace lto {
+
+/// LTO configuration. A linker can configure LTO by setting fields in this data
+/// structure and passing it to the lto::LTO constructor.
+struct Config {
+ // Note: when adding fields here, consider whether they need to be added to
+ // computeCacheKey in LTO.cpp.
+ std::string CPU;
+ TargetOptions Options;
+ std::vector<std::string> MAttrs;
+ Reloc::Model RelocModel = Reloc::PIC_;
+ CodeModel::Model CodeModel = CodeModel::Default;
+ CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
+ unsigned OptLevel = 2;
+ bool DisableVerify = false;
+
+ /// Entirely disable the optimizer, including function importing for ThinLTO.
+ bool CodeGenOnly = false;
+
+ /// If this field is set, the set of passes run in the middle-end optimizer
+ /// will be the one specified by the string. Only works with the new pass
+ /// manager as the old one doesn't have this ability.
+ std::string OptPipeline;
+
+ /// If this field is set, it has the same effect as specifying an AA pipeline
+ /// identified by the string. Only works with the new pass manager, in
+ /// conjunction with OptPipeline.
+ std::string AAPipeline;
+
+ /// Setting this field will replace target triples in input files with this
+ /// triple.
+ std::string OverrideTriple;
+
+ /// Setting this field will replace unspecified target triples in input files
+ /// with this triple.
+ std::string DefaultTriple;
+
+ /// Sample PGO profile path.
+ std::string SampleProfile;
+
+ bool ShouldDiscardValueNames = true;
+ DiagnosticHandlerFunction DiagHandler;
+
+ /// If this field is set, LTO will write input file paths and symbol
+ /// resolutions here in llvm-lto2 command line flag format. This can be
+ /// used for testing and for running the LTO pipeline outside of the linker
+ /// with llvm-lto2.
+ std::unique_ptr<raw_ostream> ResolutionFile;
+
+ /// The following callbacks deal with tasks, which normally represent the
+ /// entire optimization and code generation pipeline for what will become a
+ /// single native object file. Each task has a unique identifier between 0 and
+ /// getMaxTasks()-1, which is supplied to the callback via the Task parameter.
+ /// A task represents the entire pipeline for ThinLTO and regular
+ /// (non-parallel) LTO, but a parallel code generation task will be split into
+ /// N tasks before code generation, where N is the parallelism level.
+ ///
+ /// LTO may decide to stop processing a task at any time, for example if the
+ /// module is empty or if a module hook (see below) returns false. For this
+ /// reason, the client should not expect to receive exactly getMaxTasks()
+ /// native object files.
+
+ /// A module hook may be used by a linker to perform actions during the LTO
+ /// pipeline. For example, a linker may use this function to implement
+ /// -save-temps. If this function returns false, any further processing for
+ /// that task is aborted.
+ ///
+ /// Module hooks must be thread safe with respect to the linker's internal
+ /// data structures. A module hook will never be called concurrently from
+ /// multiple threads with the same task ID, or the same module.
+ ///
+ /// Note that in out-of-process backend scenarios, none of the hooks will be
+ /// called for ThinLTO tasks.
+ typedef std::function<bool(unsigned Task, const Module &)> ModuleHookFn;
+
+ /// This module hook is called after linking (regular LTO) or loading
+ /// (ThinLTO) the module, before modifying it.
+ ModuleHookFn PreOptModuleHook;
+
+ /// This hook is called after promoting any internal functions
+ /// (ThinLTO-specific).
+ ModuleHookFn PostPromoteModuleHook;
+
+ /// This hook is called after internalizing the module.
+ ModuleHookFn PostInternalizeModuleHook;
+
+ /// This hook is called after importing from other modules (ThinLTO-specific).
+ ModuleHookFn PostImportModuleHook;
+
+ /// This module hook is called after optimization is complete.
+ ModuleHookFn PostOptModuleHook;
+
+ /// This module hook is called before code generation. It is similar to the
+ /// PostOptModuleHook, but for parallel code generation it is called after
+ /// splitting the module.
+ ModuleHookFn PreCodeGenModuleHook;
+
+ /// A combined index hook is called after all per-module indexes have been
+ /// combined (ThinLTO-specific). It can be used to implement -save-temps for
+ /// the combined index.
+ ///
+ /// If this function returns false, any further processing for ThinLTO tasks
+ /// is aborted.
+ ///
+ /// It is called regardless of whether the backend is in-process, although it
+ /// is not called from individual backend processes.
+ typedef std::function<bool(const ModuleSummaryIndex &Index)>
+ CombinedIndexHookFn;
+ CombinedIndexHookFn CombinedIndexHook;
+
+ /// This is a convenience function that configures this Config object to write
+ /// temporary files named after the given OutputFileName for each of the LTO
+ /// phases to disk. A client can use this function to implement -save-temps.
+ ///
+ /// FIXME: Temporary files derived from ThinLTO backends are currently named
+ /// after the input file name, rather than the output file name, when
+ /// UseInputModulePath is set to true.
+ ///
+ /// Specifically, it (1) sets each of the above module hooks and the combined
+ /// index hook to a function that calls the hook function (if any) that was
+ /// present in the appropriate field when the addSaveTemps function was
+ /// called, and writes the module to a bitcode file with a name prefixed by
+ /// the given output file name, and (2) creates a resolution file whose name
+ /// is prefixed by the given output file name and sets ResolutionFile to its
+ /// file handle.
+ Error addSaveTemps(std::string OutputFileName,
+ bool UseInputModulePath = false);
+};
+
+/// A derived class of LLVMContext that initializes itself according to a given
+/// Config object. The purpose of this class is to tie ownership of the
+/// diagnostic handler to the context, as opposed to the Config object (which
+/// may be ephemeral).
+struct LTOLLVMContext : LLVMContext {
+ static void funcDiagHandler(const DiagnosticInfo &DI, void *Context) {
+ auto *Fn = static_cast<DiagnosticHandlerFunction *>(Context);
+ (*Fn)(DI);
+ }
+
+ LTOLLVMContext(const Config &C) : DiagHandler(C.DiagHandler) {
+ setDiscardValueNames(C.ShouldDiscardValueNames);
+ enableDebugTypeODRUniquing();
+ setDiagnosticHandler(funcDiagHandler, &DiagHandler, true);
+ }
+ DiagnosticHandlerFunction DiagHandler;
+};
+
+}
+}
+
+#endif
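
A hedged sketch of filling in an lto::Config on the linker side (all values are placeholders; per the FIXME on the LTO constructor, DiagHandler currently has to be set):

    #include "llvm/LTO/Config.h"
    #include "llvm/Support/Error.h"

    static llvm::lto::Config makeConfig() {
      llvm::lto::Config C;
      C.OptLevel = 2;          // middle-end optimization level
      C.DisableVerify = false;
      C.DiagHandler = [](const llvm::DiagnosticInfo &DI) {
        // Route DI into the linker's own diagnostic reporting here.
      };
      // Optional: dump per-phase bitcode, e.g. to implement -save-temps.
      if (llvm::Error E = C.addSaveTemps("a.out")) // output name illustrative
        llvm::consumeError(std::move(E));
      return C;
    }
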
diff --git a/include/llvm/LTO/LTO.h b/include/llvm/LTO/LTO.h
index 5154c0007aaa..bc435702157e 100644
--- a/include/llvm/LTO/LTO.h
+++ b/include/llvm/LTO/LTO.h
@@ -16,39 +16,28 @@
#ifndef LLVM_LTO_LTO_H
#define LLVM_LTO_LTO_H
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/LTO/Config.h"
+#include "llvm/Linker/IRMover.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Support/thread.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
namespace llvm {
+class BitcodeModule;
+class Error;
class LLVMContext;
class MemoryBufferRef;
class Module;
-
-/// Helper to load a module from bitcode.
-std::unique_ptr<Module> loadModuleFromBuffer(const MemoryBufferRef &Buffer,
- LLVMContext &Context, bool Lazy);
-
-/// Provide a "loader" for the FunctionImporter to access function from other
-/// modules.
-class ModuleLoader {
- /// The context that will be used for importing.
- LLVMContext &Context;
-
- /// Map from Module identifier to MemoryBuffer. Used by clients like the
- /// FunctionImported to request loading a Module.
- StringMap<MemoryBufferRef> &ModuleMap;
-
-public:
- ModuleLoader(LLVMContext &Context, StringMap<MemoryBufferRef> &ModuleMap)
- : Context(Context), ModuleMap(ModuleMap) {}
-
- /// Load a module on demand.
- std::unique_ptr<Module> operator()(StringRef Identifier) {
- return loadModuleFromBuffer(ModuleMap[Identifier], Context, /*Lazy*/ true);
- }
-};
-
+class Target;
+class raw_pwrite_stream;
/// Resolve Weak and LinkOnce values in the \p Index. Linkage changes recorded
/// in the index and the ThinLTO backends must apply the changes to the Module
@@ -69,6 +58,400 @@ void thinLTOResolveWeakForLinkerInIndex(
void thinLTOInternalizeAndPromoteInIndex(
ModuleSummaryIndex &Index,
function_ref<bool(StringRef, GlobalValue::GUID)> isExported);
-}
+
+namespace lto {
+
+/// Given the original \p Path to an output file, replace any path
+/// prefix matching \p OldPrefix with \p NewPrefix. Also, create the
+/// resulting directory if it does not yet exist.
+std::string getThinLTOOutputFile(const std::string &Path,
+ const std::string &OldPrefix,
+ const std::string &NewPrefix);
+
+class LTO;
+struct SymbolResolution;
+class ThinBackendProc;
+
+/// An input file. This is a wrapper for ModuleSymbolTable that exposes only the
+/// information that an LTO client should need in order to do symbol resolution.
+class InputFile {
+ // FIXME: Remove LTO class friendship once we have bitcode symbol tables.
+ friend LTO;
+ InputFile() = default;
+
+ // FIXME: Remove the LLVMContext once we have bitcode symbol tables.
+ LLVMContext Ctx;
+ struct InputModule;
+ std::vector<InputModule> Mods;
+ ModuleSymbolTable SymTab;
+
+ std::vector<StringRef> Comdats;
+ DenseMap<const Comdat *, unsigned> ComdatMap;
+
+public:
+ ~InputFile();
+
+ /// Create an InputFile.
+ static Expected<std::unique_ptr<InputFile>> create(MemoryBufferRef Object);
+
+ class symbol_iterator;
+
+ /// This is a wrapper for ArrayRef<ModuleSymbolTable::Symbol>::iterator that
+ /// exposes only the information that an LTO client should need in order to do
+ /// symbol resolution.
+ ///
+ /// This object is ephemeral; it is only valid as long as an iterator obtained
+ /// from symbols() refers to it.
+ class Symbol {
+ friend symbol_iterator;
+ friend LTO;
+
+ ArrayRef<ModuleSymbolTable::Symbol>::iterator I;
+ const ModuleSymbolTable &SymTab;
+ const InputFile *File;
+ uint32_t Flags;
+ SmallString<64> Name;
+
+ bool shouldSkip() {
+ return !(Flags & object::BasicSymbolRef::SF_Global) ||
+ (Flags & object::BasicSymbolRef::SF_FormatSpecific);
+ }
+
+ void skip() {
+ ArrayRef<ModuleSymbolTable::Symbol>::iterator E = SymTab.symbols().end();
+ while (I != E) {
+ Flags = SymTab.getSymbolFlags(*I);
+ if (!shouldSkip())
+ break;
+ ++I;
+ }
+ if (I == E)
+ return;
+
+ Name.clear();
+ {
+ raw_svector_ostream OS(Name);
+ SymTab.printSymbolName(OS, *I);
+ }
+ }
+
+ bool isGV() const { return I->is<GlobalValue *>(); }
+ GlobalValue *getGV() const { return I->get<GlobalValue *>(); }
+
+ public:
+ Symbol(ArrayRef<ModuleSymbolTable::Symbol>::iterator I,
+ const ModuleSymbolTable &SymTab, const InputFile *File)
+ : I(I), SymTab(SymTab), File(File) {
+ skip();
+ }
+
+ /// Returns the mangled name of the global.
+ StringRef getName() const { return Name; }
+
+ uint32_t getFlags() const { return Flags; }
+ GlobalValue::VisibilityTypes getVisibility() const {
+ if (isGV())
+ return getGV()->getVisibility();
+ return GlobalValue::DefaultVisibility;
+ }
+ bool canBeOmittedFromSymbolTable() const {
+ return isGV() && llvm::canBeOmittedFromSymbolTable(getGV());
+ }
+ bool isTLS() const {
+ // FIXME: Expose a thread-local flag for module asm symbols.
+ return isGV() && getGV()->isThreadLocal();
+ }
+
+ // Returns the index of the comdat this symbol is in or -1 if the symbol
+ // is not in a comdat.
+ // FIXME: We have to return Expected<int> because aliases point to an
+ // arbitrary ConstantExpr and that might not actually be a constant. That
+ // means we might not be able to find what an alias is aliased to and
+ // so find its comdat.
+ Expected<int> getComdatIndex() const;
+
+ uint64_t getCommonSize() const {
+ assert(Flags & object::BasicSymbolRef::SF_Common);
+ if (!isGV())
+ return 0;
+ return getGV()->getParent()->getDataLayout().getTypeAllocSize(
+ getGV()->getType()->getElementType());
+ }
+ unsigned getCommonAlignment() const {
+ assert(Flags & object::BasicSymbolRef::SF_Common);
+ if (!isGV())
+ return 0;
+ return getGV()->getAlignment();
+ }
+ };
+
+ class symbol_iterator {
+ Symbol Sym;
+
+ public:
+ symbol_iterator(ArrayRef<ModuleSymbolTable::Symbol>::iterator I,
+ const ModuleSymbolTable &SymTab, const InputFile *File)
+ : Sym(I, SymTab, File) {}
+
+ symbol_iterator &operator++() {
+ ++Sym.I;
+ Sym.skip();
+ return *this;
+ }
+
+ symbol_iterator operator++(int) {
+ symbol_iterator I = *this;
+ ++*this;
+ return I;
+ }
+
+ const Symbol &operator*() const { return Sym; }
+ const Symbol *operator->() const { return &Sym; }
+
+ bool operator!=(const symbol_iterator &Other) const {
+ return Sym.I != Other.Sym.I;
+ }
+ };
+
+ /// A range over the symbols in this InputFile.
+ iterator_range<symbol_iterator> symbols() {
+ return llvm::make_range(
+ symbol_iterator(SymTab.symbols().begin(), SymTab, this),
+ symbol_iterator(SymTab.symbols().end(), SymTab, this));
+ }
+
+ /// Returns the path to the InputFile.
+ StringRef getName() const;
+
+ /// Returns the source file path specified at compile time.
+ StringRef getSourceFileName() const;
+
+ // Returns a table with all the comdats used by this file.
+ ArrayRef<StringRef> getComdatTable() const { return Comdats; }
+
+private:
+ iterator_range<symbol_iterator> module_symbols(InputModule &IM);
+};
+
+/// This class wraps an output stream for a native object. Most clients should
+/// just be able to return an instance of this base class from the stream
+/// callback, but if a client needs to perform some action after the stream is
+/// written to, that can be done by deriving from this class and overriding the
+/// destructor.
+class NativeObjectStream {
+public:
+ NativeObjectStream(std::unique_ptr<raw_pwrite_stream> OS) : OS(std::move(OS)) {}
+ std::unique_ptr<raw_pwrite_stream> OS;
+ virtual ~NativeObjectStream() = default;
+};
+
+/// This type defines the callback to add a native object that is generated on
+/// the fly.
+///
+/// Stream callbacks must be thread safe.
+typedef std::function<std::unique_ptr<NativeObjectStream>(unsigned Task)>
+ AddStreamFn;
+
+/// This is the type of a native object cache. To request an item from the
+/// cache, pass a unique string as the Key. For hits, the cached file will be
+/// added to the link and this function will return AddStreamFn(). For misses,
+/// the cache will return a stream callback which must be called at most once to
+/// produce content for the stream. The native object stream produced by the
+/// stream callback will add the file to the link after the stream is written
+/// to.
+///
+/// Clients generally look like this:
+///
+/// if (AddStreamFn AddStream = Cache(Task, Key))
+/// ProduceContent(AddStream);
+typedef std::function<AddStreamFn(unsigned Task, StringRef Key)>
+ NativeObjectCache;
+
+/// A ThinBackend defines what happens after the thin-link phase during ThinLTO.
+/// The details of this type definition aren't important; clients can only
+/// create a ThinBackend using one of the create*ThinBackend() functions below.
+typedef std::function<std::unique_ptr<ThinBackendProc>(
+ Config &C, ModuleSummaryIndex &CombinedIndex,
+ StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+ AddStreamFn AddStream, NativeObjectCache Cache)>
+ ThinBackend;
+
+/// This ThinBackend runs the individual backend jobs in-process.
+ThinBackend createInProcessThinBackend(unsigned ParallelismLevel);
+
+/// This ThinBackend writes individual module indexes to files, instead of
+/// running the individual backend jobs. This backend is for distributed builds
+/// where separate processes will invoke the real backends.
+///
+/// To find the path to write the index to, the backend checks if the path has a
+/// prefix of OldPrefix; if so, it replaces that prefix with NewPrefix. It then
+/// appends ".thinlto.bc" and writes the index to that path. If
+/// ShouldEmitImportsFiles is true it also writes a list of imported files to a
+/// similar path with ".imports" appended instead.
+ThinBackend createWriteIndexesThinBackend(std::string OldPrefix,
+ std::string NewPrefix,
+ bool ShouldEmitImportsFiles,
+ std::string LinkedObjectsFile);
+
+/// This class implements a resolution-based interface to LLVM's LTO
+/// functionality. It supports regular LTO, parallel LTO code generation and
+/// ThinLTO. You can use it from a linker in the following way:
+/// - Set hooks and code generation options (see lto::Config struct defined in
+/// Config.h), and use the lto::Config object to create an lto::LTO object.
+/// - Create lto::InputFile objects using lto::InputFile::create(), then use
+/// the symbols() function to enumerate its symbols and compute a resolution
+/// for each symbol (see SymbolResolution below).
+/// - After the linker has visited each input file (and each regular object
+/// file) and computed a resolution for each symbol, take each lto::InputFile
+/// and pass it and an array of symbol resolutions to the add() function.
+/// - Call the getMaxTasks() function to get an upper bound on the number of
+/// native object files that LTO may add to the link.
+/// - Call the run() function. This function will use the supplied AddStream
+/// and Cache functions to add up to getMaxTasks() native object files to
+/// the link.
+class LTO {
+ friend InputFile;
+
+public:
+ /// Create an LTO object. A default constructed LTO object has a reasonable
+ /// production configuration, but you can customize it by passing arguments to
+ /// this constructor.
+ /// FIXME: We do currently require the DiagHandler field to be set in Conf.
+ /// Until that is fixed, a Config argument is required.
+ LTO(Config Conf, ThinBackend Backend = nullptr,
+ unsigned ParallelCodeGenParallelismLevel = 1);
+ ~LTO();
+
+ /// Add an input file to the LTO link, using the provided symbol resolutions.
+ /// The symbol resolutions must appear in the enumeration order given by
+ /// InputFile::symbols().
+ Error add(std::unique_ptr<InputFile> Obj, ArrayRef<SymbolResolution> Res);
+
+ /// Returns an upper bound on the number of tasks that the client may expect.
+ /// This may only be called after all IR object files have been added. For a
+ /// full description of tasks see LTOBackend.h.
+ unsigned getMaxTasks() const;
+
+ /// Runs the LTO pipeline. This function calls the supplied AddStream
+ /// function to add native object files to the link.
+ ///
+ /// The Cache parameter is optional. If supplied, it will be used to cache
+ /// native object files and add them to the link.
+ ///
+ /// The client will receive at most one callback (via either AddStream or
+ /// Cache) for each task identifier.
+ Error run(AddStreamFn AddStream, NativeObjectCache Cache = nullptr);
+
+private:
+ Config Conf;
+
+ struct RegularLTOState {
+ RegularLTOState(unsigned ParallelCodeGenParallelismLevel, Config &Conf);
+ struct CommonResolution {
+ uint64_t Size = 0;
+ unsigned Align = 0;
+ /// Record if at least one instance of the common was marked as prevailing
+ bool Prevailing = false;
+ };
+ std::map<std::string, CommonResolution> Commons;
+
+ unsigned ParallelCodeGenParallelismLevel;
+ LTOLLVMContext Ctx;
+ bool HasModule = false;
+ std::unique_ptr<Module> CombinedModule;
+ std::unique_ptr<IRMover> Mover;
+ } RegularLTO;
+
+ struct ThinLTOState {
+ ThinLTOState(ThinBackend Backend);
+
+ ThinBackend Backend;
+ ModuleSummaryIndex CombinedIndex;
+ MapVector<StringRef, BitcodeModule> ModuleMap;
+ DenseMap<GlobalValue::GUID, StringRef> PrevailingModuleForGUID;
+ } ThinLTO;
+
+ // The global resolution for a particular (mangled) symbol name. This is in
+ // particular necessary to track whether each symbol can be internalized.
+ // Because any input file may introduce a new cross-partition reference, we
+ // cannot make any final internalization decisions until all input files have
+ // been added and the client has called run(). During run() we apply
+ // internalization decisions either directly to the module (for regular LTO)
+ // or to the combined index (for ThinLTO).
+ struct GlobalResolution {
+ /// The unmangled name of the global.
+ std::string IRName;
+
+ bool UnnamedAddr = true;
+
+ /// This field keeps track of the partition number of this global. The
+ /// regular LTO object is partition 0, while each ThinLTO object has its own
+ /// partition number from 1 onwards.
+ ///
+ /// Any global that is defined or used by more than one partition, or that
+ /// is referenced externally, may not be internalized.
+ ///
+ /// Partitions generally have a one-to-one correspondence with tasks, except
+ /// that we use partition 0 for all parallel LTO code generation partitions.
+ /// Any partitioning of the combined LTO object is done internally by the
+ /// LTO backend.
+ unsigned Partition = Unknown;
+
+ /// Special partition numbers.
+ enum : unsigned {
+ /// A partition number has not yet been assigned to this global.
+ Unknown = -1u,
+
+ /// This global is either used by more than one partition or has an
+ /// external reference, and therefore cannot be internalized.
+ External = -2u,
+ };
+ };
+
+ // Global mapping from mangled symbol names to resolutions.
+ StringMap<GlobalResolution> GlobalResolutions;
+
+ void addSymbolToGlobalRes(SmallPtrSet<GlobalValue *, 8> &Used,
+ const InputFile::Symbol &Sym, SymbolResolution Res,
+ unsigned Partition);
+
+ // These functions take a range of symbol resolutions [ResI, ResE) and consume
+ // the resolutions used by a single input module by incrementing ResI. After
+ // these functions return, [ResI, ResE) will refer to the resolution range for
+ // the remaining modules in the InputFile.
+ Error addModule(InputFile &Input, InputFile::InputModule &IM,
+ const SymbolResolution *&ResI, const SymbolResolution *ResE);
+ Error addRegularLTO(BitcodeModule BM, const SymbolResolution *&ResI,
+ const SymbolResolution *ResE);
+ Error addThinLTO(BitcodeModule BM, Module &M,
+ iterator_range<InputFile::symbol_iterator> Syms,
+ const SymbolResolution *&ResI, const SymbolResolution *ResE);
+
+ Error runRegularLTO(AddStreamFn AddStream);
+ Error runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache,
+ bool HasRegularLTO);
+
+ mutable bool CalledGetMaxTasks = false;
+};
+
+/// The resolution for a symbol. The linker must provide a SymbolResolution for
+/// each global symbol based on its internal resolution of that symbol.
+struct SymbolResolution {
+ SymbolResolution()
+ : Prevailing(0), FinalDefinitionInLinkageUnit(0), VisibleToRegularObj(0) {
+ }
+ /// The linker has chosen this definition of the symbol.
+ unsigned Prevailing : 1;
+
+ /// The definition of this symbol is unpreemptable at runtime and is known to
+ /// be in this linkage unit.
+ unsigned FinalDefinitionInLinkageUnit : 1;
+
+ /// The definition of this symbol is visible outside of the LTO unit.
+ unsigned VisibleToRegularObj : 1;
+};
+
+} // namespace lto
+} // namespace llvm
#endif
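
A condensed sketch of the flow described in the LTO class comment above (illustrative; the symbol-resolution policy below is a placeholder, not real linker logic):

    #include "llvm/LTO/LTO.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/MemoryBuffer.h"

    static llvm::Error linkOneFile(llvm::lto::Config Conf,
                                   llvm::MemoryBufferRef Buf,
                                   llvm::lto::AddStreamFn AddStream) {
      using namespace llvm;
      lto::LTO Link(std::move(Conf), lto::createInProcessThinBackend(1));

      Expected<std::unique_ptr<lto::InputFile>> F = lto::InputFile::create(Buf);
      if (!F)
        return F.takeError();

      // One resolution per symbol, in symbols() order. A real linker computes
      // Prevailing, VisibleToRegularObj, etc. from its global symbol table.
      std::vector<lto::SymbolResolution> Res;
      for (const lto::InputFile::Symbol &Sym : (*F)->symbols()) {
        (void)Sym;
        lto::SymbolResolution R;
        R.Prevailing = 1;
        Res.push_back(R);
      }

      if (Error E = Link.add(std::move(*F), Res))
        return E;
      return Link.run(AddStream); // a NativeObjectCache could also be passed
    }
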
diff --git a/include/llvm/LTO/LTOBackend.h b/include/llvm/LTO/LTOBackend.h
new file mode 100644
index 000000000000..933503afddc8
--- /dev/null
+++ b/include/llvm/LTO/LTOBackend.h
@@ -0,0 +1,51 @@
+//===- LTOBackend.h - LLVM Link Time Optimizer Backend --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the "backend" phase of LTO, i.e. it performs
+// optimization and code generation on a loaded module. It is generally used
+// internally by the LTO class but can also be used independently, for example
+// to implement a standalone ThinLTO backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_LTOBACKEND_H
+#define LLVM_LTO_LTOBACKEND_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
+
+namespace llvm {
+
+class BitcodeModule;
+class Error;
+class Module;
+class Target;
+
+namespace lto {
+
+/// Runs a regular LTO backend.
+Error backend(Config &C, AddStreamFn AddStream,
+ unsigned ParallelCodeGenParallelismLevel,
+ std::unique_ptr<Module> M);
+
+/// Runs a ThinLTO backend.
+Error thinBackend(Config &C, unsigned Task, AddStreamFn AddStream, Module &M,
+ ModuleSummaryIndex &CombinedIndex,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const GVSummaryMapTy &DefinedGlobals,
+ MapVector<StringRef, BitcodeModule> &ModuleMap);
+}
+}
+
+#endif
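
A small sketch of calling the regular-LTO backend entry point directly (illustrative; Conf, AddStream and the merged module are assumed to be set up as in the sketches above):

    #include "llvm/IR/Module.h"
    #include "llvm/LTO/LTOBackend.h"
    #include "llvm/Support/Error.h"

    static llvm::Error codegenMergedModule(llvm::lto::Config &Conf,
                                           llvm::lto::AddStreamFn AddStream,
                                           std::unique_ptr<llvm::Module> M) {
      // A single code generation partition; raise the level for parallel
      // code generation.
      return llvm::lto::backend(Conf, AddStream,
                                /*ParallelCodeGenParallelismLevel=*/1,
                                std::move(M));
    }
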
diff --git a/include/llvm/LTO/legacy/LTOCodeGenerator.h b/include/llvm/LTO/legacy/LTOCodeGenerator.h
index d083e37d75b8..f14682111280 100644
--- a/include/llvm/LTO/legacy/LTOCodeGenerator.h
+++ b/include/llvm/LTO/legacy/LTOCodeGenerator.h
@@ -41,6 +41,7 @@
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <string>
@@ -77,6 +78,7 @@ struct LTOCodeGenerator {
/// Resets \a HasVerifiedInput.
void setModule(std::unique_ptr<LTOModule> M);
+ void setAsmUndefinedRefs(struct LTOModule *);
void setTargetOptions(const TargetOptions &Options);
void setDebugInfo(lto_debug_model);
void setCodePICModel(Optional<Reloc::Model> Model) { RelocModel = Model; }
@@ -85,8 +87,8 @@ struct LTOCodeGenerator {
/// The default is TargetMachine::CGFT_ObjectFile.
void setFileType(TargetMachine::CodeGenFileType FT) { FileType = FT; }
- void setCpu(const char *MCpu) { this->MCpu = MCpu; }
- void setAttr(const char *MAttr) { this->MAttr = MAttr; }
+ void setCpu(StringRef MCpu) { this->MCpu = MCpu; }
+ void setAttr(StringRef MAttr) { this->MAttr = MAttr; }
void setOptLevel(unsigned OptLevel);
void setShouldInternalize(bool Value) { ShouldInternalize = Value; }
@@ -116,7 +118,7 @@ struct LTOCodeGenerator {
/// name is misleading). This function should be called before
/// LTOCodeGenerator::compilexxx(), and
/// LTOCodeGenerator::writeMergedModules().
- void setCodeGenDebugOptions(const char *Opts);
+ void setCodeGenDebugOptions(StringRef Opts);
/// Parse the options set in setCodeGenDebugOptions.
///
@@ -129,7 +131,7 @@ struct LTOCodeGenerator {
/// true on success.
///
/// Calls \a verifyMergedModuleOnce().
- bool writeMergedModules(const char *Path);
+ bool writeMergedModules(StringRef Path);
/// Compile the merged module into a *single* output file; the path to output
/// file is returned to the caller via argument "name". Return true on
@@ -204,6 +206,9 @@ private:
void emitError(const std::string &ErrMsg);
void emitWarning(const std::string &ErrMsg);
+ bool setupOptimizationRemarks();
+ void finishOptimizationRemarks();
+
LLVMContext &Context;
std::unique_ptr<Module> MergedModule;
std::unique_ptr<Linker> TheLinker;
@@ -231,6 +236,7 @@ private:
bool ShouldEmbedUselists = false;
bool ShouldRestoreGlobalsLinkage = false;
TargetMachine::CodeGenFileType FileType = TargetMachine::CGFT_ObjectFile;
+ std::unique_ptr<tool_output_file> DiagnosticOutputFile;
};
}
#endif
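
A sketch of a legacy-API call sequence touched by this hunk (illustrative; the option string and output path are placeholders, and the exact name of the option-parsing entry point referenced in the comment above is assumed here):

    #include "llvm/LTO/legacy/LTOCodeGenerator.h"

    static bool emitMergedBitcode(llvm::LTOCodeGenerator &CG) {
      CG.setCpu("generic");                      // StringRef overload now
      CG.setAttr("");
      CG.setCodeGenDebugOptions("-debug-pass=Arguments");
      CG.parseCodeGenDebugOptions();             // assumed parsing entry point
      return CG.writeMergedModules("merged.bc"); // also takes StringRef now
    }
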
diff --git a/include/llvm/LTO/legacy/LTOModule.h b/include/llvm/LTO/legacy/LTOModule.h
index 2e46219be19e..2a8758587a11 100644
--- a/include/llvm/LTO/legacy/LTOModule.h
+++ b/include/llvm/LTO/legacy/LTOModule.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/Module.h"
#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Target/TargetMachine.h"
#include <string>
#include <vector>
@@ -37,33 +38,36 @@ namespace llvm {
struct LTOModule {
private:
struct NameAndAttributes {
- const char *name;
- uint32_t attributes;
- bool isFunction;
- const GlobalValue *symbol;
+ StringRef name;
+ uint32_t attributes = 0;
+ bool isFunction = false;
+ const GlobalValue *symbol = nullptr;
};
std::unique_ptr<LLVMContext> OwnedContext;
std::string LinkerOpts;
- std::unique_ptr<object::IRObjectFile> IRFile;
+ std::unique_ptr<Module> Mod;
+ MemoryBufferRef MBRef;
+ ModuleSymbolTable SymTab;
std::unique_ptr<TargetMachine> _target;
std::vector<NameAndAttributes> _symbols;
// _defines and _undefines only needed to disambiguate tentative definitions
StringSet<> _defines;
StringMap<NameAndAttributes> _undefines;
- std::vector<const char*> _asm_undefines;
+ std::vector<StringRef> _asm_undefines;
- LTOModule(std::unique_ptr<object::IRObjectFile> Obj, TargetMachine *TM);
+ LTOModule(std::unique_ptr<Module> M, MemoryBufferRef MBRef,
+ TargetMachine *TM);
public:
~LTOModule();
/// Returns 'true' if the file or memory contents is LLVM bitcode.
static bool isBitcodeFile(const void *mem, size_t length);
- static bool isBitcodeFile(const char *path);
+ static bool isBitcodeFile(StringRef path);
/// Returns 'true' if the Module is produced for ThinLTO.
bool isThinLTO();
@@ -91,13 +95,13 @@ public:
/// InitializeAllAsmPrinters();
/// InitializeAllAsmParsers();
static ErrorOr<std::unique_ptr<LTOModule>>
- createFromFile(LLVMContext &Context, const char *path,
+ createFromFile(LLVMContext &Context, StringRef path,
const TargetOptions &options);
static ErrorOr<std::unique_ptr<LTOModule>>
- createFromOpenFile(LLVMContext &Context, int fd, const char *path,
- size_t size, const TargetOptions &options);
+ createFromOpenFile(LLVMContext &Context, int fd, StringRef path, size_t size,
+ const TargetOptions &options);
static ErrorOr<std::unique_ptr<LTOModule>>
- createFromOpenFileSlice(LLVMContext &Context, int fd, const char *path,
+ createFromOpenFileSlice(LLVMContext &Context, int fd, StringRef path,
size_t map_size, off_t offset,
const TargetOptions &options);
static ErrorOr<std::unique_ptr<LTOModule>>
@@ -108,14 +112,10 @@ public:
size_t length, const TargetOptions &options,
StringRef path);
- const Module &getModule() const {
- return const_cast<LTOModule*>(this)->getModule();
- }
- Module &getModule() {
- return IRFile->getModule();
- }
+ const Module &getModule() const { return *Mod; }
+ Module &getModule() { return *Mod; }
- std::unique_ptr<Module> takeModule() { return IRFile->takeModule(); }
+ std::unique_ptr<Module> takeModule() { return std::move(Mod); }
/// Return the Module's target triple.
const std::string &getTargetTriple() {
@@ -140,10 +140,10 @@ public:
}
/// Get the name of the symbol at the specified index.
- const char *getSymbolName(uint32_t index) {
+ StringRef getSymbolName(uint32_t index) {
if (index < _symbols.size())
return _symbols[index].name;
- return nullptr;
+ return StringRef();
}
const GlobalValue *getSymbolGV(uint32_t index) {
@@ -152,13 +152,9 @@ public:
return nullptr;
}
- const char *getLinkerOpts() {
- return LinkerOpts.c_str();
- }
+ StringRef getLinkerOpts() { return LinkerOpts; }
- const std::vector<const char*> &getAsmUndefinedRefs() {
- return _asm_undefines;
- }
+ const std::vector<StringRef> &getAsmUndefinedRefs() { return _asm_undefines; }
private:
/// Parse metadata from the module
@@ -170,26 +166,26 @@ private:
void parseSymbols();
/// Add a symbol which isn't defined just yet to a list to be resolved later.
- void addPotentialUndefinedSymbol(const object::BasicSymbolRef &Sym,
+ void addPotentialUndefinedSymbol(ModuleSymbolTable::Symbol Sym,
bool isFunc);
/// Add a defined symbol to the list.
- void addDefinedSymbol(const char *Name, const GlobalValue *def,
+ void addDefinedSymbol(StringRef Name, const GlobalValue *def,
bool isFunction);
/// Add a data symbol as defined to the list.
- void addDefinedDataSymbol(const object::BasicSymbolRef &Sym);
- void addDefinedDataSymbol(const char*Name, const GlobalValue *v);
+ void addDefinedDataSymbol(ModuleSymbolTable::Symbol Sym);
+ void addDefinedDataSymbol(StringRef Name, const GlobalValue *v);
/// Add a function symbol as defined to the list.
- void addDefinedFunctionSymbol(const object::BasicSymbolRef &Sym);
- void addDefinedFunctionSymbol(const char *Name, const Function *F);
+ void addDefinedFunctionSymbol(ModuleSymbolTable::Symbol Sym);
+ void addDefinedFunctionSymbol(StringRef Name, const Function *F);
/// Add a global symbol from module-level ASM to the defined list.
- void addAsmGlobalSymbol(const char *, lto_symbol_attributes scope);
+ void addAsmGlobalSymbol(StringRef, lto_symbol_attributes scope);
/// Add a global symbol from module-level ASM to the undefined list.
- void addAsmGlobalSymbolUndef(const char *);
+ void addAsmGlobalSymbolUndef(StringRef);
/// Parse i386/ppc ObjC class data structure.
void addObjCClass(const GlobalVariable *clgv);
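
A sketch of loading a module through the updated StringRef-based entry points (illustrative; error handling is minimal):

    #include "llvm/LTO/legacy/LTOModule.h"
    #include <system_error>

    static llvm::ErrorOr<std::unique_ptr<llvm::LTOModule>>
    loadForLTO(llvm::LLVMContext &Ctx, llvm::StringRef Path) {
      if (!llvm::LTOModule::isBitcodeFile(Path))
        return std::make_error_code(std::errc::invalid_argument);
      llvm::TargetOptions Options;
      return llvm::LTOModule::createFromFile(Ctx, Path, Options);
    }
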
diff --git a/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h b/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
index 539880e8d3a7..cb4a16cb5b7b 100644
--- a/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
+++ b/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
@@ -38,7 +38,7 @@ struct TargetMachineBuilder {
std::string MAttr;
TargetOptions Options;
Optional<Reloc::Model> RelocModel;
- CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
+ CodeGenOpt::Level CGOptLevel = CodeGenOpt::Aggressive;
std::unique_ptr<TargetMachine> create() const;
};
@@ -72,18 +72,32 @@ public:
/**
* Process all the modules that were added to the code generator in parallel.
*
- * Client can access the resulting object files using getProducedBinaries()
+ * Clients can access the resulting object files using getProducedBinaries(),
+ * unless setGeneratedObjectsDirectory() has been called, in which case
+ * results are available through getProducedBinaryFiles().
*/
void run();
/**
- * Return the "in memory" binaries produced by the code generator.
+ * Return the "in memory" binaries produced by the code generator. This is
+ * filled after run() unless setGeneratedObjectsDirectory() has been
+ * called, in which case results are available through
+ * getProducedBinaryFiles().
*/
std::vector<std::unique_ptr<MemoryBuffer>> &getProducedBinaries() {
return ProducedBinaries;
}
/**
+ * Return the "on-disk" binaries produced by the code generator. This is
+ * filled after run() only when setGeneratedObjectsDirectory() has been
+ * called; otherwise results are available through getProducedBinaries().
+ */
+ std::vector<std::string> &getProducedBinaryFiles() {
+ return ProducedBinaryFiles;
+ }
+
+ /**
* \defgroup Options setters
* @{
*/
@@ -156,6 +170,14 @@ public:
/// the processing.
void setSaveTempsDir(std::string Path) { SaveTempsDir = std::move(Path); }
+ /// Set the path to a directory where to save generated object files. This
+ /// path can be used by a linker to request on-disk files instead of in-memory
+ /// buffers. When set, results are available through getProducedBinaryFiles()
+ /// instead of getProducedBinaries().
+ void setGeneratedObjectsDirectory(std::string Path) {
+ SavedObjectsDirectoryPath = std::move(Path);
+ }
+
/// CPU to use to initialize the TargetMachine
void setCpu(std::string Cpu) { TMBuilder.MCpu = std::move(Cpu); }
@@ -177,6 +199,11 @@ public:
TMBuilder.CGOptLevel = CGOptLevel;
}
+ /// IR optimization level: from 0 to 3.
+ void setOptLevel(unsigned NewOptLevel) {
+ OptLevel = (NewOptLevel > 3) ? 3 : NewOptLevel;
+ }
+
/// Disable CodeGen, only run the stages till codegen and stop. The output
/// will be bitcode.
void disableCodeGen(bool Disable) { DisableCodeGen = Disable; }
@@ -244,9 +271,13 @@ private:
/// Helper factory to build a TargetMachine
TargetMachineBuilder TMBuilder;
- /// Vector holding the in-memory buffer containing the produced binaries.
+ /// Vector holding the in-memory buffers containing the produced binaries, when
+ /// SavedObjectsDirectoryPath isn't set.
std::vector<std::unique_ptr<MemoryBuffer>> ProducedBinaries;
+ /// Paths to the object files generated in SavedObjectsDirectoryPath, if set.
+ std::vector<std::string> ProducedBinaryFiles;
+
/// Vector holding the input buffers containing the bitcode modules to
/// process.
std::vector<MemoryBufferRef> Modules;
@@ -264,6 +295,9 @@ private:
/// Path to a directory to save the temporary bitcode files.
std::string SaveTempsDir;
+ /// Path to a directory to save the generated object files.
+ std::string SavedObjectsDirectoryPath;
+
/// Flag to enable/disable CodeGen. When set to true, the process stops after
/// optimizations and a bitcode is produced.
bool DisableCodeGen = false;
@@ -271,6 +305,9 @@ private:
/// Flag to indicate that only the CodeGen will be performed, no cross-module
/// importing or optimization.
bool CodeGenOnly = false;
+
+ /// IR Optimization Level [0-3].
+ unsigned OptLevel = 3;
};
}
#endif
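
A sketch of the new on-disk output mode (illustrative; the directory name is a placeholder and module registration is elided):

    #include "llvm/LTO/legacy/ThinLTOCodeGenerator.h"

    static std::vector<std::string> runThinLTO(llvm::ThinLTOCodeGenerator &CG) {
      CG.setOptLevel(2);                 // new IR opt level knob, clamped to 3
      CG.setGeneratedObjectsDirectory("thinlto-objs");
      CG.run();
      // With a directory set, results come back as paths rather than
      // in-memory buffers.
      return CG.getProducedBinaryFiles();
    }
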
diff --git a/include/llvm/LinkAllIR.h b/include/llvm/LinkAllIR.h
index 77e19ce900e3..f078c73f979e 100644
--- a/include/llvm/LinkAllIR.h
+++ b/include/llvm/LinkAllIR.h
@@ -31,7 +31,6 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Signals.h"
-#include "llvm/Support/TimeValue.h"
#include <cstdlib>
namespace {
diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h
index b2721d0a1fd9..e50137f8e02e 100644
--- a/include/llvm/LinkAllPasses.h
+++ b/include/llvm/LinkAllPasses.h
@@ -39,6 +39,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/ObjCARC.h"
@@ -77,6 +78,7 @@ namespace {
(void) llvm::createCFLAndersAAWrapperPass();
(void) llvm::createCFLSteensAAWrapperPass();
(void) llvm::createStructurizeCFGPass();
+ (void) llvm::createLibCallsShrinkWrapPass();
(void) llvm::createConstantMergePass();
(void) llvm::createConstantPropagationPass();
(void) llvm::createCostModelAnalysisPass();
@@ -97,7 +99,7 @@ namespace {
(void) llvm::createInstrProfilingLegacyPass();
(void) llvm::createFunctionImportPass();
(void) llvm::createFunctionInliningPass();
- (void) llvm::createAlwaysInlinerPass();
+ (void) llvm::createAlwaysInlinerLegacyPass();
(void) llvm::createGlobalDCEPass();
(void) llvm::createGlobalOptimizerPass();
(void) llvm::createGlobalsAAWrapperPass();
@@ -110,6 +112,7 @@ namespace {
(void) llvm::createInternalizePass();
(void) llvm::createLCSSAPass();
(void) llvm::createLICMPass();
+ (void) llvm::createLoopSinkPass();
(void) llvm::createLazyValueInfoPass();
(void) llvm::createLoopExtractorPass();
(void) llvm::createLoopInterchangePass();
@@ -159,10 +162,12 @@ namespace {
(void) llvm::createInstCountPass();
(void) llvm::createConstantHoistingPass();
(void) llvm::createCodeGenPreparePass();
+ (void) llvm::createCountingFunctionInserterPass();
(void) llvm::createEarlyCSEPass();
(void) llvm::createGVNHoistPass();
(void) llvm::createMergedLoadStoreMotionPass();
(void) llvm::createGVNPass();
+ (void) llvm::createNewGVNPass();
(void) llvm::createMemCpyOptPass();
(void) llvm::createLoopDeletionPass();
(void) llvm::createPostDomTree();
diff --git a/include/llvm/Linker/IRMover.h b/include/llvm/Linker/IRMover.h
index 578940ed4069..2a187cbc42f5 100644
--- a/include/llvm/Linker/IRMover.h
+++ b/include/llvm/Linker/IRMover.h
@@ -71,8 +71,15 @@ public:
/// not present in ValuesToLink. The GlobalValue and a ValueAdder callback
/// are passed as an argument, and the callback is expected to be called
/// if the GlobalValue needs to be added to the \p ValuesToLink and linked.
+ /// - \p LinkModuleInlineAsm is true if the ModuleInlineAsm string in Src
+ /// should be linked with (concatenated into) the ModuleInlineAsm string
+ /// for the destination module. It should be true for full LTO, but not
+ /// when importing for ThinLTO, otherwise we can have duplicate symbols.
+ /// - \p IsPerformingImport is true when this IR link is to perform ThinLTO
+ /// function importing from Src.
Error move(std::unique_ptr<Module> Src, ArrayRef<GlobalValue *> ValuesToLink,
- std::function<void(GlobalValue &GV, ValueAdder Add)> AddLazyFor);
+ std::function<void(GlobalValue &GV, ValueAdder Add)> AddLazyFor,
+ bool LinkModuleInlineAsm, bool IsPerformingImport);
Module &getModule() { return Composite; }
private:
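
A minimal call-site sketch for the widened move() signature, assuming the caller already owns the IRMover, the source Module, and the list of values to link; the lambda body and error handling are placeholders only.

#include "llvm/Linker/IRMover.h"
#include <memory>

// Sketch: link a module the way full LTO would, using the two new flags.
llvm::Error linkForFullLTO(llvm::IRMover &Mover, std::unique_ptr<llvm::Module> Src,
                           llvm::ArrayRef<llvm::GlobalValue *> ValuesToLink) {
  return Mover.move(
      std::move(Src), ValuesToLink,
      [](llvm::GlobalValue &GV, llvm::IRMover::ValueAdder Add) {
        // Lazily decide whether GV also needs linking; no-op in this sketch.
      },
      /*LinkModuleInlineAsm=*/true,   // full LTO: concatenate module inline asm
      /*IsPerformingImport=*/false);  // not a ThinLTO function import

}

For a ThinLTO function import the last two arguments would be flipped, per the parameter comments above.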
diff --git a/include/llvm/MC/ConstantPools.h b/include/llvm/MC/ConstantPools.h
index 552e1443e7d0..f0c445dbe59f 100644
--- a/include/llvm/MC/ConstantPools.h
+++ b/include/llvm/MC/ConstantPools.h
@@ -25,6 +25,7 @@ class MCExpr;
class MCSection;
class MCStreamer;
class MCSymbol;
+class MCSymbolRefExpr;
struct ConstantPoolEntry {
ConstantPoolEntry(MCSymbol *L, const MCExpr *Val, unsigned Sz, SMLoc Loc_)
@@ -40,6 +41,7 @@ struct ConstantPoolEntry {
class ConstantPool {
typedef SmallVector<ConstantPoolEntry, 4> EntryVecTy;
EntryVecTy Entries;
+ DenseMap<int64_t, const MCSymbolRefExpr *> CachedEntries;
public:
// Initialize a new empty constant pool
diff --git a/include/llvm/MC/LaneBitmask.h b/include/llvm/MC/LaneBitmask.h
new file mode 100644
index 000000000000..89e60928405d
--- /dev/null
+++ b/include/llvm/MC/LaneBitmask.h
@@ -0,0 +1,89 @@
+//===-- llvm/MC/LaneBitmask.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// A common definition of LaneBitmask for use in TableGen and CodeGen.
+///
+/// A lane mask is a bitmask representing the covering of a register with
+/// sub-registers.
+///
+/// This is typically used to track liveness at sub-register granularity.
+/// Lane masks for sub-register indices are similar to register units for
+/// physical registers. The individual bits in a lane mask can't be assigned
+/// any specific meaning. They can be used to check if two sub-register
+/// indices overlap.
+///
+/// Iff the target has a register such that:
+///
+/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
+///
+/// then:
+///
+/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
+
+#ifndef LLVM_MC_LANEBITMASK_H
+#define LLVM_MC_LANEBITMASK_H
+
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Printable.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+ struct LaneBitmask {
+ // When changing the underlying type, change the format string as well.
+ typedef unsigned Type;
+ enum : unsigned { BitWidth = 8*sizeof(Type) };
+ constexpr static const char *const FormatStr = "%08X";
+
+ constexpr LaneBitmask() = default;
+ explicit constexpr LaneBitmask(Type V) : Mask(V) {}
+
+ constexpr bool operator== (LaneBitmask M) const { return Mask == M.Mask; }
+ constexpr bool operator!= (LaneBitmask M) const { return Mask != M.Mask; }
+ constexpr bool operator< (LaneBitmask M) const { return Mask < M.Mask; }
+ constexpr bool none() const { return Mask == 0; }
+ constexpr bool any() const { return Mask != 0; }
+ constexpr bool all() const { return ~Mask == 0; }
+
+ constexpr LaneBitmask operator~() const {
+ return LaneBitmask(~Mask);
+ }
+ constexpr LaneBitmask operator|(LaneBitmask M) const {
+ return LaneBitmask(Mask | M.Mask);
+ }
+ constexpr LaneBitmask operator&(LaneBitmask M) const {
+ return LaneBitmask(Mask & M.Mask);
+ }
+ LaneBitmask &operator|=(LaneBitmask M) {
+ Mask |= M.Mask;
+ return *this;
+ }
+ LaneBitmask &operator&=(LaneBitmask M) {
+ Mask &= M.Mask;
+ return *this;
+ }
+
+ constexpr Type getAsInteger() const { return Mask; }
+
+ static LaneBitmask getNone() { return LaneBitmask(0); }
+ static LaneBitmask getAll() { return ~LaneBitmask(0); }
+
+ private:
+ Type Mask = 0;
+ };
+
+ /// Create Printable object to print LaneBitmasks on a \ref raw_ostream.
+ static LLVM_ATTRIBUTE_UNUSED Printable PrintLaneMask(LaneBitmask LaneMask) {
+ return Printable([LaneMask](raw_ostream &OS) {
+ OS << format(LaneBitmask::FormatStr, LaneMask.getAsInteger());
+ });
+ }
+}
+
+#endif // LLVM_MC_LANEBITMASK_H
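
A small usage sketch built only from the interface above: testing whether two lane masks overlap and printing them through PrintLaneMask. The specific mask values are illustrative; real masks come from the target's getSubRegIndexLaneMask, as the header comment notes.

#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Two sub-register indices overlap iff their lane masks intersect.
static bool lanesOverlap(LaneBitmask A, LaneBitmask B) { return (A & B).any(); }

void demoLaneBitmask() {
  LaneBitmask Lo(0x0000000F), Hi(0x000000F0);
  outs() << "lo lanes:  " << PrintLaneMask(Lo) << "\n";                   // 0000000F
  outs() << "overlap:   " << (lanesOverlap(Lo, Hi) ? "yes" : "no") << "\n"; // no
  outs() << "all lanes: " << PrintLaneMask(LaneBitmask::getAll()) << "\n";
}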
diff --git a/include/llvm/MC/MCAsmBackend.h b/include/llvm/MC/MCAsmBackend.h
index ce17a2a06758..d4bdbcd2baa3 100644
--- a/include/llvm/MC/MCAsmBackend.h
+++ b/include/llvm/MC/MCAsmBackend.h
@@ -13,7 +13,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/MC/MCDirectives.h"
-#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
@@ -21,6 +20,7 @@
namespace llvm {
class MCAsmLayout;
class MCAssembler;
+class MCCFIInstruction;
class MCELFObjectTargetWriter;
struct MCFixupKindInfo;
class MCFragment;
@@ -28,6 +28,7 @@ class MCInst;
class MCRelaxableFragment;
class MCObjectWriter;
class MCSection;
+class MCSubtargetInfo;
class MCValue;
class raw_pwrite_stream;
diff --git a/include/llvm/MC/MCAsmInfo.h b/include/llvm/MC/MCAsmInfo.h
index e6ed5688d18d..f898bf5288d6 100644
--- a/include/llvm/MC/MCAsmInfo.h
+++ b/include/llvm/MC/MCAsmInfo.h
@@ -18,6 +18,7 @@
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCTargetOptions.h"
#include <cassert>
#include <vector>
@@ -41,14 +42,6 @@ enum class EncodingType {
};
}
-enum class ExceptionHandling {
- None, /// No exception support
- DwarfCFI, /// DWARF-like instruction based exceptions
- SjLj, /// setjmp/longjmp based exceptions
- ARM, /// ARM EHABI
- WinEH, /// Windows Exception Handling
-};
-
namespace LCOMM {
enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment };
}
@@ -92,12 +85,6 @@ protected:
/// directive for emitting thread local BSS Symbols. Default is false.
bool HasMachoTBSSDirective;
- /// True if the compiler should emit a ".reference .constructors_used" or
- /// ".reference .destructors_used" directive after the static ctor/dtor
- /// list. This directive is only emitted in Static relocation model. Default
- /// is false.
- bool HasStaticCtorDtorReferenceInStaticMode;
-
/// This is the maximum possible length of an instruction, which is needed to
/// compute the size of an inline asm. Defaults to 4.
unsigned MaxInstLength;
@@ -116,7 +103,7 @@ protected:
/// This indicates the comment character used by the assembler. Defaults to
/// "#"
- const char *CommentString;
+ StringRef CommentString;
/// This is appended to emitted labels. Defaults to ":"
const char *LabelSuffix;
@@ -130,17 +117,17 @@ protected:
/// This prefix is used for globals like constant pool entries that are
/// completely private to the .s file and should not have names in the .o
/// file. Defaults to "L"
- const char *PrivateGlobalPrefix;
+ StringRef PrivateGlobalPrefix;
/// This prefix is used for labels for basic blocks. Defaults to the same as
/// PrivateGlobalPrefix.
- const char *PrivateLabelPrefix;
+ StringRef PrivateLabelPrefix;
/// This prefix is used for symbols that should be passed through the
/// assembler but be removed by the linker. This is 'l' on Darwin, currently
/// used for some ObjC metadata. The default of "" means that for this system
/// a plain private symbol should be used. Defaults to "".
- const char *LinkerPrivateGlobalPrefix;
+ StringRef LinkerPrivateGlobalPrefix;
/// If these are nonempty, they contain a directive to emit before and after
/// an inline assembly statement. Defaults to "#APP\n", "#NO_APP\n"
@@ -206,6 +193,14 @@ protected:
/// on Alpha. Defaults to NULL.
const char *GPRel32Directive;
+ /// If non-null, directives that are used to emit a word/dword which should
+ /// be relocated as a 32/64-bit DTP/TP-relative offset, e.g. .dtprelword/
+ /// .dtpreldword/.tprelword/.tpreldword on Mips.
+ const char *DTPRel32Directive = nullptr;
+ const char *DTPRel64Directive = nullptr;
+ const char *TPRel32Directive = nullptr;
+ const char *TPRel64Directive = nullptr;
+
/// This is true if this target uses "Sun Style" syntax for section switching
/// ("#alloc,#write" etc) instead of the normal ELF syntax (,"a,w") in
/// .section directives. Defaults to false.
@@ -376,6 +371,10 @@ protected:
// X86_64 ELF.
bool RelaxELFRelocations = true;
+ // If true, then the lexer and expression parser will support %neg(),
+ // %hi(), and similar unary operators.
+ bool HasMipsExpressions = false;
+
public:
explicit MCAsmInfo();
virtual ~MCAsmInfo();
@@ -405,6 +404,10 @@ public:
const char *getData64bitsDirective() const { return Data64bitsDirective; }
const char *getGPRel64Directive() const { return GPRel64Directive; }
const char *getGPRel32Directive() const { return GPRel32Directive; }
+ const char *getDTPRel64Directive() const { return DTPRel64Directive; }
+ const char *getDTPRel32Directive() const { return DTPRel32Directive; }
+ const char *getTPRel64Directive() const { return TPRel64Directive; }
+ const char *getTPRel32Directive() const { return TPRel32Directive; }
/// Targets can implement this method to specify a section to switch to if the
/// translation unit doesn't have any trampolines that require an executable
@@ -456,9 +459,6 @@ public:
bool hasMachoZeroFillDirective() const { return HasMachoZeroFillDirective; }
bool hasMachoTBSSDirective() const { return HasMachoTBSSDirective; }
- bool hasStaticCtorDtorReferenceInStaticMode() const {
- return HasStaticCtorDtorReferenceInStaticMode;
- }
unsigned getMaxInstLength() const { return MaxInstLength; }
unsigned getMinInstAlignment() const { return MinInstAlignment; }
bool getDollarIsPC() const { return DollarIsPC; }
@@ -468,17 +468,17 @@ public:
/// printed.
unsigned getCommentColumn() const { return 40; }
- const char *getCommentString() const { return CommentString; }
+ StringRef getCommentString() const { return CommentString; }
const char *getLabelSuffix() const { return LabelSuffix; }
bool useAssignmentForEHBegin() const { return UseAssignmentForEHBegin; }
bool needsLocalForSize() const { return NeedsLocalForSize; }
- const char *getPrivateGlobalPrefix() const { return PrivateGlobalPrefix; }
- const char *getPrivateLabelPrefix() const { return PrivateLabelPrefix; }
+ StringRef getPrivateGlobalPrefix() const { return PrivateGlobalPrefix; }
+ StringRef getPrivateLabelPrefix() const { return PrivateLabelPrefix; }
bool hasLinkerPrivateGlobalPrefix() const {
return LinkerPrivateGlobalPrefix[0] != '\0';
}
- const char *getLinkerPrivateGlobalPrefix() const {
+ StringRef getLinkerPrivateGlobalPrefix() const {
if (hasLinkerPrivateGlobalPrefix())
return LinkerPrivateGlobalPrefix;
return getPrivateGlobalPrefix();
@@ -598,6 +598,7 @@ public:
bool canRelaxRelocations() const { return RelaxELFRelocations; }
void setRelaxELFRelocations(bool V) { RelaxELFRelocations = V; }
+ bool hasMipsExpressions() const { return HasMipsExpressions; }
};
}
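
A hedged sketch of consulting the new TP/DTP-relative directive getters; MAI and OS are assumed to be supplied by the caller, and the fallback branch is purely illustrative.

#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/raw_ostream.h"

// Emit the directive prefix for a 32-bit DTP-relative word, if the target
// defines one (the default above is nullptr).
void emitDTPRel32Prefix(const llvm::MCAsmInfo &MAI, llvm::raw_ostream &OS) {
  if (const char *Directive = MAI.getDTPRel32Directive())
    OS << Directive;        // e.g. ".dtprelword" on Mips, per the field comment
  else
    OS << "\t.4byte\t";     // illustrative fallback only
}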
diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h
index aa3b451152df..641e78994768 100644
--- a/include/llvm/MC/MCAssembler.h
+++ b/include/llvm/MC/MCAssembler.h
@@ -10,6 +10,7 @@
#ifndef LLVM_MC_MCASSEMBLER_H
#define LLVM_MC_MCASSEMBLER_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
@@ -402,8 +403,7 @@ public:
ArrayRef<std::string> getFileNames() { return FileNames; }
void addFileName(StringRef FileName) {
- if (std::find(FileNames.begin(), FileNames.end(), FileName) ==
- FileNames.end())
+ if (!is_contained(FileNames, FileName))
FileNames.push_back(FileName);
}
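
The helper used above comes from llvm/ADT/STLExtras.h (newly included in this header); a stand-alone sketch of the same de-duplication pattern:

#include "llvm/ADT/STLExtras.h"
#include <string>
#include <vector>

// is_contained(Range, Element) is shorthand for std::find(...) != end().
void addUnique(std::vector<std::string> &Names, const std::string &Name) {
  if (!llvm::is_contained(Names, Name))
    Names.push_back(Name);
}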
diff --git a/include/llvm/MC/MCCodeView.h b/include/llvm/MC/MCCodeView.h
index d999ff555997..41521a6549b8 100644
--- a/include/llvm/MC/MCCodeView.h
+++ b/include/llvm/MC/MCCodeView.h
@@ -25,6 +25,7 @@ namespace llvm {
class MCContext;
class MCObjectStreamer;
class MCStreamer;
+class CodeViewContext;
/// \brief Instances of this class represent the information from a
/// .cv_loc directive.
@@ -36,8 +37,8 @@ class MCCVLoc {
uint16_t PrologueEnd : 1;
uint16_t IsStmt : 1;
-private: // MCContext manages these
- friend class MCContext;
+private: // CodeViewContext manages these
+ friend class CodeViewContext;
MCCVLoc(unsigned functionid, unsigned fileNum, unsigned line, unsigned column,
bool prologueend, bool isstmt)
: FunctionId(functionid), FileNum(fileNum), Line(line), Column(column),
@@ -104,6 +105,55 @@ public:
static void Make(MCObjectStreamer *MCOS);
};
+/// Information describing a function or inlined call site introduced by
+/// .cv_func_id or .cv_inline_site_id. Accumulates information from .cv_loc
+/// directives used with this function's id or the id of an inlined call site
+/// within this function or inlined call site.
+struct MCCVFunctionInfo {
+ /// If this represents an inlined call site, then ParentFuncIdPlusOne will be
+ /// the parent function id plus one. If this represents a normal function,
+ /// then there is no parent, and ParentFuncIdPlusOne will be FunctionSentinel.
+ /// If this struct is an unallocated slot in the function info vector, then
+ /// ParentFuncIdPlusOne will be zero.
+ unsigned ParentFuncIdPlusOne = 0;
+
+ enum : unsigned { FunctionSentinel = ~0U };
+
+ struct LineInfo {
+ unsigned File;
+ unsigned Line;
+ unsigned Col;
+ };
+
+ LineInfo InlinedAt;
+
+ /// The section of the first .cv_loc directive used for this function, or null
+ /// if none has been seen yet.
+ MCSection *Section = nullptr;
+
+ /// Map from inlined call site id to the inlined at location to use for that
+ /// call site. Call chains are collapsed, so for the call chain 'f -> g -> h',
+ /// the InlinedAtMap of 'f' will contain entries for 'g' and 'h' that both
+ /// list the line info for the 'g' call site.
+ DenseMap<unsigned, LineInfo> InlinedAtMap;
+
+ /// Returns true if this function info has not yet been used in a
+ /// .cv_func_id or .cv_inline_site_id directive.
+ bool isUnallocatedFunctionInfo() const { return ParentFuncIdPlusOne == 0; }
+
+ /// Returns true if this represents an inlined call site, meaning
+ /// ParentFuncIdPlusOne is neither zero nor ~0U.
+ bool isInlinedCallSite() const {
+ return !isUnallocatedFunctionInfo() &&
+ ParentFuncIdPlusOne != FunctionSentinel;
+ }
+
+ unsigned getParentFuncId() const {
+ assert(isInlinedCallSite());
+ return ParentFuncIdPlusOne - 1;
+ }
+};
+
/// Holds state from .cv_file and .cv_loc directives for later emission.
class CodeViewContext {
public:
@@ -114,6 +164,48 @@ public:
bool addFile(unsigned FileNumber, StringRef Filename);
ArrayRef<StringRef> getFilenames() { return Filenames; }
+ /// Records the function id of a normal function. Returns false if the
+ /// function id has already been used, and true otherwise.
+ bool recordFunctionId(unsigned FuncId);
+
+ /// Records the function id of an inlined call site. Records the "inlined at"
+ /// location info of the call site, including what function or inlined call
+ /// site it was inlined into. Returns false if the function id has already
+ /// been used, and true otherwise.
+ bool recordInlinedCallSiteId(unsigned FuncId, unsigned IAFunc,
+ unsigned IAFile, unsigned IALine,
+ unsigned IACol);
+
+ /// Retrieve the function info if this is a valid function id, or nullptr.
+ MCCVFunctionInfo *getCVFunctionInfo(unsigned FuncId) {
+ if (FuncId >= Functions.size())
+ return nullptr;
+ if (Functions[FuncId].isUnallocatedFunctionInfo())
+ return nullptr;
+ return &Functions[FuncId];
+ }
+
+ /// Saves the information from the currently parsed .cv_loc directive
+ /// and sets CVLocSeen. When the next instruction is assembled an entry
+ /// in the line number table with this information and the address of the
+ /// instruction will be created.
+ void setCurrentCVLoc(unsigned FunctionId, unsigned FileNo, unsigned Line,
+ unsigned Column, bool PrologueEnd, bool IsStmt) {
+ CurrentCVLoc.setFunctionId(FunctionId);
+ CurrentCVLoc.setFileNum(FileNo);
+ CurrentCVLoc.setLine(Line);
+ CurrentCVLoc.setColumn(Column);
+ CurrentCVLoc.setPrologueEnd(PrologueEnd);
+ CurrentCVLoc.setIsStmt(IsStmt);
+ CVLocSeen = true;
+ }
+ void clearCVLocSeen() { CVLocSeen = false; }
+
+ bool getCVLocSeen() { return CVLocSeen; }
+ const MCCVLoc &getCurrentCVLoc() { return CurrentCVLoc; }
+
+ bool isValidCVFileNumber(unsigned FileNumber);
+
/// \brief Add a line entry.
void addLineEntry(const MCCVLineEntry &LineEntry) {
size_t Offset = MCCVLines.size();
@@ -157,10 +249,12 @@ public:
const MCSymbol *FuncBegin,
const MCSymbol *FuncEnd);
- void emitInlineLineTableForFunction(
- MCObjectStreamer &OS, unsigned PrimaryFunctionId, unsigned SourceFileId,
- unsigned SourceLineNum, const MCSymbol *FnStartSym,
- const MCSymbol *FnEndSym, ArrayRef<unsigned> SecondaryFunctionIds);
+ void emitInlineLineTableForFunction(MCObjectStreamer &OS,
+ unsigned PrimaryFunctionId,
+ unsigned SourceFileId,
+ unsigned SourceLineNum,
+ const MCSymbol *FnStartSym,
+ const MCSymbol *FnEndSym);
/// Encodes the binary annotations once we have a layout.
void encodeInlineLineTable(MCAsmLayout &Layout,
@@ -180,6 +274,10 @@ public:
void emitFileChecksums(MCObjectStreamer &OS);
private:
+ /// The current CodeView line information from the last .cv_loc directive.
+ MCCVLoc CurrentCVLoc = MCCVLoc(0, 0, 0, 0, false, true);
+ bool CVLocSeen = false;
+
/// Map from string to string table offset.
StringMap<unsigned> StringTable;
@@ -204,6 +302,9 @@ private:
/// A collection of MCCVLineEntry for each section.
std::vector<MCCVLineEntry> MCCVLines;
+
+ /// All known functions and inlined call sites, indexed by function id.
+ std::vector<MCCVFunctionInfo> Functions;
};
} // end namespace llvm
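
A sketch of driving the new CodeViewContext record/query API; the ids, file number, and line/column values are made up for illustration.

#include "llvm/MC/MCCodeView.h"

// Record a top-level function (id 1) and a call site (id 2) inlined into it,
// then confirm the parent linkage via the +1 encoding described above.
bool recordExample(llvm::CodeViewContext &CVCtx) {
  if (!CVCtx.recordFunctionId(/*FuncId=*/1))
    return false;                              // id 1 was already used
  if (!CVCtx.recordInlinedCallSiteId(/*FuncId=*/2, /*IAFunc=*/1,
                                     /*IAFile=*/1, /*IALine=*/42, /*IACol=*/7))
    return false;                              // id 2 was already used
  llvm::MCCVFunctionInfo *Site = CVCtx.getCVFunctionInfo(2);
  return Site && Site->isInlinedCallSite() && Site->getParentFuncId() == 1;
}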
diff --git a/include/llvm/MC/MCContext.h b/include/llvm/MC/MCContext.h
index fe1377e054e8..f846b632f112 100644
--- a/include/llvm/MC/MCContext.h
+++ b/include/llvm/MC/MCContext.h
@@ -16,12 +16,12 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Dwarf.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <tuple>
@@ -83,9 +83,9 @@ namespace llvm {
/// Bindings of names to symbols.
SymbolTable Symbols;
- /// ELF sections can have a corresponding symbol. This maps one to the
+ /// Sections can have a corresponding symbol. This maps one to the
/// other.
- DenseMap<const MCSectionELF *, MCSymbolELF *> SectionSymbols;
+ DenseMap<const MCSection *, MCSymbol *> SectionSymbols;
/// A mapping from a local label number and an instance count to a symbol.
/// For example, in the assembly
@@ -141,10 +141,6 @@ namespace llvm {
MCDwarfLoc CurrentDwarfLoc;
bool DwarfLocSeen;
- /// The current CodeView line information from the last .cv_loc directive.
- MCCVLoc CurrentCVLoc = MCCVLoc(0, 0, 0, 0, false, true);
- bool CVLocSeen = false;
-
/// Generate dwarf debugging info for assembly source files.
bool GenDwarfForAssembly;
@@ -307,6 +303,9 @@ namespace llvm {
/// Get the symbol for \p Name, or null.
MCSymbol *lookupSymbol(const Twine &Name) const;
+ /// Set value for a symbol.
+ void setSymbolValue(MCStreamer &Streamer, StringRef Sym, uint64_t Val);
+
/// getSymbols - Get a reference for the symbol table for clients that
/// want to, for example, iterate over all symbols. 'const' because we
/// still want any modifications to the table itself to use the MCContext
@@ -528,41 +527,15 @@ namespace llvm {
void setDwarfDebugProducer(StringRef S) { DwarfDebugProducer = S; }
StringRef getDwarfDebugProducer() { return DwarfDebugProducer; }
-
+ dwarf::DwarfFormat getDwarfFormat() const {
+ // TODO: Support DWARF64
+ return dwarf::DWARF32;
+ }
void setDwarfVersion(uint16_t v) { DwarfVersion = v; }
uint16_t getDwarfVersion() const { return DwarfVersion; }
/// @}
-
- /// \name CodeView Management
- /// @{
-
- /// Creates an entry in the cv file table.
- unsigned getCVFile(StringRef FileName, unsigned FileNumber);
-
- /// Saves the information from the currently parsed .cv_loc directive
- /// and sets CVLocSeen. When the next instruction is assembled an entry
- /// in the line number table with this information and the address of the
- /// instruction will be created.
- void setCurrentCVLoc(unsigned FunctionId, unsigned FileNo, unsigned Line,
- unsigned Column, bool PrologueEnd, bool IsStmt) {
- CurrentCVLoc.setFunctionId(FunctionId);
- CurrentCVLoc.setFileNum(FileNo);
- CurrentCVLoc.setLine(Line);
- CurrentCVLoc.setColumn(Column);
- CurrentCVLoc.setPrologueEnd(PrologueEnd);
- CurrentCVLoc.setIsStmt(IsStmt);
- CVLocSeen = true;
- }
- void clearCVLocSeen() { CVLocSeen = false; }
-
- bool getCVLocSeen() { return CVLocSeen; }
- const MCCVLoc &getCurrentCVLoc() { return CurrentCVLoc; }
-
- bool isValidCVFileNumber(unsigned FileNumber);
- /// @}
-
char *getSecureLogFile() { return SecureLogFile; }
raw_fd_ostream *getSecureLog() { return SecureLog.get(); }
bool getSecureLogUsed() { return SecureLogUsed; }
@@ -612,7 +585,7 @@ namespace llvm {
/// allocator supports it).
/// \return The allocated memory. Could be NULL.
inline void *operator new(size_t Bytes, llvm::MCContext &C,
- size_t Alignment = 8) LLVM_NOEXCEPT {
+ size_t Alignment = 8) noexcept {
return C.allocate(Bytes, Alignment);
}
/// \brief Placement delete companion to the new above.
@@ -621,8 +594,7 @@ inline void *operator new(size_t Bytes, llvm::MCContext &C,
/// invoking it directly; see the new operator for more details. This operator
/// is called implicitly by the compiler if a placement new expression using
/// the MCContext throws in the object constructor.
-inline void operator delete(void *Ptr, llvm::MCContext &C,
- size_t) LLVM_NOEXCEPT {
+inline void operator delete(void *Ptr, llvm::MCContext &C, size_t) noexcept {
C.deallocate(Ptr);
}
@@ -646,7 +618,7 @@ inline void operator delete(void *Ptr, llvm::MCContext &C,
/// allocator supports it).
/// \return The allocated memory. Could be NULL.
inline void *operator new[](size_t Bytes, llvm::MCContext &C,
- size_t Alignment = 8) LLVM_NOEXCEPT {
+ size_t Alignment = 8) noexcept {
return C.allocate(Bytes, Alignment);
}
@@ -656,7 +628,7 @@ inline void *operator new[](size_t Bytes, llvm::MCContext &C,
/// invoking it directly; see the new[] operator for more details. This operator
/// is called implicitly by the compiler if a placement new[] expression using
/// the MCContext throws in the object constructor.
-inline void operator delete[](void *Ptr, llvm::MCContext &C) LLVM_NOEXCEPT {
+inline void operator delete[](void *Ptr, llvm::MCContext &C) noexcept {
C.deallocate(Ptr);
}
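
A sketch of the placement-new form these operators enable; the Annotation payload type is hypothetical, and Ctx is an MCContext owned by the caller.

#include "llvm/MC/MCContext.h"

// Hypothetical payload allocated in MCContext-owned memory. No matching
// delete is written: the storage lives as long as the context, and the
// placement delete above only runs if the constructor throws.
struct Annotation {
  unsigned Line;
  const char *Text;
};

Annotation *makeAnnotation(llvm::MCContext &Ctx, unsigned Line, const char *Text) {
  return new (Ctx) Annotation{Line, Text};
}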
diff --git a/include/llvm/MC/MCELFStreamer.h b/include/llvm/MC/MCELFStreamer.h
index b108f0df52b6..a5c263844352 100644
--- a/include/llvm/MC/MCELFStreamer.h
+++ b/include/llvm/MC/MCELFStreamer.h
@@ -57,7 +57,7 @@ public:
void EmitCOFFSymbolType(int Type) override;
void EndCOFFSymbolDef() override;
- void emitELFSize(MCSymbolELF *Symbol, const MCExpr *Value) override;
+ void emitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;
void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) override;
diff --git a/include/llvm/MC/MCExpr.h b/include/llvm/MC/MCExpr.h
index b0e4736565b0..0d1e675da459 100644
--- a/include/llvm/MC/MCExpr.h
+++ b/include/llvm/MC/MCExpr.h
@@ -266,6 +266,11 @@ public:
VK_WebAssembly_FUNCTION, // Function table index, rather than virtual addr
+ VK_AMDGPU_GOTPCREL32_LO, // symbol@gotpcrel32@lo
+ VK_AMDGPU_GOTPCREL32_HI, // symbol@gotpcrel32@hi
+ VK_AMDGPU_REL32_LO, // symbol@rel32@lo
+ VK_AMDGPU_REL32_HI, // symbol@rel32@hi
+
VK_TPREL,
VK_DTPREL
};
diff --git a/include/llvm/MC/MCFixup.h b/include/llvm/MC/MCFixup.h
index 8ab477c401a1..b493ca0b0ea7 100644
--- a/include/llvm/MC/MCFixup.h
+++ b/include/llvm/MC/MCFixup.h
@@ -33,6 +33,10 @@ enum MCFixupKind {
FK_GPRel_2, ///< A two-byte gp relative fixup.
FK_GPRel_4, ///< A four-byte gp relative fixup.
FK_GPRel_8, ///< A eight-byte gp relative fixup.
+ FK_DTPRel_4, ///< A four-byte dtp relative fixup.
+ FK_DTPRel_8, ///< A eight-byte dtp relative fixup.
+ FK_TPRel_4, ///< A four-byte tp relative fixup.
+ FK_TPRel_8, ///< A eight-byte tp relative fixup.
FK_SecRel_1, ///< A one-byte section relative fixup.
FK_SecRel_2, ///< A two-byte section relative fixup.
FK_SecRel_4, ///< A four-byte section relative fixup.
diff --git a/include/llvm/MC/MCFragment.h b/include/llvm/MC/MCFragment.h
index e0a2bfc23747..edb740f36d91 100644
--- a/include/llvm/MC/MCFragment.h
+++ b/include/llvm/MC/MCFragment.h
@@ -25,6 +25,7 @@ class MCSubtargetInfo;
class MCFragment : public ilist_node_with_parent<MCFragment, MCSection> {
friend class MCAsmLayout;
+ MCFragment() = delete;
MCFragment(const MCFragment &) = delete;
void operator=(const MCFragment &) = delete;
@@ -83,11 +84,6 @@ protected:
uint8_t BundlePadding, MCSection *Parent = nullptr);
~MCFragment();
-private:
-
- // This is a friend so that the sentinal can be created.
- friend struct ilist_sentinel_traits<MCFragment>;
- MCFragment();
public:
/// Destroys the current fragment.
@@ -350,9 +346,13 @@ class MCOrgFragment : public MCFragment {
/// Value - Value to use for filling bytes.
int8_t Value;
+ /// Loc - Source location of the directive that this fragment was created for.
+ SMLoc Loc;
+
public:
- MCOrgFragment(const MCExpr &Offset, int8_t Value, MCSection *Sec = nullptr)
- : MCFragment(FT_Org, false, 0, Sec), Offset(&Offset), Value(Value) {}
+ MCOrgFragment(const MCExpr &Offset, int8_t Value, SMLoc Loc,
+ MCSection *Sec = nullptr)
+ : MCFragment(FT_Org, false, 0, Sec), Offset(&Offset), Value(Value), Loc(Loc) {}
/// \name Accessors
/// @{
@@ -361,6 +361,8 @@ public:
uint8_t getValue() const { return Value; }
+ SMLoc getLoc() const { return Loc; }
+
/// @}
static bool classof(const MCFragment *F) {
@@ -495,7 +497,6 @@ class MCCVInlineLineTableFragment : public MCFragment {
unsigned StartLineNum;
const MCSymbol *FnStartSym;
const MCSymbol *FnEndSym;
- SmallVector<unsigned, 3> SecondaryFuncs;
SmallString<8> Contents;
/// CodeViewContext has the real knowledge about this format, so let it access
@@ -506,12 +507,10 @@ public:
MCCVInlineLineTableFragment(unsigned SiteFuncId, unsigned StartFileId,
unsigned StartLineNum, const MCSymbol *FnStartSym,
const MCSymbol *FnEndSym,
- ArrayRef<unsigned> SecondaryFuncs,
MCSection *Sec = nullptr)
: MCFragment(FT_CVInlineLines, false, 0, Sec), SiteFuncId(SiteFuncId),
StartFileId(StartFileId), StartLineNum(StartLineNum),
- FnStartSym(FnStartSym), FnEndSym(FnEndSym),
- SecondaryFuncs(SecondaryFuncs.begin(), SecondaryFuncs.end()) {}
+ FnStartSym(FnStartSym), FnEndSym(FnEndSym) {}
/// \name Accessors
/// @{
diff --git a/include/llvm/MC/MCInstPrinter.h b/include/llvm/MC/MCInstPrinter.h
index 2119c5a633b4..320b280cc756 100644
--- a/include/llvm/MC/MCInstPrinter.h
+++ b/include/llvm/MC/MCInstPrinter.h
@@ -10,10 +10,11 @@
#ifndef LLVM_MC_MCINSTPRINTER_H
#define LLVM_MC_MCINSTPRINTER_H
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Format.h"
+#include <cstdint>
namespace llvm {
+
template <typename T> class ArrayRef;
class MCInst;
class raw_ostream;
@@ -27,11 +28,13 @@ class StringRef;
void dumpBytes(ArrayRef<uint8_t> Bytes, raw_ostream &OS);
namespace HexStyle {
+
enum Style {
C, ///< 0xff
Asm ///< 0ffh
};
-}
+
+} // end namespace HexStyle
/// \brief This is an instance of a target assembly language printer that
/// converts an MCInst to valid target assembly syntax.
@@ -60,8 +63,8 @@ protected:
public:
MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
const MCRegisterInfo &mri)
- : CommentStream(nullptr), MAI(mai), MII(mii), MRI(mri), UseMarkup(0),
- PrintImmHex(0), PrintHexStyle(HexStyle::C) {}
+ : CommentStream(nullptr), MAI(mai), MII(mii), MRI(mri), UseMarkup(false),
+ PrintImmHex(false), PrintHexStyle(HexStyle::C) {}
virtual ~MCInstPrinter();
@@ -103,6 +106,6 @@ public:
format_object<uint64_t> formatHex(uint64_t Value) const;
};
-} // namespace llvm
+} // end namespace llvm
-#endif
+#endif // LLVM_MC_MCINSTPRINTER_H
diff --git a/include/llvm/MC/MCInstrDesc.h b/include/llvm/MC/MCInstrDesc.h
index 88aab73d4058..340d8253b8c9 100644
--- a/include/llvm/MC/MCInstrDesc.h
+++ b/include/llvm/MC/MCInstrDesc.h
@@ -47,8 +47,22 @@ enum OperandType {
OPERAND_REGISTER = 2,
OPERAND_MEMORY = 3,
OPERAND_PCREL = 4,
- OPERAND_FIRST_TARGET = 5
+
+ OPERAND_FIRST_GENERIC = 6,
+ OPERAND_GENERIC_0 = 6,
+ OPERAND_GENERIC_1 = 7,
+ OPERAND_GENERIC_2 = 8,
+ OPERAND_GENERIC_3 = 9,
+ OPERAND_GENERIC_4 = 10,
+ OPERAND_GENERIC_5 = 11,
+ OPERAND_LAST_GENERIC = 11,
+
+ OPERAND_FIRST_TARGET = 12,
};
+
+enum GenericOperandType {
+};
+
}
/// \brief This holds information about one operand of a machine instruction,
@@ -83,6 +97,16 @@ public:
/// \brief Set if this operand is a optional def.
bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }
+
+ bool isGenericType() const {
+ return OperandType >= MCOI::OPERAND_FIRST_GENERIC &&
+ OperandType <= MCOI::OPERAND_LAST_GENERIC;
+ }
+
+ unsigned getGenericTypeIndex() const {
+ assert(isGenericType() && "non-generic types don't have an index");
+ return OperandType - MCOI::OPERAND_FIRST_GENERIC;
+ }
};
//===----------------------------------------------------------------------===//
@@ -126,7 +150,8 @@ enum Flag {
RegSequence,
ExtractSubreg,
InsertSubreg,
- Convergent
+ Convergent,
+ Add
};
}
@@ -191,32 +216,35 @@ public:
unsigned getNumDefs() const { return NumDefs; }
/// \brief Return flags of this instruction.
- unsigned getFlags() const { return Flags; }
+ uint64_t getFlags() const { return Flags; }
/// \brief Return true if this instruction can have a variable number of
/// operands. In this case, the variable operands will be after the normal
/// operands but before the implicit definitions and uses (if any are
/// present).
- bool isVariadic() const { return Flags & (1 << MCID::Variadic); }
+ bool isVariadic() const { return Flags & (1ULL << MCID::Variadic); }
/// \brief Set if this instruction has an optional definition, e.g.
/// ARM instructions which can set condition code if 's' bit is set.
- bool hasOptionalDef() const { return Flags & (1 << MCID::HasOptionalDef); }
+ bool hasOptionalDef() const { return Flags & (1ULL << MCID::HasOptionalDef); }
/// \brief Return true if this is a pseudo instruction that doesn't
/// correspond to a real machine instruction.
- bool isPseudo() const { return Flags & (1 << MCID::Pseudo); }
+ bool isPseudo() const { return Flags & (1ULL << MCID::Pseudo); }
/// \brief Return true if the instruction is a return.
- bool isReturn() const { return Flags & (1 << MCID::Return); }
+ bool isReturn() const { return Flags & (1ULL << MCID::Return); }
+
+ /// \brief Return true if the instruction is an add instruction.
+ bool isAdd() const { return Flags & (1ULL << MCID::Add); }
/// \brief Return true if the instruction is a call.
- bool isCall() const { return Flags & (1 << MCID::Call); }
+ bool isCall() const { return Flags & (1ULL << MCID::Call); }
/// \brief Returns true if the specified instruction stops control flow
/// from executing the instruction immediately following it. Examples include
/// unconditional branches and return instructions.
- bool isBarrier() const { return Flags & (1 << MCID::Barrier); }
+ bool isBarrier() const { return Flags & (1ULL << MCID::Barrier); }
/// \brief Returns true if this instruction part of the terminator for
/// a basic block. Typically this is things like return and branch
@@ -224,17 +252,17 @@ public:
///
/// Various passes use this to insert code into the bottom of a basic block,
/// but before control flow occurs.
- bool isTerminator() const { return Flags & (1 << MCID::Terminator); }
+ bool isTerminator() const { return Flags & (1ULL << MCID::Terminator); }
/// \brief Returns true if this is a conditional, unconditional, or
/// indirect branch. Predicates below can be used to discriminate between
/// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
/// get more information.
- bool isBranch() const { return Flags & (1 << MCID::Branch); }
+ bool isBranch() const { return Flags & (1ULL << MCID::Branch); }
/// \brief Return true if this is an indirect branch, such as a
/// branch through a register.
- bool isIndirectBranch() const { return Flags & (1 << MCID::IndirectBranch); }
+ bool isIndirectBranch() const { return Flags & (1ULL << MCID::IndirectBranch); }
/// \brief Return true if this is a branch which may fall
/// through to the next instruction or may transfer control flow to some other
@@ -261,29 +289,29 @@ public:
/// that controls execution. It may be set to 'always', or may be set to other
/// values. There are various methods in TargetInstrInfo that can be used to
/// control and modify the predicate in this instruction.
- bool isPredicable() const { return Flags & (1 << MCID::Predicable); }
+ bool isPredicable() const { return Flags & (1ULL << MCID::Predicable); }
/// \brief Return true if this instruction is a comparison.
- bool isCompare() const { return Flags & (1 << MCID::Compare); }
+ bool isCompare() const { return Flags & (1ULL << MCID::Compare); }
/// \brief Return true if this instruction is a move immediate
/// (including conditional moves) instruction.
- bool isMoveImmediate() const { return Flags & (1 << MCID::MoveImm); }
+ bool isMoveImmediate() const { return Flags & (1ULL << MCID::MoveImm); }
/// \brief Return true if this instruction is a bitcast instruction.
- bool isBitcast() const { return Flags & (1 << MCID::Bitcast); }
+ bool isBitcast() const { return Flags & (1ULL << MCID::Bitcast); }
/// \brief Return true if this is a select instruction.
- bool isSelect() const { return Flags & (1 << MCID::Select); }
+ bool isSelect() const { return Flags & (1ULL << MCID::Select); }
/// \brief Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has unique labels attached
/// to it, duplicating it would cause multiple definition errors.
- bool isNotDuplicable() const { return Flags & (1 << MCID::NotDuplicable); }
+ bool isNotDuplicable() const { return Flags & (1ULL << MCID::NotDuplicable); }
/// \brief Returns true if the specified instruction has a delay slot which
/// must be filled by the code generator.
- bool hasDelaySlot() const { return Flags & (1 << MCID::DelaySlot); }
+ bool hasDelaySlot() const { return Flags & (1ULL << MCID::DelaySlot); }
/// \brief Return true for instructions that can be folded as memory operands
/// in other instructions. The most common use for this is instructions that
@@ -292,7 +320,7 @@ public:
/// constant-pool loads, such as V_SETALLONES on x86, to allow them to be
/// folded when it is beneficial. This should only be set on instructions
/// that return a value in their only virtual register definition.
- bool canFoldAsLoad() const { return Flags & (1 << MCID::FoldableAsLoad); }
+ bool canFoldAsLoad() const { return Flags & (1ULL << MCID::FoldableAsLoad); }
/// \brief Return true if this instruction behaves
/// the same way as the generic REG_SEQUENCE instructions.
@@ -304,7 +332,7 @@ public:
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
/// override accordingly.
- bool isRegSequenceLike() const { return Flags & (1 << MCID::RegSequence); }
+ bool isRegSequenceLike() const { return Flags & (1ULL << MCID::RegSequence); }
/// \brief Return true if this instruction behaves
/// the same way as the generic EXTRACT_SUBREG instructions.
@@ -318,7 +346,7 @@ public:
/// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
/// override accordingly.
bool isExtractSubregLike() const {
- return Flags & (1 << MCID::ExtractSubreg);
+ return Flags & (1ULL << MCID::ExtractSubreg);
}
/// \brief Return true if this instruction behaves
@@ -331,14 +359,14 @@ public:
/// Note that for the optimizers to be able to take advantage of
/// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
/// override accordingly.
- bool isInsertSubregLike() const { return Flags & (1 << MCID::InsertSubreg); }
+ bool isInsertSubregLike() const { return Flags & (1ULL << MCID::InsertSubreg); }
/// \brief Return true if this instruction is convergent.
///
/// Convergent instructions may not be made control-dependent on any
/// additional values.
- bool isConvergent() const { return Flags & (1 << MCID::Convergent); }
+ bool isConvergent() const { return Flags & (1ULL << MCID::Convergent); }
//===--------------------------------------------------------------------===//
// Side Effect Analysis
@@ -347,13 +375,13 @@ public:
/// \brief Return true if this instruction could possibly read memory.
/// Instructions with this flag set are not necessarily simple load
/// instructions, they may load a value and modify it, for example.
- bool mayLoad() const { return Flags & (1 << MCID::MayLoad); }
+ bool mayLoad() const { return Flags & (1ULL << MCID::MayLoad); }
/// \brief Return true if this instruction could possibly modify memory.
/// Instructions with this flag set are not necessarily simple store
/// instructions, they may store a modified value based on their operands, or
/// may not actually modify anything, for example.
- bool mayStore() const { return Flags & (1 << MCID::MayStore); }
+ bool mayStore() const { return Flags & (1ULL << MCID::MayStore); }
/// \brief Return true if this instruction has side
/// effects that are not modeled by other flags. This does not return true
@@ -368,7 +396,7 @@ public:
/// a control register, flushing a cache, modifying a register invisible to
/// LLVM, etc.
bool hasUnmodeledSideEffects() const {
- return Flags & (1 << MCID::UnmodeledSideEffects);
+ return Flags & (1ULL << MCID::UnmodeledSideEffects);
}
//===--------------------------------------------------------------------===//
@@ -385,7 +413,7 @@ public:
/// sometimes. In these cases, the call to commuteInstruction will fail.
/// Also note that some instructions require non-trivial modification to
/// commute them.
- bool isCommutable() const { return Flags & (1 << MCID::Commutable); }
+ bool isCommutable() const { return Flags & (1ULL << MCID::Commutable); }
/// \brief Return true if this is a 2-address instruction which can be changed
/// into a 3-address instruction if needed. Doing this transformation can be
@@ -402,7 +430,7 @@ public:
/// instruction (e.g. shl reg, 4 on x86).
///
bool isConvertibleTo3Addr() const {
- return Flags & (1 << MCID::ConvertibleTo3Addr);
+ return Flags & (1ULL << MCID::ConvertibleTo3Addr);
}
/// \brief Return true if this instruction requires custom insertion support
@@ -414,14 +442,14 @@ public:
/// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
/// is used to insert this into the MachineBasicBlock.
bool usesCustomInsertionHook() const {
- return Flags & (1 << MCID::UsesCustomInserter);
+ return Flags & (1ULL << MCID::UsesCustomInserter);
}
/// \brief Return true if this instruction requires *adjustment* after
/// instruction selection by calling a target hook. For example, this can be
/// used to fill in ARM 's' optional operand depending on whether the
/// conditional flag register is used.
- bool hasPostISelHook() const { return Flags & (1 << MCID::HasPostISelHook); }
+ bool hasPostISelHook() const { return Flags & (1ULL << MCID::HasPostISelHook); }
/// \brief Returns true if this instruction is a candidate for remat. This
/// flag is only used in TargetInstrInfo method isTriviallyRematerializable.
@@ -430,7 +458,7 @@ public:
/// or isReallyTriviallyReMaterializableGeneric methods are called to verify
/// the instruction is really rematable.
bool isRematerializable() const {
- return Flags & (1 << MCID::Rematerializable);
+ return Flags & (1ULL << MCID::Rematerializable);
}
/// \brief Returns true if this instruction has the same cost (or less) than a
@@ -442,7 +470,7 @@ public:
///
/// This method could be called by interface TargetInstrInfo::isAsCheapAsAMove
/// for different subtargets.
- bool isAsCheapAsAMove() const { return Flags & (1 << MCID::CheapAsAMove); }
+ bool isAsCheapAsAMove() const { return Flags & (1ULL << MCID::CheapAsAMove); }
/// \brief Returns true if this instruction source operands have special
/// register allocation requirements that are not captured by the operand
@@ -451,7 +479,7 @@ public:
/// allocation passes should not attempt to change allocations for sources of
/// instructions with this flag.
bool hasExtraSrcRegAllocReq() const {
- return Flags & (1 << MCID::ExtraSrcRegAllocReq);
+ return Flags & (1ULL << MCID::ExtraSrcRegAllocReq);
}
/// \brief Returns true if this instruction def operands have special register
@@ -461,7 +489,7 @@ public:
/// allocation passes should not attempt to change allocations for definitions
/// of instructions with this flag.
bool hasExtraDefRegAllocReq() const {
- return Flags & (1 << MCID::ExtraDefRegAllocReq);
+ return Flags & (1ULL << MCID::ExtraDefRegAllocReq);
}
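
The switch from 1 << to 1ULL << matters because getFlags() is now 64 bits wide: shifting the int literal 1 by a flag index past bit 31 is undefined behavior, so once the MCID::Flag enum grows beyond 32 entries the wider literal is required. A minimal stand-alone illustration, with the bit position as an assumed example:

#include <cstdint>

bool hasFlag(uint64_t Flags, unsigned Bit) {
  // Correct for any Bit in [0, 63]: the literal is widened before the shift.
  return Flags & (1ULL << Bit);
  // By contrast, `1 << Bit` shifts an int, which is undefined once Bit >= 32.
}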
/// \brief Return a list of registers that are potentially read by any
diff --git a/include/llvm/MC/MCInstrInfo.h b/include/llvm/MC/MCInstrInfo.h
index 70c86587b08c..80f1f320b7c2 100644
--- a/include/llvm/MC/MCInstrInfo.h
+++ b/include/llvm/MC/MCInstrInfo.h
@@ -48,9 +48,9 @@ public:
}
/// \brief Returns the name for the instructions with the given opcode.
- const char *getName(unsigned Opcode) const {
+ StringRef getName(unsigned Opcode) const {
assert(Opcode < NumOpcodes && "Invalid opcode!");
- return &InstrNameData[InstrNameIndices[Opcode]];
+ return StringRef(&InstrNameData[InstrNameIndices[Opcode]]);
}
};
diff --git a/include/llvm/MC/MCInstrItineraries.h b/include/llvm/MC/MCInstrItineraries.h
index b2871a9805e1..1fb276a302b9 100644
--- a/include/llvm/MC/MCInstrItineraries.h
+++ b/include/llvm/MC/MCInstrItineraries.h
@@ -111,7 +111,7 @@ public:
MCSchedModel SchedModel; ///< Basic machine properties.
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
- const unsigned *Forwardings; ///< Array of pipeline forwarding pathes
+ const unsigned *Forwardings; ///< Array of pipeline forwarding paths
const InstrItinerary *Itineraries; ///< Array of itineraries selected
/// Ctors.
diff --git a/include/llvm/MC/MCObjectFileInfo.h b/include/llvm/MC/MCObjectFileInfo.h
index cef4e5b3eb93..9aa8812c7bb3 100644
--- a/include/llvm/MC/MCObjectFileInfo.h
+++ b/include/llvm/MC/MCObjectFileInfo.h
@@ -65,12 +65,6 @@ protected:
/// constants.
MCSection *ReadOnlySection;
- /// This section contains the static constructor pointer list.
- MCSection *StaticCtorSection;
-
- /// This section contains the static destructor pointer list.
- MCSection *StaticDtorSection;
-
/// If exception handling is supported by the target, this is the section the
/// Language Specific Data Area information is emitted to.
MCSection *LSDASection;
diff --git a/include/llvm/MC/MCObjectStreamer.h b/include/llvm/MC/MCObjectStreamer.h
index d7775f27868c..f9111b7f47ea 100644
--- a/include/llvm/MC/MCObjectStreamer.h
+++ b/include/llvm/MC/MCObjectStreamer.h
@@ -112,7 +112,8 @@ public:
unsigned MaxBytesToEmit = 0) override;
void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit = 0) override;
- void emitValueToOffset(const MCExpr *Offset, unsigned char Value) override;
+ void emitValueToOffset(const MCExpr *Offset, unsigned char Value,
+ SMLoc Loc) override;
void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
unsigned Column, unsigned Flags,
unsigned Isa, unsigned Discriminator,
@@ -124,18 +125,23 @@ public:
const MCSymbol *Label);
void EmitCVLocDirective(unsigned FunctionId, unsigned FileNo, unsigned Line,
unsigned Column, bool PrologueEnd, bool IsStmt,
- StringRef FileName) override;
+ StringRef FileName, SMLoc Loc) override;
void EmitCVLinetableDirective(unsigned FunctionId, const MCSymbol *Begin,
const MCSymbol *End) override;
- void EmitCVInlineLinetableDirective(
- unsigned PrimaryFunctionId, unsigned SourceFileId, unsigned SourceLineNum,
- const MCSymbol *FnStartSym, const MCSymbol *FnEndSym,
- ArrayRef<unsigned> SecondaryFunctionIds) override;
+ void EmitCVInlineLinetableDirective(unsigned PrimaryFunctionId,
+ unsigned SourceFileId,
+ unsigned SourceLineNum,
+ const MCSymbol *FnStartSym,
+ const MCSymbol *FnEndSym) override;
void EmitCVDefRangeDirective(
ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
StringRef FixedSizePortion) override;
void EmitCVStringTableDirective() override;
void EmitCVFileChecksumsDirective() override;
+ void EmitDTPRel32Value(const MCExpr *Value) override;
+ void EmitDTPRel64Value(const MCExpr *Value) override;
+ void EmitTPRel32Value(const MCExpr *Value) override;
+ void EmitTPRel64Value(const MCExpr *Value) override;
void EmitGPRel32Value(const MCExpr *Value) override;
void EmitGPRel64Value(const MCExpr *Value) override;
bool EmitRelocDirective(const MCExpr &Offset, StringRef Name,
diff --git a/include/llvm/MC/MCParser/AsmLexer.h b/include/llvm/MC/MCParser/AsmLexer.h
index c779121b6cf0..029598c013d3 100644
--- a/include/llvm/MC/MCParser/AsmLexer.h
+++ b/include/llvm/MC/MCParser/AsmLexer.h
@@ -31,7 +31,8 @@ class AsmLexer : public MCAsmLexer {
StringRef CurBuf;
bool IsAtStartOfLine;
bool IsAtStartOfStatement;
-
+ bool IsParsingMSInlineAsm;
+ bool IsPeeking;
void operator=(const AsmLexer&) = delete;
AsmLexer(const AsmLexer&) = delete;
@@ -44,6 +45,7 @@ public:
~AsmLexer() override;
void setBuffer(StringRef Buf, const char *ptr = nullptr);
+ void setParsingMSInlineAsm(bool V) { IsParsingMSInlineAsm = V; }
StringRef LexUntilEndOfStatement() override;
diff --git a/include/llvm/MC/MCParser/MCAsmLexer.h b/include/llvm/MC/MCParser/MCAsmLexer.h
index 3dd22c93d363..56da6f85c199 100644
--- a/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -55,7 +55,15 @@ public:
Pipe, PipePipe, Caret,
Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
Less, LessEqual, LessLess, LessGreater,
- Greater, GreaterEqual, GreaterGreater, At
+ Greater, GreaterEqual, GreaterGreater, At,
+
+ // MIPS unary expression operators such as %neg.
+ PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
+ PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
+ PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
+ PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
+ PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
+ PercentTprel_Lo
};
private:
@@ -120,6 +128,20 @@ public:
}
};
+/// A callback class which is notified of each comment in an assembly file as
+/// it is lexed.
+class AsmCommentConsumer {
+public:
+ virtual ~AsmCommentConsumer() {};
+
+ /// Callback function for when a comment is lexed. Loc is the start of the
+ /// comment text (excluding the comment-start marker). CommentText is the text
+ /// of the comment, excluding the comment start and end markers, and the
+ /// newline for single-line comments.
+ virtual void HandleComment(SMLoc Loc, StringRef CommentText) = 0;
+};
+
+
/// Generic assembler lexer interface, for use by target specific assembly
/// lexers.
class MCAsmLexer {
@@ -136,6 +158,8 @@ protected: // Can only create subclasses.
const char *TokStart;
bool SkipSpace;
bool AllowAtInIdentifier;
+ bool IsAtStartOfStatement;
+ AsmCommentConsumer *CommentConsumer;
MCAsmLexer();
@@ -155,6 +179,8 @@ public:
/// the main input file has been reached.
const AsmToken &Lex() {
assert(!CurTok.empty());
+ // Mark whether we are parsing out an EndOfStatement.
+ IsAtStartOfStatement = CurTok.front().getKind() == AsmToken::EndOfStatement;
CurTok.erase(CurTok.begin());
// LexToken may generate multiple tokens via UnLex but will always return
// the first one. Place returned value at head of CurTok vector.
@@ -166,9 +192,12 @@ public:
}
void UnLex(AsmToken const &Token) {
+ IsAtStartOfStatement = false;
CurTok.insert(CurTok.begin(), Token);
}
+ bool isAtStartOfStatement() { return IsAtStartOfStatement; }
+
virtual StringRef LexUntilEndOfStatement() = 0;
/// Get the current source location.
@@ -220,6 +249,10 @@ public:
bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }
+
+ void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
+ this->CommentConsumer = CommentConsumer;
+ }
};
} // End llvm namespace
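
A sketch of wiring up the new comment callback; the collector class is arbitrary and only relies on HandleComment() and setCommentConsumer() as declared above.

#include "llvm/MC/MCParser/MCAsmLexer.h"
#include <string>
#include <vector>

// Collects every comment the lexer encounters while the stream is lexed.
class CommentCollector : public llvm::AsmCommentConsumer {
public:
  void HandleComment(llvm::SMLoc Loc, llvm::StringRef CommentText) override {
    Comments.push_back(CommentText.str());
  }
  std::vector<std::string> Comments;
};

void attachCollector(llvm::MCAsmLexer &Lexer, CommentCollector &Collector) {
  Lexer.setCommentConsumer(&Collector);  // callbacks fire during lexing
}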
diff --git a/include/llvm/MC/MCParser/MCAsmParser.h b/include/llvm/MC/MCParser/MCAsmParser.h
index ac8706d99505..eb85a3a30963 100644
--- a/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/include/llvm/MC/MCParser/MCAsmParser.h
@@ -11,7 +11,9 @@
#define LLVM_MC_MCPARSER_MCASMPARSER_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/Support/DataTypes.h"
@@ -67,6 +69,12 @@ public:
typedef std::pair<MCAsmParserExtension*, DirectiveHandler>
ExtensionDirectiveHandler;
+ struct MCPendingError {
+ SMLoc Loc;
+ SmallString<64> Msg;
+ SMRange Range;
+ };
+
private:
MCAsmParser(const MCAsmParser &) = delete;
void operator=(const MCAsmParser &) = delete;
@@ -78,6 +86,11 @@ private:
protected: // Can only create subclasses.
MCAsmParser();
+ /// Flag tracking whether any errors have been encountered.
+ bool HadError;
+
+ SmallVector<MCPendingError, 1> PendingErrors;
+
public:
virtual ~MCAsmParser();
@@ -122,21 +135,38 @@ public:
const MCInstPrinter *IP, MCAsmParserSemaCallback &SI) = 0;
/// \brief Emit a note at the location \p L, with the message \p Msg.
- virtual void Note(SMLoc L, const Twine &Msg,
- ArrayRef<SMRange> Ranges = None) = 0;
+ virtual void Note(SMLoc L, const Twine &Msg, SMRange Range = None) = 0;
/// \brief Emit a warning at the location \p L, with the message \p Msg.
///
/// \return The return value is true, if warnings are fatal.
- virtual bool Warning(SMLoc L, const Twine &Msg,
- ArrayRef<SMRange> Ranges = None) = 0;
+ virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) = 0;
+
+ /// \brief Return an error at the location \p L, with the message \p Msg. This
+ /// may be modified before being emitted.
+ ///
+ /// \return The return value is always true, as an idiomatic convenience to
+ /// clients.
+ bool Error(SMLoc L, const Twine &Msg, SMRange Range = None);
/// \brief Emit an error at the location \p L, with the message \p Msg.
///
/// \return The return value is always true, as an idiomatic convenience to
/// clients.
- virtual bool Error(SMLoc L, const Twine &Msg,
- ArrayRef<SMRange> Ranges = None) = 0;
+ virtual bool printError(SMLoc L, const Twine &Msg, SMRange Range = None) = 0;
+
+ bool hasPendingError() { return !PendingErrors.empty(); }
+
+ bool printPendingErrors() {
+ bool rv = !PendingErrors.empty();
+ for (auto Err : PendingErrors) {
+ printError(Err.Loc, Twine(Err.Msg), Err.Range);
+ }
+ PendingErrors.clear();
+ return rv;
+ }
+
+ bool addErrorSuffix(const Twine &Suffix);
/// \brief Get the next AsmToken in the stream, possibly handling file
/// inclusion first.
@@ -146,7 +176,22 @@ public:
const AsmToken &getTok() const;
/// \brief Report an error at the current lexer location.
- bool TokError(const Twine &Msg, ArrayRef<SMRange> Ranges = None);
+ bool TokError(const Twine &Msg, SMRange Range = None);
+
+ bool parseTokenLoc(SMLoc &Loc);
+ bool parseToken(AsmToken::TokenKind T, const Twine &Msg = "unexpected token");
+ /// \brief Attempt to parse and consume token, returning true on
+ /// success.
+ bool parseOptionalToken(AsmToken::TokenKind T);
+
+ bool parseEOL(const Twine &ErrMsg);
+
+ bool parseMany(std::function<bool()> parseOne, bool hasComma = true);
+
+ bool parseIntToken(int64_t &V, const Twine &ErrMsg);
+
+ bool check(bool P, const llvm::Twine &Msg);
+ bool check(bool P, SMLoc Loc, const llvm::Twine &Msg);
/// \brief Parse an identifier or string (as a quoted identifier) and set \p
/// Res to the identifier contents.
@@ -196,7 +241,8 @@ public:
/// \brief Ensure that we have a valid section set in the streamer. Otherwise,
/// report an error and switch to .text.
- virtual void checkForValidSection() = 0;
+ /// \return - False on success.
+ virtual bool checkForValidSection() = 0;
/// \brief Parse an arbitrary expression of a specified parenthesis depth,
/// assuming that the initial '(' characters have already been consumed.
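
A hedged sketch of how a directive parser can use the new token helpers; the directive name and what is done with each symbol are hypothetical, and only parseIdentifier, parseOptionalToken, and parseToken from this interface are used.

#include "llvm/MC/MCParser/MCAsmParser.h"

// Parse ".mydirective sym1, sym2, ..." up to the end of statement.
bool parseMyDirective(llvm::MCAsmParser &Parser) {
  do {
    llvm::StringRef Name;
    if (Parser.parseIdentifier(Name))
      return true;                         // failure: an error is already pending
    // ... record Name somewhere (omitted in this sketch) ...
  } while (Parser.parseOptionalToken(llvm::AsmToken::Comma));
  return Parser.parseToken(llvm::AsmToken::EndOfStatement,
                           "expected end of statement in '.mydirective'");
}

parseMany() packages the same comma loop behind a callback; the explicit loop is shown only to keep the sketch self-contained.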
diff --git a/include/llvm/MC/MCParser/MCAsmParserExtension.h b/include/llvm/MC/MCParser/MCAsmParserExtension.h
index 30b25dcfdaec..dabda0ab9485 100644
--- a/include/llvm/MC/MCParser/MCAsmParserExtension.h
+++ b/include/llvm/MC/MCParser/MCAsmParserExtension.h
@@ -68,8 +68,8 @@ public:
bool Warning(SMLoc L, const Twine &Msg) {
return getParser().Warning(L, Msg);
}
- bool Error(SMLoc L, const Twine &Msg) {
- return getParser().Error(L, Msg);
+ bool Error(SMLoc L, const Twine &Msg, SMRange Range = SMRange()) {
+ return getParser().Error(L, Msg, Range);
}
void Note(SMLoc L, const Twine &Msg) {
getParser().Note(L, Msg);
@@ -79,8 +79,31 @@ public:
}
const AsmToken &Lex() { return getParser().Lex(); }
-
const AsmToken &getTok() { return getParser().getTok(); }
+ bool parseToken(AsmToken::TokenKind T,
+ const Twine &Msg = "unexpected token") {
+ return getParser().parseToken(T, Msg);
+ }
+
+ bool parseMany(std::function<bool()> parseOne, bool hasComma = true) {
+ return getParser().parseMany(parseOne, hasComma);
+ }
+
+ bool parseOptionalToken(AsmToken::TokenKind T) {
+ return getParser().parseOptionalToken(T);
+ }
+
+ bool check(bool P, const llvm::Twine &Msg) {
+ return getParser().check(P, Msg);
+ }
+
+ bool check(bool P, SMLoc Loc, const llvm::Twine &Msg) {
+ return getParser().check(P, Loc, Msg);
+ }
+
+ bool addErrorSuffix(const Twine &Suffix) {
+ return getParser().addErrorSuffix(Suffix);
+ }
bool HasBracketExpressions() const { return BracketExpressionsSupported; }
diff --git a/include/llvm/MC/MCParser/MCTargetAsmParser.h b/include/llvm/MC/MCParser/MCTargetAsmParser.h
index 28a7b9664882..70cd60c9a112 100644
--- a/include/llvm/MC/MCParser/MCTargetAsmParser.h
+++ b/include/llvm/MC/MCParser/MCTargetAsmParser.h
@@ -82,6 +82,12 @@ struct ParseInstructionInfo {
: AsmRewrites(rewrites) {}
};
+enum OperandMatchResultTy {
+ MatchOperand_Success, // operand matched successfully
+ MatchOperand_NoMatch, // operand did not match
+ MatchOperand_ParseFail // operand matched but had errors
+};
+
/// MCTargetAsmParser - Generic interface to target specific assembly parsers.
class MCTargetAsmParser : public MCAsmParserExtension {
public:
@@ -196,6 +202,13 @@ public:
return Match_InvalidOperand;
}
+ /// Validate the instruction match against any complex target predicates
+ /// before rendering any operands to it.
+ virtual unsigned
+ checkEarlyTargetMatchPredicate(MCInst &Inst, const OperandVector &Operands) {
+ return Match_Success;
+ }
+
/// checkTargetMatchPredicate - Validate the instruction match against
/// any complex target predicates not expressible via match classes.
virtual unsigned checkTargetMatchPredicate(MCInst &Inst) {
@@ -217,6 +230,16 @@ public:
}
virtual void onLabelParsed(MCSymbol *Symbol) { }
+
+ /// Ensure that all previously parsed instructions have been emitted to the
+ /// output streamer, if the target does not emit them immediately.
+ virtual void flushPendingInstructions(MCStreamer &Out) { }
+
+ virtual const MCExpr *createTargetUnaryExpr(const MCExpr *E,
+ AsmToken::TokenKind OperatorToken,
+ MCContext &Ctx) {
+ return nullptr;
+ }
};
} // End llvm namespace
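
A sketch of the convention the relocated OperandMatchResultTy captures, written as a hypothetical custom-operand hook in a target parser; the register syntax check is made up.

#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"

// Tri-state result: NoMatch lets other operand parsers try, ParseFail means
// "this was our operand but it is malformed" (an error should be reported),
// and Success means the operand was consumed (and, in a real parser, pushed
// onto the operand vector).
llvm::OperandMatchResultTy tryParseMyRegister(llvm::MCAsmParser &Parser) {
  const llvm::AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(llvm::AsmToken::Identifier) || !Tok.getString().startswith("r"))
    return llvm::MatchOperand_NoMatch;
  Parser.Lex();                            // consume the register token
  return llvm::MatchOperand_Success;
}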
diff --git a/include/llvm/MC/MCRegisterInfo.h b/include/llvm/MC/MCRegisterInfo.h
index 548280614e92..3dc88a298ff8 100644
--- a/include/llvm/MC/MCRegisterInfo.h
+++ b/include/llvm/MC/MCRegisterInfo.h
@@ -17,6 +17,7 @@
#define LLVM_MC_MCREGISTERINFO_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
@@ -161,7 +162,7 @@ private:
unsigned NumRegUnits; // Number of regunits.
const MCPhysReg (*RegUnitRoots)[2]; // Pointer to regunit root table.
const MCPhysReg *DiffLists; // Pointer to the difflists array
- const unsigned *RegUnitMaskSequences; // Pointer to lane mask sequences
+ const LaneBitmask *RegUnitMaskSequences; // Pointer to lane mask sequences
// for register units.
const char *RegStrings; // Pointer to the string table.
const char *RegClassStrings; // Pointer to the class strings.
@@ -248,7 +249,7 @@ public:
const MCPhysReg (*RURoots)[2],
unsigned NRU,
const MCPhysReg *DL,
- const unsigned *RUMS,
+ const LaneBitmask *RUMS,
const char *Strings,
const char *ClassStrings,
const uint16_t *SubIndices,
@@ -271,6 +272,16 @@ public:
NumSubRegIndices = NumIndices;
SubRegIdxRanges = SubIdxRanges;
RegEncodingTable = RET;
+
+ // Initialize DWARF register mapping variables
+ EHL2DwarfRegs = nullptr;
+ EHL2DwarfRegsSize = 0;
+ L2DwarfRegs = nullptr;
+ L2DwarfRegsSize = 0;
+ EHDwarf2LRegs = nullptr;
+ EHDwarf2LRegsSize = 0;
+ Dwarf2LRegs = nullptr;
+ Dwarf2LRegsSize = 0;
}
/// \brief Used to initialize LLVM register to Dwarf
@@ -569,11 +580,12 @@ public:
}
};
-/// MCRegUnitIterator enumerates a list of register units and their associated
-/// lane masks for Reg. The register units are in ascending numerical order.
+/// MCRegUnitMaskIterator enumerates a list of register units and their
+/// associated lane masks for Reg. The register units are in ascending
+/// numerical order.
class MCRegUnitMaskIterator {
MCRegUnitIterator RUIter;
- const unsigned *MaskListIter;
+ const LaneBitmask *MaskListIter;
public:
MCRegUnitMaskIterator() {}
/// Constructs an iterator that traverses the register units and their
@@ -585,7 +597,7 @@ public:
}
/// Returns a (RegUnit, LaneMask) pair.
- std::pair<unsigned,unsigned> operator*() const {
+ std::pair<unsigned,LaneBitmask> operator*() const {
return std::make_pair(*RUIter, *MaskListIter);
}
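With the lane-mask sequences now stored as LaneBitmask, dereferencing the iterator yields a typed pair. A minimal sketch, assuming an initialized MCRegisterInfo MRI and a physical register Reg supplied by the surrounding target code:

// Sketch only: MRI and Reg are assumed to exist; output goes to the debug stream.
for (MCRegUnitMaskIterator U(Reg, &MRI); U.isValid(); ++U) {
  std::pair<unsigned, LaneBitmask> Unit = *U;
  dbgs() << "regunit " << Unit.first << " lanemask "
         << PrintLaneMask(Unit.second) << '\n';
}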
diff --git a/include/llvm/MC/MCSection.h b/include/llvm/MC/MCSection.h
index a8d7af9bd651..d4c31696b40f 100644
--- a/include/llvm/MC/MCSection.h
+++ b/include/llvm/MC/MCSection.h
@@ -31,16 +31,8 @@ class MCSection;
class MCSymbol;
class raw_ostream;
-template<>
-struct ilist_node_traits<MCFragment> {
- MCFragment *createNode(const MCFragment &V);
+template <> struct ilist_alloc_traits<MCFragment> {
static void deleteNode(MCFragment *V);
-
- void addNodeToList(MCFragment *) {}
- void removeNodeFromList(MCFragment *) {}
- void transferNodesFromList(ilist_node_traits & /*SrcTraits*/,
- ilist_iterator<MCFragment> /*first*/,
- ilist_iterator<MCFragment> /*last*/) {}
};
/// Instances of this class represent a uniqued identifier for a section in the
@@ -161,25 +153,17 @@ public:
const MCDummyFragment &getDummyFragment() const { return DummyFragment; }
MCDummyFragment &getDummyFragment() { return DummyFragment; }
- MCSection::iterator begin();
- MCSection::const_iterator begin() const {
- return const_cast<MCSection *>(this)->begin();
- }
+ iterator begin() { return Fragments.begin(); }
+ const_iterator begin() const { return Fragments.begin(); }
- MCSection::iterator end();
- MCSection::const_iterator end() const {
- return const_cast<MCSection *>(this)->end();
- }
+ iterator end() { return Fragments.end(); }
+ const_iterator end() const { return Fragments.end(); }
- MCSection::reverse_iterator rbegin();
- MCSection::const_reverse_iterator rbegin() const {
- return const_cast<MCSection *>(this)->rbegin();
- }
+ reverse_iterator rbegin() { return Fragments.rbegin(); }
+ const_reverse_iterator rbegin() const { return Fragments.rbegin(); }
- MCSection::reverse_iterator rend();
- MCSection::const_reverse_iterator rend() const {
- return const_cast<MCSection *>(this)->rend();
- }
+ reverse_iterator rend() { return Fragments.rend(); }
+ const_reverse_iterator rend() const { return Fragments.rend(); }
MCSection::iterator getSubsectionInsertionPoint(unsigned Subsection);
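Since the accessors now forward straight to the fragment list, clients can keep using range-based iteration over a section unchanged. A one-line sketch, with Sec assumed to be a live MCSection:

for (MCFragment &F : Sec)     // begin()/end() come from the Fragments list
  (void)F.getKind();          // visit each fragment, e.g. to classify it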
diff --git a/include/llvm/MC/MCSectionCOFF.h b/include/llvm/MC/MCSectionCOFF.h
index c9fd8ea1605d..7d5f9f7f3cde 100644
--- a/include/llvm/MC/MCSectionCOFF.h
+++ b/include/llvm/MC/MCSectionCOFF.h
@@ -84,6 +84,10 @@ public:
return WinCFISectionID;
}
+ static bool isImplicitlyDiscardable(StringRef Name) {
+ return Name.startswith(".debug");
+ }
+
static bool classof(const MCSection *S) { return S->getVariant() == SV_COFF; }
};
diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h
index cd710ee43425..41f00a24dfbf 100644
--- a/include/llvm/MC/MCStreamer.h
+++ b/include/llvm/MC/MCStreamer.h
@@ -169,6 +169,7 @@ class MCStreamer {
MCDwarfFrameInfo *getCurrentDwarfFrameInfo();
void EnsureValidDwarfFrame();
+ MCSymbol *EmitCFILabel();
MCSymbol *EmitCFICommon();
std::vector<WinEH::FrameInfo *> WinFrameInfos;
@@ -261,7 +262,11 @@ public:
///
/// If the comment includes embedded \n's, they will each get the comment
/// prefix as appropriate. The added comment should not end with a \n.
- virtual void AddComment(const Twine &T) {}
+ /// By default each comment is terminated with an end of line, i.e. the
+ /// EOL parameter defaults to true. Pass EOL = false if the comment should
+ /// not be terminated with a newline.
+ virtual void AddComment(const Twine &T, bool EOL = true) {}
/// \brief Return a raw_ostream that comments can be written to. Unlike
/// AddComment, you are required to terminate comments with \n if you use this
@@ -470,13 +475,13 @@ public:
/// \brief Emits a COFF section relative relocation.
///
/// \param Symbol - Symbol the section relative relocation should point to.
- virtual void EmitCOFFSecRel32(MCSymbol const *Symbol);
+ virtual void EmitCOFFSecRel32(MCSymbol const *Symbol, uint64_t Offset);
/// \brief Emit an ELF .size directive.
///
/// This corresponds to an assembler statement such as:
/// .size symbol, expression
- virtual void emitELFSize(MCSymbolELF *Symbol, const MCExpr *Value);
+ virtual void emitELFSize(MCSymbol *Symbol, const MCExpr *Value);
/// \brief Emit a Linker Optimization Hint (LOH) directive.
/// \param Args - Arguments of the LOH.
@@ -569,6 +574,34 @@ public:
void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
bool IsSectionRelative = false);
+ /// \brief Emit the expression \p Value into the output as a dtprel
+ /// (64-bit DTP relative) value.
+ ///
+ /// This is used to implement assembler directives such as .dtpreldword on
+ /// targets that support them.
+ virtual void EmitDTPRel64Value(const MCExpr *Value);
+
+ /// \brief Emit the expression \p Value into the output as a dtprel
+ /// (32-bit DTP relative) value.
+ ///
+ /// This is used to implement assembler directives such as .dtprelword on
+ /// targets that support them.
+ virtual void EmitDTPRel32Value(const MCExpr *Value);
+
+ /// \brief Emit the expression \p Value into the output as a tprel
+ /// (64-bit TP relative) value.
+ ///
+ /// This is used to implement assembler directives such as .tpreldword on
+ /// targets that support them.
+ virtual void EmitTPRel64Value(const MCExpr *Value);
+
+ /// \brief Emit the expression \p Value into the output as a tprel
+ /// (32-bit TP relative) value.
+ ///
+ /// This is used to implement assembler directives such as .tprelword on
+ /// targets that support them.
+ virtual void EmitTPRel32Value(const MCExpr *Value);
+
/// \brief Emit the expression \p Value into the output as a gprel64 (64-bit
/// GP relative) value.
///
@@ -655,7 +688,8 @@ public:
/// \param Offset - The offset to reach. This may be an expression, but the
/// expression must be associated with the current section.
/// \param Value - The value to use when filling bytes.
- virtual void emitValueToOffset(const MCExpr *Offset, unsigned char Value = 0);
+ virtual void emitValueToOffset(const MCExpr *Offset, unsigned char Value,
+ SMLoc Loc);
/// @}
@@ -681,14 +715,24 @@ public:
StringRef FileName);
/// \brief Associate a filename with a specified logical file number. This
- /// implements the '.cv_file 4 "foo.c"' assembler directive.
- virtual unsigned EmitCVFileDirective(unsigned FileNo, StringRef Filename);
+ /// implements the '.cv_file 4 "foo.c"' assembler directive. Returns true on
+ /// success.
+ virtual bool EmitCVFileDirective(unsigned FileNo, StringRef Filename);
+
+ /// \brief Introduces a function id for use with .cv_loc.
+ virtual bool EmitCVFuncIdDirective(unsigned FunctionId);
+
+ /// \brief Introduces an inline call site id for use with .cv_loc. Includes
+ /// extra information for inline line table generation.
+ virtual bool EmitCVInlineSiteIdDirective(unsigned FunctionId, unsigned IAFunc,
+ unsigned IAFile, unsigned IALine,
+ unsigned IACol, SMLoc Loc);
/// \brief This implements the CodeView '.cv_loc' assembler directive.
virtual void EmitCVLocDirective(unsigned FunctionId, unsigned FileNo,
unsigned Line, unsigned Column,
bool PrologueEnd, bool IsStmt,
- StringRef FileName);
+ StringRef FileName, SMLoc Loc);
/// \brief This implements the CodeView '.cv_linetable' assembler directive.
virtual void EmitCVLinetableDirective(unsigned FunctionId,
@@ -697,10 +741,11 @@ public:
/// \brief This implements the CodeView '.cv_inline_linetable' assembler
/// directive.
- virtual void EmitCVInlineLinetableDirective(
- unsigned PrimaryFunctionId, unsigned SourceFileId, unsigned SourceLineNum,
- const MCSymbol *FnStartSym, const MCSymbol *FnEndSym,
- ArrayRef<unsigned> SecondaryFunctionIds);
+ virtual void EmitCVInlineLinetableDirective(unsigned PrimaryFunctionId,
+ unsigned SourceFileId,
+ unsigned SourceLineNum,
+ const MCSymbol *FnStartSym,
+ const MCSymbol *FnEndSym);
/// \brief This implements the CodeView '.cv_def_range' assembler
/// directive.
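The new MCStreamer hooks give targets dedicated entry points for TP/DTP-relative data and for the richer CodeView directives. A hedged sketch of emitting TLS-relative words, assuming a streamer Out, a context Ctx, and a TLS symbol TlsSym provided by the caller:

// Assumed objects: Out (MCStreamer &), Ctx (MCContext &), TlsSym (MCSymbol *).
const MCExpr *Ref = MCSymbolRefExpr::create(TlsSym, Ctx);
Out.EmitDTPRel32Value(Ref);   // e.g. backing a ".dtprelword" directive
Out.EmitTPRel64Value(Ref);    // e.g. backing a ".tpreldword" directive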
diff --git a/include/llvm/MC/MCTargetOptions.h b/include/llvm/MC/MCTargetOptions.h
index 1d170b757cb3..a300c4f6fb00 100644
--- a/include/llvm/MC/MCTargetOptions.h
+++ b/include/llvm/MC/MCTargetOptions.h
@@ -14,6 +14,14 @@
namespace llvm {
+enum class ExceptionHandling {
+ None, /// No exception support
+ DwarfCFI, /// DWARF-like instruction based exceptions
+ SjLj, /// setjmp/longjmp based exceptions
+ ARM, /// ARM EHABI
+ WinEH, /// Windows Exception Handling
+};
+
class StringRef;
class MCTargetOptions {
@@ -30,9 +38,11 @@ public:
bool MCNoExecStack : 1;
bool MCFatalWarnings : 1;
bool MCNoWarn : 1;
+ bool MCNoDeprecatedWarn : 1;
bool MCSaveTempLabels : 1;
bool MCUseDwarfDirectory : 1;
bool MCIncrementalLinkerCompatible : 1;
+ bool MCPIECopyRelocations : 1;
bool ShowMCEncoding : 1;
bool ShowMCInst : 1;
bool AsmVerbose : 1;
@@ -56,9 +66,11 @@ inline bool operator==(const MCTargetOptions &LHS, const MCTargetOptions &RHS) {
ARE_EQUAL(MCNoExecStack) &&
ARE_EQUAL(MCFatalWarnings) &&
ARE_EQUAL(MCNoWarn) &&
+ ARE_EQUAL(MCNoDeprecatedWarn) &&
ARE_EQUAL(MCSaveTempLabels) &&
ARE_EQUAL(MCUseDwarfDirectory) &&
ARE_EQUAL(MCIncrementalLinkerCompatible) &&
+ ARE_EQUAL(MCPIECopyRelocations) &&
ARE_EQUAL(ShowMCEncoding) &&
ARE_EQUAL(ShowMCInst) &&
ARE_EQUAL(AsmVerbose) &&
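The two new option bits behave like the existing bitfield members; a short sketch of configuring them directly (the chosen values are illustrative):

MCTargetOptions Opts;
Opts.MCNoDeprecatedWarn = true;    // suppress deprecation diagnostics
Opts.MCPIECopyRelocations = true;  // permit copy relocations when targeting PIE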
diff --git a/include/llvm/MC/MCTargetOptionsCommandFlags.h b/include/llvm/MC/MCTargetOptionsCommandFlags.h
index 5180208d33b6..96179be3b8b0 100644
--- a/include/llvm/MC/MCTargetOptionsCommandFlags.h
+++ b/include/llvm/MC/MCTargetOptionsCommandFlags.h
@@ -26,8 +26,7 @@ cl::opt<MCTargetOptions::AsmInstrumentation> AsmInstrumentation(
cl::values(clEnumValN(MCTargetOptions::AsmInstrumentationNone, "none",
"no instrumentation at all"),
clEnumValN(MCTargetOptions::AsmInstrumentationAddress, "address",
- "instrument instructions with memory arguments"),
- clEnumValEnd));
+ "instrument instructions with memory arguments")));
cl::opt<bool> RelaxAll("mc-relax-all",
cl::desc("When used with filetype=obj, "
@@ -39,6 +38,8 @@ cl::opt<bool> IncrementalLinkerCompatible(
"When used with filetype=obj, "
"emit an object file which can be used with an incremental linker"));
+cl::opt<bool> PIECopyRelocations("pie-copy-relocations", cl::desc("PIE Copy Relocations"));
+
cl::opt<int> DwarfVersion("dwarf-version", cl::desc("Dwarf version"),
cl::init(0));
@@ -52,6 +53,9 @@ cl::opt<bool> FatalWarnings("fatal-warnings",
cl::opt<bool> NoWarn("no-warn", cl::desc("Suppress all warnings"));
cl::alias NoWarnW("W", cl::desc("Alias for --no-warn"), cl::aliasopt(NoWarn));
+cl::opt<bool> NoDeprecatedWarn("no-deprecated-warn",
+ cl::desc("Suppress all deprecated warnings"));
+
cl::opt<std::string>
ABIName("target-abi", cl::Hidden,
cl::desc("The name of the ABI to be targeted from the backend."),
@@ -63,11 +67,13 @@ static inline MCTargetOptions InitMCTargetOptionsFromFlags() {
(AsmInstrumentation == MCTargetOptions::AsmInstrumentationAddress);
Options.MCRelaxAll = RelaxAll;
Options.MCIncrementalLinkerCompatible = IncrementalLinkerCompatible;
+ Options.MCPIECopyRelocations = PIECopyRelocations;
Options.DwarfVersion = DwarfVersion;
Options.ShowMCInst = ShowMCInst;
Options.ABIName = ABIName;
Options.MCFatalWarnings = FatalWarnings;
Options.MCNoWarn = NoWarn;
+ Options.MCNoDeprecatedWarn = NoDeprecatedWarn;
return Options;
}
diff --git a/include/llvm/MC/MCWinCOFFStreamer.h b/include/llvm/MC/MCWinCOFFStreamer.h
index fe1ada9b9e5b..63e44f2e67d6 100644
--- a/include/llvm/MC/MCWinCOFFStreamer.h
+++ b/include/llvm/MC/MCWinCOFFStreamer.h
@@ -52,7 +52,7 @@ public:
void EndCOFFSymbolDef() override;
void EmitCOFFSafeSEH(MCSymbol const *Symbol) override;
void EmitCOFFSectionIndex(MCSymbol const *Symbol) override;
- void EmitCOFFSecRel32(MCSymbol const *Symbol) override;
+ void EmitCOFFSecRel32(MCSymbol const *Symbol, uint64_t Offset) override;
void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) override;
void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
diff --git a/include/llvm/MC/SectionKind.h b/include/llvm/MC/SectionKind.h
index 02fb22623cf7..66eb9ec56d14 100644
--- a/include/llvm/MC/SectionKind.h
+++ b/include/llvm/MC/SectionKind.h
@@ -28,6 +28,9 @@ class SectionKind {
/// Text - Text section, used for functions and other executable code.
Text,
+ /// ExecuteOnly - Text section that is not readable.
+ ExecuteOnly,
+
/// ReadOnly - Data that is never written to at program runtime by the
/// program or the dynamic linker. Things in the top-level readonly
/// SectionKind are not mergeable.
@@ -112,7 +115,10 @@ class SectionKind {
public:
bool isMetadata() const { return K == Metadata; }
- bool isText() const { return K == Text; }
+
+ bool isText() const { return K == Text || K == ExecuteOnly; }
+
+ bool isExecuteOnly() const { return K == ExecuteOnly; }
bool isReadOnly() const {
return K == ReadOnly || isMergeableCString() ||
@@ -172,6 +178,7 @@ public:
static SectionKind getMetadata() { return get(Metadata); }
static SectionKind getText() { return get(Text); }
+ static SectionKind getExecuteOnly() { return get(ExecuteOnly); }
static SectionKind getReadOnly() { return get(ReadOnly); }
static SectionKind getMergeable1ByteCString() {
return get(Mergeable1ByteCString);
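A small sketch of the new kind's behaviour: execute-only sections still report as text for layout purposes, but remain distinguishable when a target must prevent data loads from them.

SectionKind K = SectionKind::getExecuteOnly();
assert(K.isText() && K.isExecuteOnly());   // text for layout, but not readable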
diff --git a/include/llvm/MC/StringTableBuilder.h b/include/llvm/MC/StringTableBuilder.h
index f2b8ecd2d997..7da444f7bfb1 100644
--- a/include/llvm/MC/StringTableBuilder.h
+++ b/include/llvm/MC/StringTableBuilder.h
@@ -10,11 +10,12 @@
#ifndef LLVM_MC_STRINGTABLEBUILDER_H
#define LLVM_MC_STRINGTABLEBUILDER_H
-#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseMap.h"
#include <cassert>
namespace llvm {
+class raw_ostream;
/// \brief Utility for building string tables with deduplicated suffixes.
class StringTableBuilder {
@@ -22,21 +23,24 @@ public:
enum Kind { ELF, WinCOFF, MachO, RAW };
private:
- SmallString<256> StringTable;
- DenseMap<CachedHash<StringRef>, size_t> StringIndexMap;
+ DenseMap<CachedHashStringRef, size_t> StringIndexMap;
size_t Size = 0;
Kind K;
unsigned Alignment;
+ bool Finalized = false;
void finalizeStringTable(bool Optimize);
+ void initSize();
public:
StringTableBuilder(Kind K, unsigned Alignment = 1);
+ ~StringTableBuilder();
/// \brief Add a string to the builder. Returns the position of S in the
/// table. The position will be changed if finalize is used.
/// Can only be used before the table is finalized.
- size_t add(StringRef S);
+ size_t add(CachedHashStringRef S);
+ size_t add(StringRef S) { return add(CachedHashStringRef(S)); }
/// \brief Analyze the strings and build the final table. No more strings can
/// be added after this point.
@@ -46,28 +50,21 @@ public:
/// returned by add will still be valid.
void finalizeInOrder();
- /// \brief Retrieve the string table data. Can only be used after the table
- /// is finalized.
- StringRef data() const {
- assert(isFinalized());
- return StringTable;
- }
-
/// \brief Get the offset of a string in the string table. Can only be used
/// after the table is finalized.
- size_t getOffset(StringRef S) const;
-
- const DenseMap<CachedHash<StringRef>, size_t> &getMap() const {
- return StringIndexMap;
+ size_t getOffset(CachedHashStringRef S) const;
+ size_t getOffset(StringRef S) const {
+ return getOffset(CachedHashStringRef(S));
}
size_t getSize() const { return Size; }
void clear();
+ void write(raw_ostream &OS) const;
+ void write(uint8_t *Buf) const;
+
private:
- bool isFinalized() const {
- return !StringTable.empty();
- }
+ bool isFinalized() const { return Finalized; }
};
} // end llvm namespace
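With the raw buffer accessor gone, a builder is now populated, finalized, queried, and streamed out. A minimal sketch, assuming a raw_ostream OS supplied by the caller:

StringTableBuilder Builder(StringTableBuilder::ELF);
Builder.add("foo");                        // offsets returned here may still move
Builder.add(".debug_str");
Builder.finalize();                        // merges suffixes and fixes offsets
size_t FooOff = Builder.getOffset("foo");  // stable only after finalize()
Builder.write(OS);                         // emit the table into OS
(void)FooOff;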
diff --git a/include/llvm/Object/Archive.h b/include/llvm/Object/Archive.h
index cfba2567371a..08128b0c2515 100644
--- a/include/llvm/Object/Archive.h
+++ b/include/llvm/Object/Archive.h
@@ -18,35 +18,59 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/Binary.h"
+#include "llvm/Support/Chrono.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
namespace llvm {
namespace object {
-struct ArchiveMemberHeader {
- char Name[16];
- char LastModified[12];
- char UID[6];
- char GID[6];
- char AccessMode[8];
- char Size[10]; ///< Size of data, not including header or padding.
- char Terminator[2];
+
+class Archive;
+
+class ArchiveMemberHeader {
+public:
+ friend class Archive;
+ ArchiveMemberHeader(Archive const *Parent, const char *RawHeaderPtr,
+ uint64_t Size, Error *Err);
+ // ArchiveMemberHeader() = default;
/// Get the name without looking up long names.
- llvm::StringRef getName() const;
+ Expected<llvm::StringRef> getRawName() const;
+
+ /// Get the name looking up long names.
+ Expected<llvm::StringRef> getName(uint64_t Size) const;
/// Members are not larger than 4GB.
- ErrorOr<uint32_t> getSize() const;
+ Expected<uint32_t> getSize() const;
- sys::fs::perms getAccessMode() const;
- sys::TimeValue getLastModified() const;
+ Expected<sys::fs::perms> getAccessMode() const;
+ Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const;
llvm::StringRef getRawLastModified() const {
- return StringRef(LastModified, sizeof(LastModified)).rtrim(' ');
+ return StringRef(ArMemHdr->LastModified,
+ sizeof(ArMemHdr->LastModified)).rtrim(' ');
+ }
+ Expected<unsigned> getUID() const;
+ Expected<unsigned> getGID() const;
+
+ // This returns the size of the private struct ArMemHdrType
+ uint64_t getSizeOf() const {
+ return sizeof(ArMemHdrType);
}
- unsigned getUID() const;
- unsigned getGID() const;
+
+private:
+ struct ArMemHdrType {
+ char Name[16];
+ char LastModified[12];
+ char UID[6];
+ char GID[6];
+ char AccessMode[8];
+ char Size[10]; ///< Size of data, not including header or padding.
+ char Terminator[2];
+ };
+ Archive const *Parent;
+ ArMemHdrType const *ArMemHdr;
};
class Archive : public Binary {
@@ -55,52 +79,50 @@ public:
class Child {
friend Archive;
const Archive *Parent;
+ friend ArchiveMemberHeader;
+ ArchiveMemberHeader Header;
/// \brief Includes header but not padding byte.
StringRef Data;
/// \brief Offset from Data to the start of the file.
uint16_t StartOfFile;
- const ArchiveMemberHeader *getHeader() const {
- return reinterpret_cast<const ArchiveMemberHeader *>(Data.data());
- }
-
- bool isThinMember() const;
+ Expected<bool> isThinMember() const;
public:
- Child(const Archive *Parent, const char *Start, std::error_code *EC);
+ Child(const Archive *Parent, const char *Start, Error *Err);
Child(const Archive *Parent, StringRef Data, uint16_t StartOfFile);
bool operator ==(const Child &other) const {
- assert(Parent == other.Parent);
+ assert(!Parent || !other.Parent || Parent == other.Parent);
return Data.begin() == other.Data.begin();
}
const Archive *getParent() const { return Parent; }
- ErrorOr<Child> getNext() const;
+ Expected<Child> getNext() const;
- ErrorOr<StringRef> getName() const;
- ErrorOr<std::string> getFullName() const;
- StringRef getRawName() const { return getHeader()->getName(); }
- sys::TimeValue getLastModified() const {
- return getHeader()->getLastModified();
+ Expected<StringRef> getName() const;
+ Expected<std::string> getFullName() const;
+ Expected<StringRef> getRawName() const { return Header.getRawName(); }
+ Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const {
+ return Header.getLastModified();
}
StringRef getRawLastModified() const {
- return getHeader()->getRawLastModified();
+ return Header.getRawLastModified();
}
- unsigned getUID() const { return getHeader()->getUID(); }
- unsigned getGID() const { return getHeader()->getGID(); }
- sys::fs::perms getAccessMode() const {
- return getHeader()->getAccessMode();
+ Expected<unsigned> getUID() const { return Header.getUID(); }
+ Expected<unsigned> getGID() const { return Header.getGID(); }
+ Expected<sys::fs::perms> getAccessMode() const {
+ return Header.getAccessMode();
}
/// \return the size of the archive member without the header or padding.
- ErrorOr<uint64_t> getSize() const;
+ Expected<uint64_t> getSize() const;
/// \return the size in the archive header for this member.
- ErrorOr<uint64_t> getRawSize() const;
+ Expected<uint64_t> getRawSize() const;
- ErrorOr<StringRef> getBuffer() const;
+ Expected<StringRef> getBuffer() const;
uint64_t getChildOffset() const;
- ErrorOr<MemoryBufferRef> getMemoryBufferRef() const;
+ Expected<MemoryBufferRef> getMemoryBufferRef() const;
Expected<std::unique_ptr<Binary>>
getAsBinary(LLVMContext *Context = nullptr) const;
@@ -131,12 +153,12 @@ public:
// iteration. And if there is an error break out of the loop.
child_iterator &operator++() { // Preincrement
assert(E && "Can't increment iterator with no Error attached");
+ ErrorAsOutParameter ErrAsOutParam(E);
if (auto ChildOrErr = C.getNext())
C = *ChildOrErr;
else {
- ErrorAsOutParameter ErrAsOutParam(*E);
C = C.getParent()->child_end().C;
- *E = errorCodeToError(ChildOrErr.getError());
+ *E = ChildOrErr.takeError();
E = nullptr;
}
return *this;
@@ -158,7 +180,7 @@ public:
, SymbolIndex(symi)
, StringIndex(stri) {}
StringRef getName() const;
- ErrorOr<Child> getMember() const;
+ Expected<Child> getMember() const;
Symbol getNext() const;
};
@@ -218,8 +240,10 @@ public:
// check if a symbol is in the archive
Expected<Optional<Child>> findSym(StringRef name) const;
+ bool isEmpty() const;
bool hasSymbolTable() const;
StringRef getSymbolTable() const { return SymbolTable; }
+ StringRef getStringTable() const { return StringTable; }
uint32_t getNumberOfSymbols() const;
std::vector<std::unique_ptr<MemoryBuffer>> takeThinBuffers() {
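Archive traversal now threads llvm::Error/Expected instead of std::error_code. A hedged sketch of walking the members, written as the body of a function returning Error, with Buf assumed to be a MemoryBufferRef over archive data:

Error Err = Error::success();
Archive A(Buf, Err);
if (Err)
  return Err;                          // malformed archive header
for (const Archive::Child &C : A.children(Err)) {
  Expected<StringRef> Name = C.getName();
  if (!Name)
    return Name.takeError();
  // ... use *Name, C.getSize(), C.getLastModified() ...
}
return Err;                            // set if iteration hit a bad member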
diff --git a/include/llvm/Object/ArchiveWriter.h b/include/llvm/Object/ArchiveWriter.h
index 55b58fac4f66..3e84a5814d79 100644
--- a/include/llvm/Object/ArchiveWriter.h
+++ b/include/llvm/Object/ArchiveWriter.h
@@ -22,21 +22,11 @@ namespace llvm {
struct NewArchiveMember {
std::unique_ptr<MemoryBuffer> Buf;
- sys::TimeValue ModTime = sys::TimeValue::PosixZeroTime();
+ sys::TimePoint<std::chrono::seconds> ModTime;
unsigned UID = 0, GID = 0, Perms = 0644;
+ bool IsNew = false;
NewArchiveMember() = default;
- NewArchiveMember(NewArchiveMember &&Other)
- : Buf(std::move(Other.Buf)), ModTime(Other.ModTime), UID(Other.UID),
- GID(Other.GID), Perms(Other.Perms) {}
- NewArchiveMember &operator=(NewArchiveMember &&Other) {
- Buf = std::move(Other.Buf);
- ModTime = Other.ModTime;
- UID = Other.UID;
- GID = Other.GID;
- Perms = Other.Perms;
- return *this;
- }
NewArchiveMember(MemoryBufferRef BufRef);
static Expected<NewArchiveMember>
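With sys::TimeValue replaced by sys::TimePoint, a member's modification time is a std::chrono time point and the hand-written move operations are no longer needed. A short sketch of filling in a member (the buffer contents are invented):

NewArchiveMember M;
M.Buf = MemoryBuffer::getMemBuffer("hello\n", "hello.txt");
M.ModTime = std::chrono::time_point_cast<std::chrono::seconds>(
    std::chrono::system_clock::now());
M.UID = 0;
M.GID = 0;
M.Perms = 0644;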
diff --git a/include/llvm/Object/Binary.h b/include/llvm/Object/Binary.h
index 5dff5406fcdd..00d06e3c7437 100644
--- a/include/llvm/Object/Binary.h
+++ b/include/llvm/Object/Binary.h
@@ -59,6 +59,8 @@ protected:
ID_MachO64L, // MachO 64-bit, little endian
ID_MachO64B, // MachO 64-bit, big endian
+ ID_Wasm,
+
ID_EndObjects
};
@@ -115,6 +117,8 @@ public:
return TypeID == ID_COFF;
}
+ bool isWasm() const { return TypeID == ID_Wasm; }
+
bool isCOFFImportFile() const {
return TypeID == ID_COFFImportFile;
}
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index dcc58b06e228..696042d29dab 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -15,6 +15,7 @@
#define LLVM_OBJECT_COFF_H
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/DebugInfo/CodeView/CVDebugRecord.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Endian.h"
@@ -161,14 +162,6 @@ struct data_directory {
support::ulittle32_t Size;
};
-struct import_directory_table_entry {
- support::ulittle32_t ImportLookupTableRVA;
- support::ulittle32_t TimeDateStamp;
- support::ulittle32_t ForwarderChain;
- support::ulittle32_t NameRVA;
- support::ulittle32_t ImportAddressTableRVA;
-};
-
struct debug_directory {
support::ulittle32_t Characteristics;
support::ulittle32_t TimeDateStamp;
@@ -180,15 +173,6 @@ struct debug_directory {
support::ulittle32_t PointerToRawData;
};
-/// Information that is resent in debug_directory::AddressOfRawData if Type is
-/// IMAGE_DEBUG_TYPE_CODEVIEW.
-struct debug_pdb_info {
- support::ulittle32_t Signature;
- uint8_t Guid[16];
- support::ulittle32_t Age;
- // PDBFileName: The null-terminated PDB file name follows.
-};
-
template <typename IntTy>
struct import_lookup_table_entry {
IntTy Data;
@@ -534,6 +518,10 @@ struct coff_import_directory_table_entry {
support::ulittle32_t ForwarderChain;
support::ulittle32_t NameRVA;
support::ulittle32_t ImportAddressTableRVA;
+ bool isNull() const {
+ return ImportLookupTableRVA == 0 && TimeDateStamp == 0 &&
+ ForwarderChain == 0 && NameRVA == 0 && ImportAddressTableRVA == 0;
+ }
};
template <typename IntTy>
@@ -633,7 +621,7 @@ private:
const coff_symbol32 *SymbolTable32;
const char *StringTable;
uint32_t StringTableSize;
- const import_directory_table_entry *ImportDirectory;
+ const coff_import_directory_table_entry *ImportDirectory;
const delay_import_directory_table_entry *DelayImportDirectory;
uint32_t NumberOfDelayImportDirectory;
const export_directory_table_entry *ExportDirectory;
@@ -711,17 +699,23 @@ public:
return COFFBigObjHeader->PointerToSymbolTable;
llvm_unreachable("no COFF header!");
}
- uint32_t getNumberOfSymbols() const {
+ uint32_t getRawNumberOfSymbols() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSymbols;
if (COFFBigObjHeader)
return COFFBigObjHeader->NumberOfSymbols;
llvm_unreachable("no COFF header!");
}
+ uint32_t getNumberOfSymbols() const {
+ if (!SymbolTable16 && !SymbolTable32)
+ return 0;
+ return getRawNumberOfSymbols();
+ }
protected:
void moveSymbolNext(DataRefImpl &Symb) const override;
Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+ uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
uint32_t getSymbolFlags(DataRefImpl Symb) const override;
@@ -752,8 +746,8 @@ protected:
public:
COFFObjectFile(MemoryBufferRef Object, std::error_code &EC);
- basic_symbol_iterator symbol_begin_impl() const override;
- basic_symbol_iterator symbol_end_impl() const override;
+ basic_symbol_iterator symbol_begin() const override;
+ basic_symbol_iterator symbol_end() const override;
section_iterator section_begin() const override;
section_iterator section_end() const override;
@@ -872,14 +866,14 @@ public:
/// Get PDB information out of a codeview debug directory entry.
std::error_code getDebugPDBInfo(const debug_directory *DebugDir,
- const debug_pdb_info *&Info,
+ const codeview::DebugInfo *&Info,
StringRef &PDBFileName) const;
/// Get PDB information from an executable. If the information is not present,
/// Info will be set to nullptr and PDBFileName will be empty. An error is
/// returned only on corrupt object files. Convenience accessor that can be
/// used if the debug directory is not already handy.
- std::error_code getDebugPDBInfo(const debug_pdb_info *&Info,
+ std::error_code getDebugPDBInfo(const codeview::DebugInfo *&Info,
StringRef &PDBFileName) const;
bool isRelocatableObject() const override;
@@ -892,8 +886,8 @@ public:
class ImportDirectoryEntryRef {
public:
ImportDirectoryEntryRef() : OwningObject(nullptr) {}
- ImportDirectoryEntryRef(const import_directory_table_entry *Table, uint32_t I,
- const COFFObjectFile *Owner)
+ ImportDirectoryEntryRef(const coff_import_directory_table_entry *Table,
+ uint32_t I, const COFFObjectFile *Owner)
: ImportTable(Table), Index(I), OwningObject(Owner) {}
bool operator==(const ImportDirectoryEntryRef &Other) const;
@@ -903,15 +897,19 @@ public:
imported_symbol_iterator imported_symbol_end() const;
iterator_range<imported_symbol_iterator> imported_symbols() const;
+ imported_symbol_iterator lookup_table_begin() const;
+ imported_symbol_iterator lookup_table_end() const;
+ iterator_range<imported_symbol_iterator> lookup_table_symbols() const;
+
std::error_code getName(StringRef &Result) const;
std::error_code getImportLookupTableRVA(uint32_t &Result) const;
std::error_code getImportAddressTableRVA(uint32_t &Result) const;
std::error_code
- getImportTableEntry(const import_directory_table_entry *&Result) const;
+ getImportTableEntry(const coff_import_directory_table_entry *&Result) const;
private:
- const import_directory_table_entry *ImportTable;
+ const coff_import_directory_table_entry *ImportTable;
uint32_t Index;
const COFFObjectFile *OwningObject;
};
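The PDB information now comes back as a codeview::DebugInfo record rather than the removed debug_pdb_info struct. A hedged sketch of querying it, assuming Obj is an already-parsed COFFObjectFile:

const codeview::DebugInfo *Info = nullptr;
StringRef PDBFileName;
if (std::error_code EC = Obj.getDebugPDBInfo(Info, PDBFileName))
  report_fatal_error(EC.message());   // corrupt object file
if (Info)                             // stays nullptr if no PDB entry is present
  outs() << "PDB file: " << PDBFileName << '\n';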
diff --git a/include/llvm/Object/COFFImportFile.h b/include/llvm/Object/COFFImportFile.h
index b04a44ea60d2..4192fe7e5c90 100644
--- a/include/llvm/Object/COFFImportFile.h
+++ b/include/llvm/Object/COFFImportFile.h
@@ -47,11 +47,11 @@ public:
return SymbolRef::SF_Global;
}
- basic_symbol_iterator symbol_begin_impl() const override {
+ basic_symbol_iterator symbol_begin() const override {
return BasicSymbolRef(DataRefImpl(), this);
}
- basic_symbol_iterator symbol_end_impl() const override {
+ basic_symbol_iterator symbol_end() const override {
DataRefImpl Symb;
Symb.p = isCode() ? 2 : 1;
return BasicSymbolRef(Symb, this);
diff --git a/include/llvm/Object/ELF.h b/include/llvm/Object/ELF.h
index 80b8be03810c..aaa79ae70f01 100644
--- a/include/llvm/Object/ELF.h
+++ b/include/llvm/Object/ELF.h
@@ -33,27 +33,29 @@ getElfArchType(StringRef Object) {
(uint8_t)Object[ELF::EI_DATA]);
}
+static inline Error createError(StringRef Err) {
+ return make_error<StringError>(Err, object_error::parse_failed);
+}
+
template <class ELFT>
class ELFFile {
public:
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
- typedef typename std::conditional<ELFT::Is64Bits,
- uint64_t, uint32_t>::type uintX_t;
-
- typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
- typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
- typedef Elf_Sym_Impl<ELFT> Elf_Sym;
- typedef Elf_Dyn_Impl<ELFT> Elf_Dyn;
- typedef Elf_Phdr_Impl<ELFT> Elf_Phdr;
- typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
- typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
- typedef Elf_Verdef_Impl<ELFT> Elf_Verdef;
- typedef Elf_Verdaux_Impl<ELFT> Elf_Verdaux;
- typedef Elf_Verneed_Impl<ELFT> Elf_Verneed;
- typedef Elf_Vernaux_Impl<ELFT> Elf_Vernaux;
- typedef Elf_Versym_Impl<ELFT> Elf_Versym;
- typedef Elf_Hash_Impl<ELFT> Elf_Hash;
- typedef Elf_GnuHash_Impl<ELFT> Elf_GnuHash;
+ typedef typename ELFT::uint uintX_t;
+ typedef typename ELFT::Ehdr Elf_Ehdr;
+ typedef typename ELFT::Shdr Elf_Shdr;
+ typedef typename ELFT::Sym Elf_Sym;
+ typedef typename ELFT::Dyn Elf_Dyn;
+ typedef typename ELFT::Phdr Elf_Phdr;
+ typedef typename ELFT::Rel Elf_Rel;
+ typedef typename ELFT::Rela Elf_Rela;
+ typedef typename ELFT::Verdef Elf_Verdef;
+ typedef typename ELFT::Verdaux Elf_Verdaux;
+ typedef typename ELFT::Verneed Elf_Verneed;
+ typedef typename ELFT::Vernaux Elf_Vernaux;
+ typedef typename ELFT::Versym Elf_Versym;
+ typedef typename ELFT::Hash Elf_Hash;
+ typedef typename ELFT::GnuHash Elf_GnuHash;
typedef typename ELFT::DynRange Elf_Dyn_Range;
typedef typename ELFT::ShdrRange Elf_Shdr_Range;
typedef typename ELFT::SymRange Elf_Sym_Range;
@@ -71,20 +73,24 @@ private:
StringRef Buf;
- const Elf_Ehdr *Header;
- const Elf_Shdr *SectionHeaderTable = nullptr;
- StringRef DotShstrtab; // Section header string table.
-
public:
- template<typename T>
- const T *getEntry(uint32_t Section, uint32_t Entry) const;
+ const Elf_Ehdr *getHeader() const {
+ return reinterpret_cast<const Elf_Ehdr *>(base());
+ }
+
template <typename T>
- const T *getEntry(const Elf_Shdr *Section, uint32_t Entry) const;
+ Expected<const T *> getEntry(uint32_t Section, uint32_t Entry) const;
+ template <typename T>
+ Expected<const T *> getEntry(const Elf_Shdr *Section, uint32_t Entry) const;
- ErrorOr<StringRef> getStringTable(const Elf_Shdr *Section) const;
- ErrorOr<StringRef> getStringTableForSymtab(const Elf_Shdr &Section) const;
+ Expected<StringRef> getStringTable(const Elf_Shdr *Section) const;
+ Expected<StringRef> getStringTableForSymtab(const Elf_Shdr &Section) const;
+ Expected<StringRef> getStringTableForSymtab(const Elf_Shdr &Section,
+ Elf_Shdr_Range Sections) const;
- ErrorOr<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section) const;
+ Expected<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section) const;
+ Expected<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section,
+ Elf_Shdr_Range Sections) const;
void VerifyStrTab(const Elf_Shdr *sh) const;
@@ -93,118 +99,66 @@ public:
SmallVectorImpl<char> &Result) const;
/// \brief Get the symbol for a given relocation.
- const Elf_Sym *getRelocationSymbol(const Elf_Rel *Rel,
- const Elf_Shdr *SymTab) const;
+ Expected<const Elf_Sym *> getRelocationSymbol(const Elf_Rel *Rel,
+ const Elf_Shdr *SymTab) const;
- ELFFile(StringRef Object, std::error_code &EC);
+ ELFFile(StringRef Object);
bool isMipsELF64() const {
- return Header->e_machine == ELF::EM_MIPS &&
- Header->getFileClass() == ELF::ELFCLASS64;
+ return getHeader()->e_machine == ELF::EM_MIPS &&
+ getHeader()->getFileClass() == ELF::ELFCLASS64;
}
bool isMips64EL() const {
- return Header->e_machine == ELF::EM_MIPS &&
- Header->getFileClass() == ELF::ELFCLASS64 &&
- Header->getDataEncoding() == ELF::ELFDATA2LSB;
+ return isMipsELF64() &&
+ getHeader()->getDataEncoding() == ELF::ELFDATA2LSB;
}
- const Elf_Shdr *section_begin() const;
- const Elf_Shdr *section_end() const;
- Elf_Shdr_Range sections() const {
- return makeArrayRef(section_begin(), section_end());
- }
+ Expected<Elf_Shdr_Range> sections() const;
- const Elf_Sym *symbol_begin(const Elf_Shdr *Sec) const {
+ Expected<Elf_Sym_Range> symbols(const Elf_Shdr *Sec) const {
if (!Sec)
- return nullptr;
- if (Sec->sh_entsize != sizeof(Elf_Sym))
- report_fatal_error("Invalid symbol size");
- return reinterpret_cast<const Elf_Sym *>(base() + Sec->sh_offset);
- }
- const Elf_Sym *symbol_end(const Elf_Shdr *Sec) const {
- if (!Sec)
- return nullptr;
- uint64_t Size = Sec->sh_size;
- if (Size % sizeof(Elf_Sym))
- report_fatal_error("Invalid symbol table size");
- return symbol_begin(Sec) + Size / sizeof(Elf_Sym);
- }
- Elf_Sym_Range symbols(const Elf_Shdr *Sec) const {
- return makeArrayRef(symbol_begin(Sec), symbol_end(Sec));
+ return makeArrayRef<Elf_Sym>(nullptr, nullptr);
+ return getSectionContentsAsArray<Elf_Sym>(Sec);
}
- const Elf_Rela *rela_begin(const Elf_Shdr *sec) const {
- if (sec->sh_entsize != sizeof(Elf_Rela))
- report_fatal_error("Invalid relocation entry size");
- return reinterpret_cast<const Elf_Rela *>(base() + sec->sh_offset);
+ Expected<Elf_Rela_Range> relas(const Elf_Shdr *Sec) const {
+ return getSectionContentsAsArray<Elf_Rela>(Sec);
}
- const Elf_Rela *rela_end(const Elf_Shdr *sec) const {
- uint64_t Size = sec->sh_size;
- if (Size % sizeof(Elf_Rela))
- report_fatal_error("Invalid relocation table size");
- return rela_begin(sec) + Size / sizeof(Elf_Rela);
- }
-
- Elf_Rela_Range relas(const Elf_Shdr *Sec) const {
- return makeArrayRef(rela_begin(Sec), rela_end(Sec));
- }
-
- const Elf_Rel *rel_begin(const Elf_Shdr *sec) const {
- if (sec->sh_entsize != sizeof(Elf_Rel))
- report_fatal_error("Invalid relocation entry size");
- return reinterpret_cast<const Elf_Rel *>(base() + sec->sh_offset);
- }
-
- const Elf_Rel *rel_end(const Elf_Shdr *sec) const {
- uint64_t Size = sec->sh_size;
- if (Size % sizeof(Elf_Rel))
- report_fatal_error("Invalid relocation table size");
- return rel_begin(sec) + Size / sizeof(Elf_Rel);
- }
-
- Elf_Rel_Range rels(const Elf_Shdr *Sec) const {
- return makeArrayRef(rel_begin(Sec), rel_end(Sec));
+ Expected<Elf_Rel_Range> rels(const Elf_Shdr *Sec) const {
+ return getSectionContentsAsArray<Elf_Rel>(Sec);
}
/// \brief Iterate over program header table.
- const Elf_Phdr *program_header_begin() const {
- if (Header->e_phnum && Header->e_phentsize != sizeof(Elf_Phdr))
- report_fatal_error("Invalid program header size");
- return reinterpret_cast<const Elf_Phdr *>(base() + Header->e_phoff);
- }
-
- const Elf_Phdr *program_header_end() const {
- return program_header_begin() + Header->e_phnum;
+ Expected<Elf_Phdr_Range> program_headers() const {
+ if (getHeader()->e_phnum && getHeader()->e_phentsize != sizeof(Elf_Phdr))
+ return createError("invalid e_phentsize");
+ auto *Begin =
+ reinterpret_cast<const Elf_Phdr *>(base() + getHeader()->e_phoff);
+ return makeArrayRef(Begin, Begin + getHeader()->e_phnum);
}
- const Elf_Phdr_Range program_headers() const {
- return makeArrayRef(program_header_begin(), program_header_end());
- }
-
- uint64_t getNumSections() const;
- uintX_t getStringTableIndex() const;
- uint32_t getExtendedSymbolTableIndex(const Elf_Sym *Sym,
- const Elf_Shdr *SymTab,
- ArrayRef<Elf_Word> ShndxTable) const;
- uint32_t getExtendedSymbolTableIndex(const Elf_Sym *Sym,
- const Elf_Sym *FirstSym,
- ArrayRef<Elf_Word> ShndxTable) const;
- const Elf_Ehdr *getHeader() const { return Header; }
- ErrorOr<const Elf_Shdr *> getSection(const Elf_Sym *Sym,
- const Elf_Shdr *SymTab,
- ArrayRef<Elf_Word> ShndxTable) const;
- ErrorOr<const Elf_Shdr *> getSection(uint32_t Index) const;
-
- const Elf_Sym *getSymbol(const Elf_Shdr *Sec, uint32_t Index) const {
- return &*(symbol_begin(Sec) + Index);
- }
-
- ErrorOr<StringRef> getSectionName(const Elf_Shdr *Section) const;
+ Expected<StringRef> getSectionStringTable(Elf_Shdr_Range Sections) const;
+ Expected<uint32_t> getSectionIndex(const Elf_Sym *Sym, Elf_Sym_Range Syms,
+ ArrayRef<Elf_Word> ShndxTable) const;
+ Expected<const Elf_Shdr *> getSection(const Elf_Sym *Sym,
+ const Elf_Shdr *SymTab,
+ ArrayRef<Elf_Word> ShndxTable) const;
+ Expected<const Elf_Shdr *> getSection(const Elf_Sym *Sym,
+ Elf_Sym_Range Symtab,
+ ArrayRef<Elf_Word> ShndxTable) const;
+ Expected<const Elf_Shdr *> getSection(uint32_t Index) const;
+
+ Expected<const Elf_Sym *> getSymbol(const Elf_Shdr *Sec,
+ uint32_t Index) const;
+
+ Expected<StringRef> getSectionName(const Elf_Shdr *Section) const;
+ Expected<StringRef> getSectionName(const Elf_Shdr *Section,
+ StringRef DotShstrtab) const;
template <typename T>
- ErrorOr<ArrayRef<T>> getSectionContentsAsArray(const Elf_Shdr *Sec) const;
- ErrorOr<ArrayRef<uint8_t> > getSectionContents(const Elf_Shdr *Sec) const;
+ Expected<ArrayRef<T>> getSectionContentsAsArray(const Elf_Shdr *Sec) const;
+ Expected<ArrayRef<uint8_t>> getSectionContents(const Elf_Shdr *Sec) const;
};
typedef ELFFile<ELFType<support::little, false>> ELF32LEFile;
@@ -213,61 +167,116 @@ typedef ELFFile<ELFType<support::big, false>> ELF32BEFile;
typedef ELFFile<ELFType<support::big, true>> ELF64BEFile;
template <class ELFT>
-uint32_t ELFFile<ELFT>::getExtendedSymbolTableIndex(
- const Elf_Sym *Sym, const Elf_Shdr *SymTab,
- ArrayRef<Elf_Word> ShndxTable) const {
- return getExtendedSymbolTableIndex(Sym, symbol_begin(SymTab), ShndxTable);
+inline Expected<const typename ELFT::Shdr *>
+getSection(typename ELFT::ShdrRange Sections, uint32_t Index) {
+ if (Index >= Sections.size())
+ return createError("invalid section index");
+ return &Sections[Index];
}
template <class ELFT>
-uint32_t ELFFile<ELFT>::getExtendedSymbolTableIndex(
- const Elf_Sym *Sym, const Elf_Sym *FirstSym,
- ArrayRef<Elf_Word> ShndxTable) const {
+inline Expected<uint32_t>
+getExtendedSymbolTableIndex(const typename ELFT::Sym *Sym,
+ const typename ELFT::Sym *FirstSym,
+ ArrayRef<typename ELFT::Word> ShndxTable) {
assert(Sym->st_shndx == ELF::SHN_XINDEX);
unsigned Index = Sym - FirstSym;
+ if (Index >= ShndxTable.size())
+ return createError("index past the end of the symbol table");
// The size of the table was checked in getSHNDXTable.
return ShndxTable[Index];
}
template <class ELFT>
-ErrorOr<const typename ELFFile<ELFT>::Elf_Shdr *>
+Expected<uint32_t>
+ELFFile<ELFT>::getSectionIndex(const Elf_Sym *Sym, Elf_Sym_Range Syms,
+ ArrayRef<Elf_Word> ShndxTable) const {
+ uint32_t Index = Sym->st_shndx;
+ if (Index == ELF::SHN_XINDEX) {
+ auto ErrorOrIndex = object::getExtendedSymbolTableIndex<ELFT>(
+ Sym, Syms.begin(), ShndxTable);
+ if (!ErrorOrIndex)
+ return ErrorOrIndex.takeError();
+ return *ErrorOrIndex;
+ }
+ if (Index == ELF::SHN_UNDEF || Index >= ELF::SHN_LORESERVE)
+ return 0;
+ return Index;
+}
+
+template <class ELFT>
+Expected<const typename ELFT::Shdr *>
ELFFile<ELFT>::getSection(const Elf_Sym *Sym, const Elf_Shdr *SymTab,
ArrayRef<Elf_Word> ShndxTable) const {
- uint32_t Index = Sym->st_shndx;
- if (Index == ELF::SHN_XINDEX)
- return getSection(getExtendedSymbolTableIndex(Sym, SymTab, ShndxTable));
+ auto SymsOrErr = symbols(SymTab);
+ if (!SymsOrErr)
+ return SymsOrErr.takeError();
+ return getSection(Sym, *SymsOrErr, ShndxTable);
+}
- if (Index == ELF::SHN_UNDEF || Index >= ELF::SHN_LORESERVE)
+template <class ELFT>
+Expected<const typename ELFT::Shdr *>
+ELFFile<ELFT>::getSection(const Elf_Sym *Sym, Elf_Sym_Range Symbols,
+ ArrayRef<Elf_Word> ShndxTable) const {
+ auto IndexOrErr = getSectionIndex(Sym, Symbols, ShndxTable);
+ if (!IndexOrErr)
+ return IndexOrErr.takeError();
+ uint32_t Index = *IndexOrErr;
+ if (Index == 0)
return nullptr;
- return getSection(Sym->st_shndx);
+ auto SectionsOrErr = sections();
+ if (!SectionsOrErr)
+ return SectionsOrErr.takeError();
+ return object::getSection<ELFT>(*SectionsOrErr, Index);
+}
+
+template <class ELFT>
+inline Expected<const typename ELFT::Sym *>
+getSymbol(typename ELFT::SymRange Symbols, uint32_t Index) {
+ if (Index >= Symbols.size())
+ return createError("invalid symbol index");
+ return &Symbols[Index];
+}
+
+template <class ELFT>
+Expected<const typename ELFT::Sym *>
+ELFFile<ELFT>::getSymbol(const Elf_Shdr *Sec, uint32_t Index) const {
+ auto SymtabOrErr = symbols(Sec);
+ if (!SymtabOrErr)
+ return SymtabOrErr.takeError();
+ return object::getSymbol<ELFT>(*SymtabOrErr, Index);
}
template <class ELFT>
template <typename T>
-ErrorOr<ArrayRef<T>>
+Expected<ArrayRef<T>>
ELFFile<ELFT>::getSectionContentsAsArray(const Elf_Shdr *Sec) const {
+ if (Sec->sh_entsize != sizeof(T) && sizeof(T) != 1)
+ return createError("invalid sh_entsize");
+
uintX_t Offset = Sec->sh_offset;
uintX_t Size = Sec->sh_size;
if (Size % sizeof(T))
- return object_error::parse_failed;
- if (Offset + Size > Buf.size())
- return object_error::parse_failed;
+ return createError("size is not a multiple of sh_entsize");
+ if ((std::numeric_limits<uintX_t>::max() - Offset < Size) ||
+ Offset + Size > Buf.size())
+ return createError("invalid section offset");
const T *Start = reinterpret_cast<const T *>(base() + Offset);
return makeArrayRef(Start, Size / sizeof(T));
}
template <class ELFT>
-ErrorOr<ArrayRef<uint8_t>>
+Expected<ArrayRef<uint8_t>>
ELFFile<ELFT>::getSectionContents(const Elf_Shdr *Sec) const {
return getSectionContentsAsArray<uint8_t>(Sec);
}
template <class ELFT>
StringRef ELFFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
- return getELFRelocationTypeName(Header->e_machine, Type);
+ return getELFRelocationTypeName(getHeader()->e_machine, Type);
}
template <class ELFT>
@@ -302,7 +311,7 @@ void ELFFile<ELFT>::getRelocationTypeName(uint32_t Type,
}
template <class ELFT>
-const typename ELFFile<ELFT>::Elf_Sym *
+Expected<const typename ELFT::Sym *>
ELFFile<ELFT>::getRelocationSymbol(const Elf_Rel *Rel,
const Elf_Shdr *SymTab) const {
uint32_t Index = Rel->getSymbol(isMips64EL());
@@ -312,193 +321,197 @@ ELFFile<ELFT>::getRelocationSymbol(const Elf_Rel *Rel,
}
template <class ELFT>
-uint64_t ELFFile<ELFT>::getNumSections() const {
- assert(Header && "Header not initialized!");
- if (Header->e_shnum == ELF::SHN_UNDEF && Header->e_shoff > 0) {
- assert(SectionHeaderTable && "SectionHeaderTable not initialized!");
- return SectionHeaderTable->sh_size;
- }
- return Header->e_shnum;
+Expected<StringRef>
+ELFFile<ELFT>::getSectionStringTable(Elf_Shdr_Range Sections) const {
+ uint32_t Index = getHeader()->e_shstrndx;
+ if (Index == ELF::SHN_XINDEX)
+ Index = Sections[0].sh_link;
+
+ if (!Index) // no section string table.
+ return "";
+ if (Index >= Sections.size())
+ return createError("invalid section index");
+ return getStringTable(&Sections[Index]);
}
template <class ELFT>
-typename ELFFile<ELFT>::uintX_t ELFFile<ELFT>::getStringTableIndex() const {
- if (Header->e_shnum == ELF::SHN_UNDEF) {
- if (Header->e_shstrndx == ELF::SHN_HIRESERVE)
- return SectionHeaderTable->sh_link;
- if (Header->e_shstrndx >= getNumSections())
- return 0;
- }
- return Header->e_shstrndx;
+ELFFile<ELFT>::ELFFile(StringRef Object) : Buf(Object) {
+ assert(sizeof(Elf_Ehdr) <= Buf.size() && "Invalid buffer");
}
template <class ELFT>
-ELFFile<ELFT>::ELFFile(StringRef Object, std::error_code &EC)
- : Buf(Object) {
- const uint64_t FileSize = Buf.size();
+static bool compareAddr(uint64_t VAddr, const Elf_Phdr_Impl<ELFT> *Phdr) {
+ return VAddr < Phdr->p_vaddr;
+}
- if (sizeof(Elf_Ehdr) > FileSize) {
- // File too short!
- EC = object_error::parse_failed;
- return;
- }
+template <class ELFT>
+Expected<typename ELFT::ShdrRange> ELFFile<ELFT>::sections() const {
+ const uintX_t SectionTableOffset = getHeader()->e_shoff;
+ if (SectionTableOffset == 0)
+ return ArrayRef<Elf_Shdr>();
- Header = reinterpret_cast<const Elf_Ehdr *>(base());
+ if (getHeader()->e_shentsize != sizeof(Elf_Shdr))
+ return createError(
+ "invalid section header entry size (e_shentsize) in ELF header");
- if (Header->e_shoff == 0)
- return;
+ const uint64_t FileSize = Buf.size();
- const uint64_t SectionTableOffset = Header->e_shoff;
+ if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize)
+ return createError("section header table goes past the end of the file");
- if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize) {
- // Section header table goes past end of file!
- EC = object_error::parse_failed;
- return;
- }
+ // Invalid address alignment of section headers
+ if (SectionTableOffset & (alignof(Elf_Shdr) - 1))
+ return createError("invalid alignment of section headers");
- // The getNumSections() call below depends on SectionHeaderTable being set.
- SectionHeaderTable =
- reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset);
- const uint64_t SectionTableSize = getNumSections() * Header->e_shentsize;
+ const Elf_Shdr *First =
+ reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset);
- if (SectionTableOffset + SectionTableSize > FileSize) {
- // Section table goes past end of file!
- EC = object_error::parse_failed;
- return;
- }
+ uintX_t NumSections = getHeader()->e_shnum;
+ if (NumSections == 0)
+ NumSections = First->sh_size;
- // Get string table sections.
- uintX_t StringTableIndex = getStringTableIndex();
- if (StringTableIndex) {
- ErrorOr<const Elf_Shdr *> StrTabSecOrErr = getSection(StringTableIndex);
- if ((EC = StrTabSecOrErr.getError()))
- return;
-
- ErrorOr<StringRef> StringTableOrErr = getStringTable(*StrTabSecOrErr);
- if ((EC = StringTableOrErr.getError()))
- return;
- DotShstrtab = *StringTableOrErr;
- }
+ if (NumSections > UINT64_MAX / sizeof(Elf_Shdr))
+ return createError("section table goes past the end of file");
- EC = std::error_code();
-}
+ const uint64_t SectionTableSize = NumSections * sizeof(Elf_Shdr);
-template <class ELFT>
-static bool compareAddr(uint64_t VAddr, const Elf_Phdr_Impl<ELFT> *Phdr) {
- return VAddr < Phdr->p_vaddr;
-}
-
-template <class ELFT>
-const typename ELFFile<ELFT>::Elf_Shdr *ELFFile<ELFT>::section_begin() const {
- if (Header->e_shentsize != sizeof(Elf_Shdr))
- report_fatal_error(
- "Invalid section header entry size (e_shentsize) in ELF header");
- return reinterpret_cast<const Elf_Shdr *>(base() + Header->e_shoff);
-}
+ // Section table goes past end of file!
+ if (SectionTableOffset + SectionTableSize > FileSize)
+ return createError("section table goes past the end of file");
-template <class ELFT>
-const typename ELFFile<ELFT>::Elf_Shdr *ELFFile<ELFT>::section_end() const {
- return section_begin() + getNumSections();
+ return makeArrayRef(First, NumSections);
}
template <class ELFT>
template <typename T>
-const T *ELFFile<ELFT>::getEntry(uint32_t Section, uint32_t Entry) const {
- ErrorOr<const Elf_Shdr *> Sec = getSection(Section);
- if (std::error_code EC = Sec.getError())
- report_fatal_error(EC.message());
- return getEntry<T>(*Sec, Entry);
+Expected<const T *> ELFFile<ELFT>::getEntry(uint32_t Section,
+ uint32_t Entry) const {
+ auto SecOrErr = getSection(Section);
+ if (!SecOrErr)
+ return SecOrErr.takeError();
+ return getEntry<T>(*SecOrErr, Entry);
}
template <class ELFT>
template <typename T>
-const T *ELFFile<ELFT>::getEntry(const Elf_Shdr *Section,
- uint32_t Entry) const {
- return reinterpret_cast<const T *>(base() + Section->sh_offset +
- (Entry * Section->sh_entsize));
+Expected<const T *> ELFFile<ELFT>::getEntry(const Elf_Shdr *Section,
+ uint32_t Entry) const {
+ if (sizeof(T) != Section->sh_entsize)
+ return createError("invalid sh_entsize");
+ size_t Pos = Section->sh_offset + Entry * sizeof(T);
+ if (Pos + sizeof(T) > Buf.size())
+ return createError("invalid section offset");
+ return reinterpret_cast<const T *>(base() + Pos);
}
template <class ELFT>
-ErrorOr<const typename ELFFile<ELFT>::Elf_Shdr *>
+Expected<const typename ELFT::Shdr *>
ELFFile<ELFT>::getSection(uint32_t Index) const {
- assert(SectionHeaderTable && "SectionHeaderTable not initialized!");
- if (Index >= getNumSections())
- return object_error::invalid_section_index;
-
- return reinterpret_cast<const Elf_Shdr *>(
- reinterpret_cast<const char *>(SectionHeaderTable) +
- (Index * Header->e_shentsize));
+ auto TableOrErr = sections();
+ if (!TableOrErr)
+ return TableOrErr.takeError();
+ return object::getSection<ELFT>(*TableOrErr, Index);
}
template <class ELFT>
-ErrorOr<StringRef>
+Expected<StringRef>
ELFFile<ELFT>::getStringTable(const Elf_Shdr *Section) const {
if (Section->sh_type != ELF::SHT_STRTAB)
- return object_error::parse_failed;
- uint64_t Offset = Section->sh_offset;
- uint64_t Size = Section->sh_size;
- if (Offset + Size > Buf.size())
- return object_error::parse_failed;
- StringRef Data((const char *)base() + Section->sh_offset, Size);
- if (Data[Size - 1] != '\0')
- return object_error::string_table_non_null_end;
- return Data;
+ return createError("invalid sh_type for string table, expected SHT_STRTAB");
+ auto V = getSectionContentsAsArray<char>(Section);
+ if (!V)
+ return V.takeError();
+ ArrayRef<char> Data = *V;
+ if (Data.empty())
+ return createError("empty string table");
+ if (Data.back() != '\0')
+ return createError("string table non-null terminated");
+ return StringRef(Data.begin(), Data.size());
}
template <class ELFT>
-ErrorOr<ArrayRef<typename ELFFile<ELFT>::Elf_Word>>
+Expected<ArrayRef<typename ELFT::Word>>
ELFFile<ELFT>::getSHNDXTable(const Elf_Shdr &Section) const {
+ auto SectionsOrErr = sections();
+ if (!SectionsOrErr)
+ return SectionsOrErr.takeError();
+ return getSHNDXTable(Section, *SectionsOrErr);
+}
+
+template <class ELFT>
+Expected<ArrayRef<typename ELFT::Word>>
+ELFFile<ELFT>::getSHNDXTable(const Elf_Shdr &Section,
+ Elf_Shdr_Range Sections) const {
assert(Section.sh_type == ELF::SHT_SYMTAB_SHNDX);
- const Elf_Word *ShndxTableBegin =
- reinterpret_cast<const Elf_Word *>(base() + Section.sh_offset);
- uintX_t Size = Section.sh_size;
- if (Size % sizeof(uint32_t))
- return object_error::parse_failed;
- uintX_t NumSymbols = Size / sizeof(uint32_t);
- const Elf_Word *ShndxTableEnd = ShndxTableBegin + NumSymbols;
- if (reinterpret_cast<const char *>(ShndxTableEnd) > Buf.end())
- return object_error::parse_failed;
- ErrorOr<const Elf_Shdr *> SymTableOrErr = getSection(Section.sh_link);
- if (std::error_code EC = SymTableOrErr.getError())
- return EC;
+ auto VOrErr = getSectionContentsAsArray<Elf_Word>(&Section);
+ if (!VOrErr)
+ return VOrErr.takeError();
+ ArrayRef<Elf_Word> V = *VOrErr;
+ auto SymTableOrErr = object::getSection<ELFT>(Sections, Section.sh_link);
+ if (!SymTableOrErr)
+ return SymTableOrErr.takeError();
const Elf_Shdr &SymTable = **SymTableOrErr;
if (SymTable.sh_type != ELF::SHT_SYMTAB &&
SymTable.sh_type != ELF::SHT_DYNSYM)
- return object_error::parse_failed;
- if (NumSymbols != (SymTable.sh_size / sizeof(Elf_Sym)))
- return object_error::parse_failed;
- return makeArrayRef(ShndxTableBegin, ShndxTableEnd);
+ return createError("invalid sh_type");
+ if (V.size() != (SymTable.sh_size / sizeof(Elf_Sym)))
+ return createError("invalid section contents size");
+ return V;
}
template <class ELFT>
-ErrorOr<StringRef>
+Expected<StringRef>
ELFFile<ELFT>::getStringTableForSymtab(const Elf_Shdr &Sec) const {
+ auto SectionsOrErr = sections();
+ if (!SectionsOrErr)
+ return SectionsOrErr.takeError();
+ return getStringTableForSymtab(Sec, *SectionsOrErr);
+}
+
+template <class ELFT>
+Expected<StringRef>
+ELFFile<ELFT>::getStringTableForSymtab(const Elf_Shdr &Sec,
+ Elf_Shdr_Range Sections) const {
+
if (Sec.sh_type != ELF::SHT_SYMTAB && Sec.sh_type != ELF::SHT_DYNSYM)
- return object_error::parse_failed;
- ErrorOr<const Elf_Shdr *> SectionOrErr = getSection(Sec.sh_link);
- if (std::error_code EC = SectionOrErr.getError())
- return EC;
+ return createError(
+ "invalid sh_type for symbol table, expected SHT_SYMTAB or SHT_DYNSYM");
+ auto SectionOrErr = object::getSection<ELFT>(Sections, Sec.sh_link);
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
return getStringTable(*SectionOrErr);
}
template <class ELFT>
-ErrorOr<StringRef>
+Expected<StringRef>
ELFFile<ELFT>::getSectionName(const Elf_Shdr *Section) const {
+ auto SectionsOrErr = sections();
+ if (!SectionsOrErr)
+ return SectionsOrErr.takeError();
+ auto Table = getSectionStringTable(*SectionsOrErr);
+ if (!Table)
+ return Table.takeError();
+ return getSectionName(Section, *Table);
+}
+
+template <class ELFT>
+Expected<StringRef> ELFFile<ELFT>::getSectionName(const Elf_Shdr *Section,
+ StringRef DotShstrtab) const {
uint32_t Offset = Section->sh_name;
if (Offset == 0)
return StringRef();
if (Offset >= DotShstrtab.size())
- return object_error::parse_failed;
+ return createError("invalid string offset");
return StringRef(DotShstrtab.data() + Offset);
}
/// This function returns the hash value for a symbol in the .dynsym section
/// Name of the API remains consistent as specified in the libelf
/// REF : http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
-static inline unsigned elf_hash(StringRef &symbolName) {
+inline unsigned hashSysV(StringRef SymbolName) {
unsigned h = 0, g;
- for (unsigned i = 0, j = symbolName.size(); i < j; i++) {
- h = (h << 4) + symbolName[i];
+ for (char C : SymbolName) {
+ h = (h << 4) + C;
g = h & 0xf0000000L;
if (g != 0)
h ^= g >> 24;
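Taken together, ELFFile is now constructed directly over the buffer and every accessor that can fail returns Expected<>, so callers propagate llvm::Error rather than relying on report_fatal_error. A minimal sketch, written as a hypothetical helper returning Error; Obj is assumed to be a StringRef over valid ELF data at least sizeof(Elf_Ehdr) bytes long:

Error dumpSectionNames(StringRef Obj) {   // hypothetical helper
  ELFFile<ELF64LE> EF(Obj);
  auto SectionsOrErr = EF.sections();
  if (!SectionsOrErr)
    return SectionsOrErr.takeError();
  for (const ELF64LE::Shdr &Sec : *SectionsOrErr) {
    auto NameOrErr = EF.getSectionName(&Sec);
    if (!NameOrErr)
      return NameOrErr.takeError();
    outs() << *NameOrErr << '\n';
  }
  return Error::success();
}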
diff --git a/include/llvm/Object/ELFObjectFile.h b/include/llvm/Object/ELFObjectFile.h
index 07c6364a6894..69987d433e2d 100644
--- a/include/llvm/Object/ELFObjectFile.h
+++ b/include/llvm/Object/ELFObjectFile.h
@@ -59,6 +59,7 @@ protected:
virtual uint32_t getSectionType(DataRefImpl Sec) const = 0;
virtual uint64_t getSectionFlags(DataRefImpl Sec) const = 0;
+ virtual uint64_t getSectionOffset(DataRefImpl Sec) const = 0;
virtual ErrorOr<int64_t> getRelocationAddend(DataRefImpl Rel) const = 0;
@@ -90,6 +91,10 @@ public:
uint64_t getFlags() const {
return getObject()->getSectionFlags(getRawDataRefImpl());
}
+
+ uint64_t getOffset() const {
+ return getObject()->getSectionOffset(getRawDataRefImpl());
+ }
};
class elf_section_iterator : public section_iterator {
@@ -245,11 +250,15 @@ protected:
uint32_t getSectionType(DataRefImpl Sec) const override;
uint64_t getSectionFlags(DataRefImpl Sec) const override;
+ uint64_t getSectionOffset(DataRefImpl Sec) const override;
StringRef getRelocationTypeName(uint32_t Type) const;
/// \brief Get the relocation section that contains \a Rel.
const Elf_Shdr *getRelSection(DataRefImpl Rel) const {
- return *EF.getSection(Rel.d.a);
+ auto RelSecOrErr = EF.getSection(Rel.d.a);
+ if (!RelSecOrErr)
+ report_fatal_error(errorToErrorCode(RelSecOrErr.takeError()).message());
+ return *RelSecOrErr;
}
DataRefImpl toDRI(const Elf_Shdr *SymTable, unsigned SymbolNum) const {
@@ -262,7 +271,13 @@ protected:
assert(SymTable->sh_type == ELF::SHT_SYMTAB ||
SymTable->sh_type == ELF::SHT_DYNSYM);
- uintptr_t SHT = reinterpret_cast<uintptr_t>(EF.section_begin());
+ auto SectionsOrErr = EF.sections();
+ if (!SectionsOrErr) {
+ DRI.d.a = 0;
+ DRI.d.b = 0;
+ return DRI;
+ }
+ uintptr_t SHT = reinterpret_cast<uintptr_t>((*SectionsOrErr).begin());
unsigned SymTableIndex =
(reinterpret_cast<uintptr_t>(SymTable) - SHT) / sizeof(Elf_Shdr);
@@ -311,15 +326,18 @@ public:
const Elf_Rela *getRela(DataRefImpl Rela) const;
const Elf_Sym *getSymbol(DataRefImpl Sym) const {
- return EF.template getEntry<Elf_Sym>(Sym.d.a, Sym.d.b);
+ auto Ret = EF.template getEntry<Elf_Sym>(Sym.d.a, Sym.d.b);
+ if (!Ret)
+ report_fatal_error(errorToErrorCode(Ret.takeError()).message());
+ return *Ret;
}
const Elf_Shdr *getSection(DataRefImpl Sec) const {
return reinterpret_cast<const Elf_Shdr *>(Sec.p);
}
- basic_symbol_iterator symbol_begin_impl() const override;
- basic_symbol_iterator symbol_end_impl() const override;
+ basic_symbol_iterator symbol_begin() const override;
+ basic_symbol_iterator symbol_end() const override;
elf_symbol_iterator dynamic_symbol_begin() const;
elf_symbol_iterator dynamic_symbol_end() const;
@@ -364,10 +382,18 @@ void ELFObjectFile<ELFT>::moveSymbolNext(DataRefImpl &Sym) const {
template <class ELFT>
Expected<StringRef> ELFObjectFile<ELFT>::getSymbolName(DataRefImpl Sym) const {
const Elf_Sym *ESym = getSymbol(Sym);
- const Elf_Shdr *SymTableSec = *EF.getSection(Sym.d.a);
- const Elf_Shdr *StringTableSec = *EF.getSection(SymTableSec->sh_link);
- StringRef SymTable = *EF.getStringTable(StringTableSec);
- return ESym->getName(SymTable);
+ auto SymTabOrErr = EF.getSection(Sym.d.a);
+ if (!SymTabOrErr)
+ return SymTabOrErr.takeError();
+ const Elf_Shdr *SymTableSec = *SymTabOrErr;
+ auto StrTabOrErr = EF.getSection(SymTableSec->sh_link);
+ if (!StrTabOrErr)
+ return StrTabOrErr.takeError();
+ const Elf_Shdr *StringTableSec = *StrTabOrErr;
+ auto SymStrTabOrErr = EF.getStringTable(StringTableSec);
+ if (!SymStrTabOrErr)
+ return SymStrTabOrErr.takeError();
+ return ESym->getName(*SymStrTabOrErr);
}
template <class ELFT>
@@ -381,6 +407,11 @@ uint32_t ELFObjectFile<ELFT>::getSectionType(DataRefImpl Sec) const {
}
template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionOffset(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_offset;
+}
+
+template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSymbolValueImpl(DataRefImpl Symb) const {
const Elf_Sym *ESym = getSymbol(Symb);
uint64_t Ret = ESym->st_value;
@@ -409,13 +440,15 @@ ELFObjectFile<ELFT>::getSymbolAddress(DataRefImpl Symb) const {
}
const Elf_Ehdr *Header = EF.getHeader();
- const Elf_Shdr *SymTab = *EF.getSection(Symb.d.a);
+ auto SymTabOrErr = EF.getSection(Symb.d.a);
+ if (!SymTabOrErr)
+ return SymTabOrErr.takeError();
+ const Elf_Shdr *SymTab = *SymTabOrErr;
if (Header->e_type == ELF::ET_REL) {
- ErrorOr<const Elf_Shdr *> SectionOrErr =
- EF.getSection(ESym, SymTab, ShndxTable);
- if (std::error_code EC = SectionOrErr.getError())
- return errorCodeToError(EC);
+ auto SectionOrErr = EF.getSection(ESym, SymTab, ShndxTable);
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
const Elf_Shdr *Section = *SectionOrErr;
if (Section)
Result += Section->sh_addr;
@@ -495,9 +528,14 @@ uint32_t ELFObjectFile<ELFT>::getSymbolFlags(DataRefImpl Sym) const {
if (ESym->st_shndx == ELF::SHN_ABS)
Result |= SymbolRef::SF_Absolute;
- if (ESym->getType() == ELF::STT_FILE || ESym->getType() == ELF::STT_SECTION ||
- ESym == EF.symbol_begin(DotSymtabSec) ||
- ESym == EF.symbol_begin(DotDynSymSec))
+ if (ESym->getType() == ELF::STT_FILE || ESym->getType() == ELF::STT_SECTION)
+ Result |= SymbolRef::SF_FormatSpecific;
+
+ auto DotSymtabSecSyms = EF.symbols(DotSymtabSec);
+ if (DotSymtabSecSyms && ESym == (*DotSymtabSecSyms).begin())
+ Result |= SymbolRef::SF_FormatSpecific;
+ auto DotDynSymSecSyms = EF.symbols(DotDynSymSec);
+ if (DotDynSymSecSyms && ESym == (*DotDynSymSecSyms).begin())
Result |= SymbolRef::SF_FormatSpecific;
if (EF.getHeader()->e_machine == ELF::EM_ARM) {
@@ -533,9 +571,9 @@ template <class ELFT>
Expected<section_iterator>
ELFObjectFile<ELFT>::getSymbolSection(const Elf_Sym *ESym,
const Elf_Shdr *SymTab) const {
- ErrorOr<const Elf_Shdr *> ESecOrErr = EF.getSection(ESym, SymTab, ShndxTable);
- if (std::error_code EC = ESecOrErr.getError())
- return errorCodeToError(EC);
+ auto ESecOrErr = EF.getSection(ESym, SymTab, ShndxTable);
+ if (!ESecOrErr)
+ return ESecOrErr.takeError();
const Elf_Shdr *ESec = *ESecOrErr;
if (!ESec)
@@ -550,7 +588,10 @@ template <class ELFT>
Expected<section_iterator>
ELFObjectFile<ELFT>::getSymbolSection(DataRefImpl Symb) const {
const Elf_Sym *Sym = getSymbol(Symb);
- const Elf_Shdr *SymTab = *EF.getSection(Symb.d.a);
+ auto SymTabOrErr = EF.getSection(Symb.d.a);
+ if (!SymTabOrErr)
+ return SymTabOrErr.takeError();
+ const Elf_Shdr *SymTab = *SymTabOrErr;
return getSymbolSection(Sym, SymTab);
}
@@ -563,9 +604,9 @@ void ELFObjectFile<ELFT>::moveSectionNext(DataRefImpl &Sec) const {
template <class ELFT>
std::error_code ELFObjectFile<ELFT>::getSectionName(DataRefImpl Sec,
StringRef &Result) const {
- ErrorOr<StringRef> Name = EF.getSectionName(&*getSection(Sec));
+ auto Name = EF.getSectionName(&*getSection(Sec));
if (!Name)
- return Name.getError();
+ return errorToErrorCode(Name.takeError());
Result = *Name;
return std::error_code();
}
@@ -627,7 +668,10 @@ template <class ELFT>
relocation_iterator
ELFObjectFile<ELFT>::section_rel_begin(DataRefImpl Sec) const {
DataRefImpl RelData;
- uintptr_t SHT = reinterpret_cast<uintptr_t>(EF.section_begin());
+ auto SectionsOrErr = EF.sections();
+ if (!SectionsOrErr)
+ return relocation_iterator(RelocationRef());
+ uintptr_t SHT = reinterpret_cast<uintptr_t>((*SectionsOrErr).begin());
RelData.d.a = (Sec.p - SHT) / EF.getHeader()->e_shentsize;
RelData.d.b = 0;
return relocation_iterator(RelocationRef(RelData, this));
@@ -644,9 +688,9 @@ ELFObjectFile<ELFT>::section_rel_end(DataRefImpl Sec) const {
const Elf_Shdr *RelSec = getRelSection(RelData);
// Error check sh_link here so that getRelocationSymbol can just use it.
- ErrorOr<const Elf_Shdr *> SymSecOrErr = EF.getSection(RelSec->sh_link);
- if (std::error_code EC = SymSecOrErr.getError())
- report_fatal_error(EC.message());
+ auto SymSecOrErr = EF.getSection(RelSec->sh_link);
+ if (!SymSecOrErr)
+ report_fatal_error(errorToErrorCode(SymSecOrErr.takeError()).message());
RelData.d.b += S->sh_size / S->sh_entsize;
return relocation_iterator(RelocationRef(RelData, this));
@@ -663,9 +707,9 @@ ELFObjectFile<ELFT>::getRelocatedSection(DataRefImpl Sec) const {
if (Type != ELF::SHT_REL && Type != ELF::SHT_RELA)
return section_end();
- ErrorOr<const Elf_Shdr *> R = EF.getSection(EShdr->sh_info);
- if (std::error_code EC = R.getError())
- report_fatal_error(EC.message());
+ auto R = EF.getSection(EShdr->sh_info);
+ if (!R)
+ report_fatal_error(errorToErrorCode(R.takeError()).message());
return section_iterator(SectionRef(toDRI(*R), this));
}
@@ -738,14 +782,20 @@ template <class ELFT>
const typename ELFObjectFile<ELFT>::Elf_Rel *
ELFObjectFile<ELFT>::getRel(DataRefImpl Rel) const {
assert(getRelSection(Rel)->sh_type == ELF::SHT_REL);
- return EF.template getEntry<Elf_Rel>(Rel.d.a, Rel.d.b);
+ auto Ret = EF.template getEntry<Elf_Rel>(Rel.d.a, Rel.d.b);
+ if (!Ret)
+ report_fatal_error(errorToErrorCode(Ret.takeError()).message());
+ return *Ret;
}
template <class ELFT>
const typename ELFObjectFile<ELFT>::Elf_Rela *
ELFObjectFile<ELFT>::getRela(DataRefImpl Rela) const {
assert(getRelSection(Rela)->sh_type == ELF::SHT_RELA);
- return EF.template getEntry<Elf_Rela>(Rela.d.a, Rela.d.b);
+ auto Ret = EF.template getEntry<Elf_Rela>(Rela.d.a, Rela.d.b);
+ if (!Ret)
+ report_fatal_error(errorToErrorCode(Ret.takeError()).message());
+ return *Ret;
}
template <class ELFT>
@@ -753,10 +803,13 @@ ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, std::error_code &EC)
: ELFObjectFileBase(
getELFType(ELFT::TargetEndianness == support::little, ELFT::Is64Bits),
Object),
- EF(Data.getBuffer(), EC) {
- if (EC)
+ EF(Data.getBuffer()) {
+ auto SectionsOrErr = EF.sections();
+ if (!SectionsOrErr) {
+ EC = errorToErrorCode(SectionsOrErr.takeError());
return;
- for (const Elf_Shdr &Sec : EF.sections()) {
+ }
+ for (const Elf_Shdr &Sec : *SectionsOrErr) {
switch (Sec.sh_type) {
case ELF::SHT_DYNSYM: {
if (DotDynSymSec) {
@@ -777,9 +830,11 @@ ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, std::error_code &EC)
break;
}
case ELF::SHT_SYMTAB_SHNDX: {
- ErrorOr<ArrayRef<Elf_Word>> TableOrErr = EF.getSHNDXTable(Sec);
- if ((EC = TableOrErr.getError()))
+ auto TableOrErr = EF.getSHNDXTable(Sec);
+ if (!TableOrErr) {
+ EC = errorToErrorCode(TableOrErr.takeError());
return;
+ }
ShndxTable = *TableOrErr;
break;
}
@@ -788,16 +843,16 @@ ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, std::error_code &EC)
}
template <class ELFT>
-basic_symbol_iterator ELFObjectFile<ELFT>::symbol_begin_impl() const {
+basic_symbol_iterator ELFObjectFile<ELFT>::symbol_begin() const {
DataRefImpl Sym = toDRI(DotSymtabSec, 0);
return basic_symbol_iterator(SymbolRef(Sym, this));
}
template <class ELFT>
-basic_symbol_iterator ELFObjectFile<ELFT>::symbol_end_impl() const {
+basic_symbol_iterator ELFObjectFile<ELFT>::symbol_end() const {
const Elf_Shdr *SymTab = DotSymtabSec;
if (!SymTab)
- return symbol_begin_impl();
+ return symbol_begin();
DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
return basic_symbol_iterator(SymbolRef(Sym, this));
}
@@ -817,12 +872,18 @@ elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_end() const {
template <class ELFT>
section_iterator ELFObjectFile<ELFT>::section_begin() const {
- return section_iterator(SectionRef(toDRI(EF.section_begin()), this));
+ auto SectionsOrErr = EF.sections();
+ if (!SectionsOrErr)
+ return section_iterator(SectionRef());
+ return section_iterator(SectionRef(toDRI((*SectionsOrErr).begin()), this));
}
template <class ELFT>
section_iterator ELFObjectFile<ELFT>::section_end() const {
- return section_iterator(SectionRef(toDRI(EF.section_end()), this));
+ auto SectionsOrErr = EF.sections();
+ if (!SectionsOrErr)
+ return section_iterator(SectionRef());
+ return section_iterator(SectionRef(toDRI((*SectionsOrErr).end()), this));
}
template <class ELFT>
@@ -854,6 +915,8 @@ StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
return "ELF32-mips";
case ELF::EM_PPC:
return "ELF32-ppc";
+ case ELF::EM_RISCV:
+ return "ELF32-riscv";
case ELF::EM_SPARC:
case ELF::EM_SPARC32PLUS:
return "ELF32-sparc";
@@ -874,6 +937,8 @@ StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
return (IsLittleEndian ? "ELF64-aarch64-little" : "ELF64-aarch64-big");
case ELF::EM_PPC64:
return "ELF64-ppc64";
+ case ELF::EM_RISCV:
+ return "ELF64-riscv";
case ELF::EM_S390:
return "ELF64-s390";
case ELF::EM_SPARCV9:
@@ -907,7 +972,7 @@ unsigned ELFObjectFile<ELFT>::getArch() const {
case ELF::EM_X86_64:
return Triple::x86_64;
case ELF::EM_AARCH64:
- return Triple::aarch64;
+ return IsLittleEndian ? Triple::aarch64 : Triple::aarch64_be;
case ELF::EM_ARM:
return Triple::arm;
case ELF::EM_AVR:
@@ -929,6 +994,15 @@ unsigned ELFObjectFile<ELFT>::getArch() const {
return Triple::ppc;
case ELF::EM_PPC64:
return IsLittleEndian ? Triple::ppc64le : Triple::ppc64;
+ case ELF::EM_RISCV:
+ switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
+ case ELF::ELFCLASS32:
+ return Triple::riscv32;
+ case ELF::ELFCLASS64:
+ return Triple::riscv64;
+ default:
+ report_fatal_error("Invalid ELFCLASS!");
+ }
case ELF::EM_S390:
return Triple::systemz;
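
The hunks above migrate the ELF accessors from ErrorOr<T> to Expected<T>. A caller-side sketch of the consumption pattern the new interfaces expect (illustrative; only the Expected-returning getSectionName signature is taken from this patch, the helper itself is hypothetical):

#include "llvm/Object/ELF.h"
#include "llvm/Support/Error.h"

// Returns the section name, or an empty StringRef if the lookup fails.
template <class ELFT>
llvm::StringRef
sectionNameOrEmpty(const llvm::object::ELFFile<ELFT> &EF,
                   const typename llvm::object::ELFFile<ELFT>::Elf_Shdr *Sec) {
  auto NameOrErr = EF.getSectionName(Sec); // Expected<StringRef>
  if (!NameOrErr) {
    llvm::consumeError(NameOrErr.takeError()); // Errors must be consumed
    return llvm::StringRef();
  }
  return *NameOrErr;
}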
diff --git a/include/llvm/Object/ELFTypes.h b/include/llvm/Object/ELFTypes.h
index 55028f360dbf..3e03fd8b980e 100644
--- a/include/llvm/Object/ELFTypes.h
+++ b/include/llvm/Object/ELFTypes.h
@@ -124,20 +124,19 @@ struct ELFDataTypeTypedefHelper<ELFType<TargetEndianness, true>>
};
// I really don't like doing this, but the alternative is copypasta.
-#define LLVM_ELF_IMPORT_TYPES(E, W) \
- typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Addr Elf_Addr; \
- typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Off Elf_Off; \
- typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Half Elf_Half; \
- typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Word Elf_Word; \
- typedef \
- typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Sword Elf_Sword; \
- typedef \
- typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Xword Elf_Xword; \
- typedef \
- typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Sxword Elf_Sxword;
#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT) \
- LLVM_ELF_IMPORT_TYPES(ELFT::TargetEndianness, ELFT::Is64Bits)
+ typedef typename ELFT::Addr Elf_Addr; \
+ typedef typename ELFT::Off Elf_Off; \
+ typedef typename ELFT::Half Elf_Half; \
+ typedef typename ELFT::Word Elf_Word; \
+ typedef typename ELFT::Sword Elf_Sword; \
+ typedef typename ELFT::Xword Elf_Xword; \
+ typedef typename ELFT::Sxword Elf_Sxword;
+
+#define LLD_ELF_COMMA ,
+#define LLVM_ELF_IMPORT_TYPES(E, W) \
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFType<E LLD_ELF_COMMA W>)
// Section header.
template <class ELFT> struct Elf_Shdr_Base;
@@ -519,7 +518,7 @@ struct Elf_Phdr_Impl<ELFType<TargetEndianness, true>> {
Elf_Xword p_align; // Segment alignment constraint
};
-// ELFT needed for endianess.
+// ELFT needed for endianness.
template <class ELFT>
struct Elf_Hash_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
@@ -609,10 +608,13 @@ template <class ELFT> struct Elf_Mips_Options {
// or 0 for global options
Elf_Word info; // Kind-specific information
- const Elf_Mips_RegInfo<ELFT> &getRegInfo() const {
+ Elf_Mips_RegInfo<ELFT> &getRegInfo() {
assert(kind == llvm::ELF::ODK_REGINFO);
- return *reinterpret_cast<const Elf_Mips_RegInfo<ELFT> *>(
- (const uint8_t *)this + sizeof(Elf_Mips_Options));
+ return *reinterpret_cast<Elf_Mips_RegInfo<ELFT> *>(
+ (uint8_t *)this + sizeof(Elf_Mips_Options));
+ }
+ const Elf_Mips_RegInfo<ELFT> &getRegInfo() const {
+ return const_cast<Elf_Mips_Options *>(this)->getRegInfo();
}
};
diff --git a/include/llvm/Object/Error.h b/include/llvm/Object/Error.h
index cd55e5dc26d7..eb938338715d 100644
--- a/include/llvm/Object/Error.h
+++ b/include/llvm/Object/Error.h
@@ -34,6 +34,7 @@ enum class object_error {
string_table_non_null_end,
invalid_section_index,
bitcode_section_not_found,
+ invalid_symbol_index,
};
inline std::error_code make_error_code(object_error e) {
diff --git a/include/llvm/Object/IRObjectFile.h b/include/llvm/Object/IRObjectFile.h
index 9fe011e17d62..0ea89011e883 100644
--- a/include/llvm/Object/IRObjectFile.h
+++ b/include/llvm/Object/IRObjectFile.h
@@ -14,6 +14,8 @@
#ifndef LLVM_OBJECT_IROBJECTFILE_H
#define LLVM_OBJECT_IROBJECTFILE_H
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Object/SymbolicFile.h"
namespace llvm {
@@ -26,31 +28,21 @@ namespace object {
class ObjectFile;
class IRObjectFile : public SymbolicFile {
- std::unique_ptr<Module> M;
- std::unique_ptr<Mangler> Mang;
- std::vector<std::pair<std::string, uint32_t>> AsmSymbols;
+ std::vector<std::unique_ptr<Module>> Mods;
+ ModuleSymbolTable SymTab;
+ IRObjectFile(MemoryBufferRef Object,
+ std::vector<std::unique_ptr<Module>> Mods);
public:
- IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> M);
~IRObjectFile() override;
void moveSymbolNext(DataRefImpl &Symb) const override;
std::error_code printSymbolName(raw_ostream &OS,
DataRefImpl Symb) const override;
uint32_t getSymbolFlags(DataRefImpl Symb) const override;
- GlobalValue *getSymbolGV(DataRefImpl Symb);
- const GlobalValue *getSymbolGV(DataRefImpl Symb) const {
- return const_cast<IRObjectFile *>(this)->getSymbolGV(Symb);
- }
- basic_symbol_iterator symbol_begin_impl() const override;
- basic_symbol_iterator symbol_end_impl() const override;
+ basic_symbol_iterator symbol_begin() const override;
+ basic_symbol_iterator symbol_end() const override;
- const Module &getModule() const {
- return const_cast<IRObjectFile*>(this)->getModule();
- }
- Module &getModule() {
- return *M;
- }
- std::unique_ptr<Module> takeModule();
+ StringRef getTargetTriple() const;
static inline bool classof(const Binary *v) {
return v->isIR();
@@ -60,23 +52,14 @@ public:
/// error code if not found.
static ErrorOr<MemoryBufferRef> findBitcodeInObject(const ObjectFile &Obj);
- /// Parse inline ASM and collect the symbols that are not defined in
- /// the current module.
- ///
- /// For each found symbol, call \p AsmUndefinedRefs with the name of the
- /// symbol found and the associated flags.
- static void CollectAsmUndefinedRefs(
- const Triple &TheTriple, StringRef InlineAsm,
- function_ref<void(StringRef, BasicSymbolRef::Flags)> AsmUndefinedRefs);
-
/// \brief Finds and returns bitcode in the given memory buffer (which may
/// be either a bitcode file or a native object file with embedded bitcode),
/// or an error code if not found.
static ErrorOr<MemoryBufferRef>
findBitcodeInMemBuffer(MemoryBufferRef Object);
- static ErrorOr<std::unique_ptr<IRObjectFile>> create(MemoryBufferRef Object,
- LLVMContext &Context);
+ static Expected<std::unique_ptr<IRObjectFile>> create(MemoryBufferRef Object,
+ LLVMContext &Context);
};
}
}
diff --git a/include/llvm/Object/MachO.h b/include/llvm/Object/MachO.h
index 7906db1e8a77..8c33ec8fd603 100644
--- a/include/llvm/Object/MachO.h
+++ b/include/llvm/Object/MachO.h
@@ -194,7 +194,8 @@ public:
typedef LoadCommandList::const_iterator load_command_iterator;
static Expected<std::unique_ptr<MachOObjectFile>>
- create(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits);
+ create(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
+ uint32_t UniversalCputype = 0, uint32_t UniversalIndex = 0);
void moveSymbolNext(DataRefImpl &Symb) const override;
@@ -202,6 +203,8 @@ public:
Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
// MachO specific.
+ Error checkSymbolTable() const;
+
std::error_code getIndirectName(DataRefImpl Symb, StringRef &Res) const;
unsigned getSectionType(SectionRef Sec) const;
@@ -248,8 +251,8 @@ public:
// TODO: Would be useful to have an iterator based version
// of the load command interface too.
- basic_symbol_iterator symbol_begin_impl() const override;
- basic_symbol_iterator symbol_end_impl() const override;
+ basic_symbol_iterator symbol_begin() const override;
+ basic_symbol_iterator symbol_end() const override;
// MachO specific.
basic_symbol_iterator getSymbolByIndex(unsigned Index) const;
@@ -410,7 +413,8 @@ public:
static Triple::ArchType getArch(uint32_t CPUType);
static Triple getArchTriple(uint32_t CPUType, uint32_t CPUSubType,
- const char **McpuDefault = nullptr);
+ const char **McpuDefault = nullptr,
+ const char **ArchFlag = nullptr);
static bool isValidArch(StringRef ArchFlag);
static Triple getHostArch();
@@ -443,7 +447,8 @@ public:
private:
MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
- Error &Err);
+ Error &Err, uint32_t UniversalCputype = 0,
+ uint32_t UniversalIndex = 0);
uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
diff --git a/include/llvm/Object/MachOUniversal.h b/include/llvm/Object/MachOUniversal.h
index 7eb2af944f3d..a14c4ca01223 100644
--- a/include/llvm/Object/MachOUniversal.h
+++ b/include/llvm/Object/MachOUniversal.h
@@ -89,16 +89,24 @@ public:
else // Parent->getMagic() == MachO::FAT_MAGIC_64
return Header64.reserved;
}
- std::string getArchTypeName() const {
+ std::string getArchFlagName() const {
+ const char *McpuDefault, *ArchFlag;
if (Parent->getMagic() == MachO::FAT_MAGIC) {
Triple T =
- MachOObjectFile::getArchTriple(Header.cputype, Header.cpusubtype);
- return T.getArchName();
+ MachOObjectFile::getArchTriple(Header.cputype, Header.cpusubtype,
+ &McpuDefault, &ArchFlag);
} else { // Parent->getMagic() == MachO::FAT_MAGIC_64
Triple T =
MachOObjectFile::getArchTriple(Header64.cputype,
- Header64.cpusubtype);
- return T.getArchName();
+ Header64.cpusubtype,
+ &McpuDefault, &ArchFlag);
+ }
+ if (ArchFlag) {
+ std::string ArchFlagName(ArchFlag);
+ return ArchFlagName;
+ } else {
+ std::string ArchFlagName("");
+ return ArchFlagName;
}
}
diff --git a/include/llvm/Object/ModuleSummaryIndexObjectFile.h b/include/llvm/Object/ModuleSummaryIndexObjectFile.h
index d021fb29427f..6205927039dc 100644
--- a/include/llvm/Object/ModuleSummaryIndexObjectFile.h
+++ b/include/llvm/Object/ModuleSummaryIndexObjectFile.h
@@ -50,11 +50,11 @@ public:
llvm_unreachable("not implemented");
return 0;
}
- basic_symbol_iterator symbol_begin_impl() const override {
+ basic_symbol_iterator symbol_begin() const override {
llvm_unreachable("not implemented");
return basic_symbol_iterator(BasicSymbolRef());
}
- basic_symbol_iterator symbol_end_impl() const override {
+ basic_symbol_iterator symbol_end() const override {
llvm_unreachable("not implemented");
return basic_symbol_iterator(BasicSymbolRef());
}
@@ -79,25 +79,18 @@ public:
static ErrorOr<MemoryBufferRef>
findBitcodeInMemBuffer(MemoryBufferRef Object);
- /// \brief Looks for summary sections in the given memory buffer,
- /// returns true if found, else false.
- static bool hasGlobalValueSummaryInMemBuffer(
- MemoryBufferRef Object,
- const DiagnosticHandlerFunction &DiagnosticHandler);
-
/// \brief Parse module summary index in the given memory buffer.
/// Return new ModuleSummaryIndexObjectFile instance containing parsed module
/// summary/index.
- static ErrorOr<std::unique_ptr<ModuleSummaryIndexObjectFile>>
- create(MemoryBufferRef Object,
- const DiagnosticHandlerFunction &DiagnosticHandler);
+ static Expected<std::unique_ptr<ModuleSummaryIndexObjectFile>>
+ create(MemoryBufferRef Object);
};
}
/// Parse the module summary index out of an IR file and return the module
/// summary index object if found, or nullptr if not.
-ErrorOr<std::unique_ptr<ModuleSummaryIndex>> getModuleSummaryIndexForFile(
- StringRef Path, const DiagnosticHandlerFunction &DiagnosticHandler);
+Expected<std::unique_ptr<ModuleSummaryIndex>>
+getModuleSummaryIndexForFile(StringRef Path);
}
#endif
diff --git a/include/llvm/Object/ModuleSymbolTable.h b/include/llvm/Object/ModuleSymbolTable.h
new file mode 100644
index 000000000000..70775352d977
--- /dev/null
+++ b/include/llvm/Object/ModuleSymbolTable.h
@@ -0,0 +1,61 @@
+//===- ModuleSymbolTable.h - symbol table for in-memory IR ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class represents a symbol table built from in-memory IR. It provides
+// access to GlobalValues and should only be used if such access is required
+// (e.g. in the LTO implementation).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MODULESYMBOLTABLE_H
+#define LLVM_OBJECT_MODULESYMBOLTABLE_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Object/SymbolicFile.h"
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class GlobalValue;
+
+class ModuleSymbolTable {
+public:
+ typedef std::pair<std::string, uint32_t> AsmSymbol;
+ typedef PointerUnion<GlobalValue *, AsmSymbol *> Symbol;
+
+private:
+ Module *FirstMod = nullptr;
+
+ SpecificBumpPtrAllocator<AsmSymbol> AsmSymbols;
+ std::vector<Symbol> SymTab;
+ Mangler Mang;
+
+public:
+ ArrayRef<Symbol> symbols() const { return SymTab; }
+ void addModule(Module *M);
+
+ void printSymbolName(raw_ostream &OS, Symbol S) const;
+ uint32_t getSymbolFlags(Symbol S) const;
+
+ /// Parse inline ASM and collect the symbols that are defined or referenced in
+ /// the current module.
+ ///
+ /// For each found symbol, call \p AsmSymbol with the name of the symbol found
+ /// and the associated flags.
+ static void CollectAsmSymbols(
+ const Triple &TheTriple, StringRef InlineAsm,
+ function_ref<void(StringRef, object::BasicSymbolRef::Flags)> AsmSymbol);
+};
+
+}
+
+#endif
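
A usage sketch for the new ModuleSymbolTable (illustrative; assumes a Module has already been parsed into M):

#include "llvm/IR/Module.h"
#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Support/raw_ostream.h"

// Collects GlobalValue and inline-asm symbols from one module and prints them.
void dumpIRSymbols(llvm::Module &M) {
  llvm::ModuleSymbolTable SymTab;
  SymTab.addModule(&M);
  for (auto Sym : SymTab.symbols()) {
    SymTab.printSymbolName(llvm::outs(), Sym);
    llvm::outs() << " flags=" << SymTab.getSymbolFlags(Sym) << "\n";
  }
}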
diff --git a/include/llvm/Object/ObjectFile.h b/include/llvm/Object/ObjectFile.h
index 6272a5f056eb..13d5845c3a71 100644
--- a/include/llvm/Object/ObjectFile.h
+++ b/include/llvm/Object/ObjectFile.h
@@ -29,6 +29,7 @@ namespace object {
class ObjectFile;
class COFFObjectFile;
class MachOObjectFile;
+class WasmObjectFile;
class SymbolRef;
class symbol_iterator;
@@ -300,8 +301,12 @@ public:
createELFObjectFile(MemoryBufferRef Object);
static Expected<std::unique_ptr<MachOObjectFile>>
- createMachOObjectFile(MemoryBufferRef Object);
+ createMachOObjectFile(MemoryBufferRef Object,
+ uint32_t UniversalCputype = 0,
+ uint32_t UniversalIndex = 0);
+ static Expected<std::unique_ptr<WasmObjectFile>>
+ createWasmObjectFile(MemoryBufferRef Object);
};
// Inline function definitions.
diff --git a/include/llvm/Object/RelocVisitor.h b/include/llvm/Object/RelocVisitor.h
index 5e0df98d8627..3510d293d73d 100644
--- a/include/llvm/Object/RelocVisitor.h
+++ b/include/llvm/Object/RelocVisitor.h
@@ -86,6 +86,7 @@ private:
return RelocToApply();
}
case Triple::aarch64:
+ case Triple::aarch64_be:
switch (RelocType) {
case llvm::ELF::R_AARCH64_ABS32:
return visitELF_AARCH64_ABS32(R, Value);
@@ -95,6 +96,17 @@ private:
HasError = true;
return RelocToApply();
}
+ case Triple::bpfel:
+ case Triple::bpfeb:
+ switch (RelocType) {
+ case llvm::ELF::R_BPF_64_64:
+ return visitELF_BPF_64_64(R, Value);
+ case llvm::ELF::R_BPF_64_32:
+ return visitELF_BPF_64_32(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
case Triple::mips64el:
case Triple::mips64:
switch (RelocType) {
@@ -139,6 +151,14 @@ private:
HasError = true;
return RelocToApply();
}
+ case Triple::amdgcn:
+ switch (RelocType) {
+ case llvm::ELF::R_AMDGPU_ABS32:
+ return visitELF_AMDGPU_ABS32(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
default:
HasError = true;
return RelocToApply();
@@ -200,6 +220,14 @@ private:
HasError = true;
return RelocToApply();
}
+ case Triple::hexagon:
+ switch (RelocType) {
+ case llvm::ELF::R_HEX_32:
+ return visitELF_HEX_32(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
default:
HasError = true;
return RelocToApply();
@@ -300,6 +328,15 @@ private:
return RelocToApply(Res, 4);
}
+ /// BPF ELF
+ RelocToApply visitELF_BPF_64_32(RelocationRef R, uint64_t Value) {
+ uint32_t Res = Value & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+ RelocToApply visitELF_BPF_64_64(RelocationRef R, uint64_t Value) {
+ return RelocToApply(Value, 8);
+ }
+
/// PPC64 ELF
RelocToApply visitELF_PPC64_ADDR32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
@@ -403,6 +440,16 @@ private:
return RelocToApply(static_cast<uint32_t>(Res), 4);
}
+ RelocToApply visitELF_HEX_32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 4);
+ }
+
+ RelocToApply visitELF_AMDGPU_ABS32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 4);
+ }
+
/// I386 COFF
RelocToApply visitCOFF_I386_SECREL(RelocationRef R, uint64_t Value) {
return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
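
A sketch of how the extended RelocVisitor is typically driven (illustrative; Obj, Reloc and SymAddr are assumed to come from an ObjectFile walk elsewhere):

#include <cstdint>
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/RelocVisitor.h"

// Applies a single relocation value; returns 0 if the target/type pair is
// unsupported by the visitor.
uint64_t applyRelocSketch(const llvm::object::ObjectFile &Obj,
                          const llvm::object::RelocationRef &Reloc,
                          uint64_t SymAddr) {
  llvm::object::RelocVisitor V(Obj);
  llvm::object::RelocToApply R = V.visit(Reloc.getType(), Reloc, SymAddr);
  return V.error() ? 0 : static_cast<uint64_t>(R.Value);
}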
diff --git a/include/llvm/Object/StackMapParser.h b/include/llvm/Object/StackMapParser.h
index e58162de1501..efea62bb3cb3 100644
--- a/include/llvm/Object/StackMapParser.h
+++ b/include/llvm/Object/StackMapParser.h
@@ -17,7 +17,7 @@
namespace llvm {
template <support::endianness Endianness>
-class StackMapV1Parser {
+class StackMapV2Parser {
public:
template <typename AccessorT>
@@ -47,7 +47,7 @@ public:
/// Accessor for function records.
class FunctionAccessor {
- friend class StackMapV1Parser;
+ friend class StackMapV2Parser;
public:
/// Get the function address.
@@ -56,14 +56,19 @@ public:
}
/// Get the function's stack size.
- uint32_t getStackSize() const {
+ uint64_t getStackSize() const {
return read<uint64_t>(P + sizeof(uint64_t));
}
+
+ /// Get the number of callsite records.
+ uint64_t getRecordCount() const {
+ return read<uint64_t>(P + (2 * sizeof(uint64_t)));
+ }
private:
FunctionAccessor(const uint8_t *P) : P(P) {}
- const static int FunctionAccessorSize = 2 * sizeof(uint64_t);
+ const static int FunctionAccessorSize = 3 * sizeof(uint64_t);
FunctionAccessor next() const {
return FunctionAccessor(P + FunctionAccessorSize);
@@ -74,7 +79,7 @@ public:
/// Accessor for constants.
class ConstantAccessor {
- friend class StackMapV1Parser;
+ friend class StackMapV2Parser;
public:
/// Return the value of this constant.
@@ -103,7 +108,7 @@ public:
/// Accessor for location records.
class LocationAccessor {
- friend class StackMapV1Parser;
+ friend class StackMapV2Parser;
friend class RecordAccessor;
public:
@@ -156,7 +161,7 @@ public:
/// Accessor for stackmap live-out fields.
class LiveOutAccessor {
- friend class StackMapV1Parser;
+ friend class StackMapV2Parser;
friend class RecordAccessor;
public:
@@ -188,7 +193,7 @@ public:
/// Accessor for stackmap records.
class RecordAccessor {
- friend class StackMapV1Parser;
+ friend class StackMapV2Parser;
public:
typedef AccessorIterator<LocationAccessor> location_iterator;
@@ -292,14 +297,14 @@ public:
const uint8_t *P;
};
- /// Construct a parser for a version-1 stackmap. StackMap data will be read
+ /// Construct a parser for a version-2 stackmap. StackMap data will be read
/// from the given array.
- StackMapV1Parser(ArrayRef<uint8_t> StackMapSection)
+ StackMapV2Parser(ArrayRef<uint8_t> StackMapSection)
: StackMapSection(StackMapSection) {
ConstantsListOffset = FunctionListOffset + getNumFunctions() * FunctionSize;
- assert(StackMapSection[0] == 1 &&
- "StackMapV1Parser can only parse version 1 stackmaps");
+ assert(StackMapSection[0] == 2 &&
+ "StackMapV2Parser can only parse version 2 stackmaps");
unsigned CurrentRecordOffset =
ConstantsListOffset + getNumConstants() * ConstantSize;
@@ -315,8 +320,8 @@ public:
typedef AccessorIterator<ConstantAccessor> constant_iterator;
typedef AccessorIterator<RecordAccessor> record_iterator;
- /// Get the version number of this stackmap. (Always returns 1).
- unsigned getVersion() const { return 1; }
+ /// Get the version number of this stackmap. (Always returns 2).
+ unsigned getVersion() const { return 2; }
/// Get the number of functions in the stack map.
uint32_t getNumFunctions() const {
@@ -420,7 +425,7 @@ private:
static const unsigned NumRecordsOffset = NumConstantsOffset + sizeof(uint32_t);
static const unsigned FunctionListOffset = NumRecordsOffset + sizeof(uint32_t);
- static const unsigned FunctionSize = 2 * sizeof(uint64_t);
+ static const unsigned FunctionSize = 3 * sizeof(uint64_t);
static const unsigned ConstantSize = sizeof(uint64_t);
std::size_t getFunctionOffset(unsigned FunctionIndex) const {
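
A hedged sketch of driving the renamed StackMapV2Parser over a __llvm_stackmaps payload (illustrative; Contents is assumed to hold the raw little-endian section bytes, and functions() is assumed to be the usual range over FunctionAccessor records):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/StackMapParser.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"

// Prints one line per function record, including the new record-count field.
void dumpStackMapSketch(llvm::ArrayRef<uint8_t> Contents) {
  llvm::StackMapV2Parser<llvm::support::little> Parser(Contents);
  llvm::outs() << "stackmap version " << Parser.getVersion() << "\n";
  for (const auto &F : Parser.functions())
    llvm::outs() << "  addr=" << F.getFunctionAddress()
                 << " stack=" << F.getStackSize()
                 << " records=" << F.getRecordCount() << "\n";
}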
diff --git a/include/llvm/Object/SymbolSize.h b/include/llvm/Object/SymbolSize.h
index f2ce70f4208d..1a1dc8752943 100644
--- a/include/llvm/Object/SymbolSize.h
+++ b/include/llvm/Object/SymbolSize.h
@@ -15,8 +15,19 @@
namespace llvm {
namespace object {
+
+struct SymEntry {
+ symbol_iterator I;
+ uint64_t Address;
+ unsigned Number;
+ unsigned SectionID;
+};
+
+int compareAddress(const SymEntry *A, const SymEntry *B);
+
std::vector<std::pair<SymbolRef, uint64_t>>
computeSymbolSizes(const ObjectFile &O);
+
}
} // namespace llvm
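
A small sketch using computeSymbolSizes together with the Expected-based symbol-name API (illustrative; the printing helper is made up):

#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolSize.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

// Prints "name size" pairs; symbols whose name lookup fails are skipped.
void printSymbolSizes(const llvm::object::ObjectFile &Obj) {
  for (const auto &P : llvm::object::computeSymbolSizes(Obj)) {
    llvm::Expected<llvm::StringRef> Name = P.first.getName();
    if (!Name) {
      llvm::consumeError(Name.takeError());
      continue;
    }
    llvm::outs() << *Name << " " << P.second << "\n";
  }
}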
diff --git a/include/llvm/Object/SymbolicFile.h b/include/llvm/Object/SymbolicFile.h
index 894c2670f265..af62e62c51d8 100644
--- a/include/llvm/Object/SymbolicFile.h
+++ b/include/llvm/Object/SymbolicFile.h
@@ -88,7 +88,6 @@ class BasicSymbolRef {
const SymbolicFile *OwningObject;
public:
- // FIXME: should we add a SF_Text?
enum Flags : unsigned {
SF_None = 0,
SF_Undefined = 1U << 0, // Symbol is defined in another object file
@@ -103,6 +102,8 @@ public:
SF_Thumb = 1U << 8, // Thumb symbol in a 32-bit ARM binary
SF_Hidden = 1U << 9, // Symbol has hidden visibility
SF_Const = 1U << 10, // Symbol value is constant
+ SF_Executable = 1U << 11, // Symbol points to an executable section
+ // (IR only)
};
BasicSymbolRef() : OwningObject(nullptr) { }
@@ -137,17 +138,11 @@ public:
virtual uint32_t getSymbolFlags(DataRefImpl Symb) const = 0;
- virtual basic_symbol_iterator symbol_begin_impl() const = 0;
+ virtual basic_symbol_iterator symbol_begin() const = 0;
- virtual basic_symbol_iterator symbol_end_impl() const = 0;
+ virtual basic_symbol_iterator symbol_end() const = 0;
// convenience wrappers.
- basic_symbol_iterator symbol_begin() const {
- return symbol_begin_impl();
- }
- basic_symbol_iterator symbol_end() const {
- return symbol_end_impl();
- }
typedef iterator_range<basic_symbol_iterator> basic_symbol_iterator_range;
basic_symbol_iterator_range symbols() const {
return basic_symbol_iterator_range(symbol_begin(), symbol_end());
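
With symbol_begin()/symbol_end() made directly virtual, the symbols() range is unchanged for callers; a sketch querying the new SF_Executable flag (illustrative helper):

#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/raw_ostream.h"

// Counts IR symbols that point into an executable section.
void countExecutableSymbols(const llvm::object::SymbolicFile &SymFile) {
  unsigned N = 0;
  for (const llvm::object::BasicSymbolRef &Sym : SymFile.symbols())
    if (Sym.getFlags() & llvm::object::BasicSymbolRef::SF_Executable)
      ++N;
  llvm::outs() << N << " executable symbols\n";
}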
diff --git a/include/llvm/Object/Wasm.h b/include/llvm/Object/Wasm.h
new file mode 100644
index 000000000000..2ece6a6c3770
--- /dev/null
+++ b/include/llvm/Object/Wasm.h
@@ -0,0 +1,99 @@
+//===- WasmObjectFile.h - Wasm object file implementation -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the WasmObjectFile class, which implements the ObjectFile
+// interface for Wasm files.
+//
+// See: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_WASM_H
+#define LLVM_OBJECT_WASM_H
+
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Wasm.h"
+
+namespace llvm {
+namespace object {
+
+class WasmObjectFile : public ObjectFile {
+public:
+ WasmObjectFile(MemoryBufferRef Object, Error &Err);
+ const wasm::WasmObjectHeader &getHeader() const;
+ const wasm::WasmSection *getWasmSection(const SectionRef &Section) const;
+ static bool classof(const Binary *v) { return v->isWasm(); }
+
+protected:
+ void moveSymbolNext(DataRefImpl &Symb) const override;
+
+ std::error_code printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const override;
+
+ uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+
+ basic_symbol_iterator symbol_begin() const override;
+
+ basic_symbol_iterator symbol_end() const override;
+ Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
+
+ Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+ uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+ uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
+ uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+ Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
+ Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+
+ // Overrides from SectionRef.
+ void moveSectionNext(DataRefImpl &Sec) const override;
+ std::error_code getSectionName(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionSize(DataRefImpl Sec) const override;
+ std::error_code getSectionContents(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+ bool isSectionCompressed(DataRefImpl Sec) const override;
+ bool isSectionText(DataRefImpl Sec) const override;
+ bool isSectionData(DataRefImpl Sec) const override;
+ bool isSectionBSS(DataRefImpl Sec) const override;
+ bool isSectionVirtual(DataRefImpl Sec) const override;
+ bool isSectionBitcode(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+ section_iterator getRelocatedSection(DataRefImpl Sec) const override;
+
+ // Overrides from RelocationRef.
+ void moveRelocationNext(DataRefImpl &Rel) const override;
+ uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+ symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+ uint64_t getRelocationType(DataRefImpl Rel) const override;
+ void getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const override;
+
+ section_iterator section_begin() const override;
+ section_iterator section_end() const override;
+ uint8_t getBytesInAddress() const override;
+ StringRef getFileFormatName() const override;
+ unsigned getArch() const override;
+ SubtargetFeatures getFeatures() const override;
+ bool isRelocatableObject() const override;
+
+private:
+ const uint8_t *getPtr(size_t Offset) const;
+ Error parseUserSection(wasm::WasmSection &Sec, const uint8_t *Ptr,
+ size_t Length);
+
+ wasm::WasmObjectHeader Header;
+ std::vector<wasm::WasmSection> Sections;
+};
+}
+}
+
+#endif
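
A minimal sketch of constructing the new WasmObjectFile from a buffer (illustrative; only the error-reporting constructor declared above is exercised):

#include "llvm/Object/Wasm.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"

// Returns true if the buffer parses as a Wasm object file.
bool isWasmParsable(llvm::MemoryBufferRef Buf) {
  llvm::Error Err = llvm::Error::success();
  llvm::object::WasmObjectFile Obj(Buf, Err);
  if (Err) {
    llvm::consumeError(std::move(Err));
    return false;
  }
  return true;
}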
diff --git a/include/llvm/ObjectYAML/DWARFYAML.h b/include/llvm/ObjectYAML/DWARFYAML.h
new file mode 100644
index 000000000000..222cad61a992
--- /dev/null
+++ b/include/llvm/ObjectYAML/DWARFYAML.h
@@ -0,0 +1,203 @@
+//===- DWARFYAML.h - DWARF YAMLIO implementation ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares classes for handling the YAML representation
+/// of DWARF Debug Info.
+///
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_OBJECTYAML_DWARFYAML_H
+#define LLVM_OBJECTYAML_DWARFYAML_H
+
+#include "llvm/ObjectYAML/YAML.h"
+#include "llvm/Support/Dwarf.h"
+
+namespace llvm {
+namespace DWARFYAML {
+
+struct AttributeAbbrev {
+ llvm::dwarf::Attribute Attribute;
+ llvm::dwarf::Form Form;
+};
+
+struct Abbrev {
+ llvm::yaml::Hex32 Code;
+ llvm::dwarf::Tag Tag;
+ llvm::dwarf::Constants Children;
+ std::vector<AttributeAbbrev> Attributes;
+};
+
+struct ARangeDescriptor {
+ llvm::yaml::Hex64 Address;
+ uint64_t Length;
+};
+
+struct ARange {
+ uint32_t Length;
+ uint16_t Version;
+ uint32_t CuOffset;
+ uint8_t AddrSize;
+ uint8_t SegSize;
+ std::vector<ARangeDescriptor> Descriptors;
+};
+
+struct PubEntry {
+ llvm::yaml::Hex32 DieOffset;
+ llvm::yaml::Hex8 Descriptor;
+ StringRef Name;
+};
+
+struct PubSection {
+ PubSection() : IsGNUStyle(false) {}
+
+ uint32_t Length;
+ uint16_t Version;
+ uint32_t UnitOffset;
+ uint32_t UnitSize;
+ bool IsGNUStyle;
+ std::vector<PubEntry> Entries;
+};
+
+struct FormValue {
+ llvm::yaml::Hex64 Value;
+ StringRef CStr;
+ std::vector<llvm::yaml::Hex8> BlockData;
+};
+
+struct Entry {
+ llvm::yaml::Hex32 AbbrCode;
+ std::vector<FormValue> Values;
+};
+
+struct Unit {
+ uint32_t Length;
+ uint16_t Version;
+ uint32_t AbbrOffset;
+ uint8_t AddrSize;
+ std::vector<Entry> Entries;
+};
+
+struct Data {
+ bool IsLittleEndian;
+ std::vector<Abbrev> AbbrevDecls;
+ std::vector<StringRef> DebugStrings;
+ std::vector<ARange> ARanges;
+ PubSection PubNames;
+ PubSection PubTypes;
+
+ PubSection GNUPubNames;
+ PubSection GNUPubTypes;
+
+ std::vector<Unit> CompileUnits;
+
+ bool isEmpty() const;
+};
+
+} // namespace llvm::DWARFYAML
+} // namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(uint8_t)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::StringRef)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex8)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::AttributeAbbrev)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Abbrev)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::ARangeDescriptor)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::ARange)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::PubEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Unit)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::FormValue)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Entry)
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<DWARFYAML::Data> {
+ static void mapping(IO &IO, DWARFYAML::Data &DWARF);
+};
+
+template <> struct MappingTraits<DWARFYAML::Abbrev> {
+ static void mapping(IO &IO, DWARFYAML::Abbrev &Abbrev);
+};
+
+template <> struct MappingTraits<DWARFYAML::AttributeAbbrev> {
+ static void mapping(IO &IO, DWARFYAML::AttributeAbbrev &AttAbbrev);
+};
+
+template <> struct MappingTraits<DWARFYAML::ARangeDescriptor> {
+ static void mapping(IO &IO, DWARFYAML::ARangeDescriptor &Descriptor);
+};
+
+template <> struct MappingTraits<DWARFYAML::ARange> {
+ static void mapping(IO &IO, DWARFYAML::ARange &Range);
+};
+
+template <> struct MappingTraits<DWARFYAML::PubEntry> {
+ static void mapping(IO &IO, DWARFYAML::PubEntry &Entry);
+};
+
+template <> struct MappingTraits<DWARFYAML::PubSection> {
+ static void mapping(IO &IO, DWARFYAML::PubSection &Section);
+};
+
+template <> struct MappingTraits<DWARFYAML::Unit> {
+ static void mapping(IO &IO, DWARFYAML::Unit &Unit);
+};
+
+template <> struct MappingTraits<DWARFYAML::Entry> {
+ static void mapping(IO &IO, DWARFYAML::Entry &Entry);
+};
+
+template <> struct MappingTraits<DWARFYAML::FormValue> {
+ static void mapping(IO &IO, DWARFYAML::FormValue &FormValue);
+};
+
+#define HANDLE_DW_TAG(unused, name) \
+ io.enumCase(value, "DW_TAG_" #name, dwarf::DW_TAG_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::Tag> {
+ static void enumeration(IO &io, dwarf::Tag &value) {
+#include "llvm/Support/Dwarf.def"
+ io.enumFallback<Hex16>(value);
+ }
+};
+
+#define HANDLE_DW_AT(unused, name) \
+ io.enumCase(value, "DW_AT_" #name, dwarf::DW_AT_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::Attribute> {
+ static void enumeration(IO &io, dwarf::Attribute &value) {
+#include "llvm/Support/Dwarf.def"
+ io.enumFallback<Hex16>(value);
+ }
+};
+
+#define HANDLE_DW_FORM(unused, name) \
+ io.enumCase(value, "DW_FORM_" #name, dwarf::DW_FORM_##name);
+
+template <> struct ScalarEnumerationTraits<dwarf::Form> {
+ static void enumeration(IO &io, dwarf::Form &value) {
+#include "llvm/Support/Dwarf.def"
+ io.enumFallback<Hex16>(value);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<dwarf::Constants> {
+ static void enumeration(IO &io, dwarf::Constants &value) {
+ io.enumCase(value, "DW_CHILDREN_no", dwarf::DW_CHILDREN_no);
+ io.enumCase(value, "DW_CHILDREN_yes", dwarf::DW_CHILDREN_yes);
+ io.enumFallback<Hex16>(value);
+ }
+};
+
+} // namespace llvm::yaml
+} // namespace llvm
+
+#endif
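
A sketch of reading the DWARFYAML::Data mapping with LLVM's YAML I/O, using the traits declared above (illustrative; the YAML text is supplied by the caller):

#include "llvm/ObjectYAML/DWARFYAML.h"
#include "llvm/Support/YAMLTraits.h"

// Parses YAML text into the DWARF description; returns false on a YAML error.
bool parseDWARFYAML(llvm::StringRef Yaml, llvm::DWARFYAML::Data &DWARF) {
  llvm::yaml::Input YIn(Yaml);
  YIn >> DWARF;
  return !YIn.error();
}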
diff --git a/include/llvm/ObjectYAML/MachOYAML.h b/include/llvm/ObjectYAML/MachOYAML.h
index bb15e64789d0..6b7a924f5143 100644
--- a/include/llvm/ObjectYAML/MachOYAML.h
+++ b/include/llvm/ObjectYAML/MachOYAML.h
@@ -17,6 +17,7 @@
#define LLVM_OBJECTYAML_MACHOYAML_H
#include "llvm/ObjectYAML/YAML.h"
+#include "llvm/ObjectYAML/DWARFYAML.h"
#include "llvm/Support/MachO.h"
namespace llvm {
@@ -59,7 +60,7 @@ struct LoadCommand {
struct NListEntry {
uint32_t n_strx;
- uint8_t n_type;
+ llvm::yaml::Hex8 n_type;
uint8_t n_sect;
uint16_t n_desc;
uint64_t n_value;
@@ -100,13 +101,17 @@ struct LinkEditData {
MachOYAML::ExportEntry ExportTrie;
std::vector<NListEntry> NameList;
std::vector<StringRef> StringTable;
+
+ bool isEmpty() const;
};
struct Object {
+ bool IsLittleEndian;
FileHeader Header;
std::vector<LoadCommand> LoadCommands;
std::vector<Section> Sections;
LinkEditData LinkEdit;
+ DWARFYAML::Data DWARF;
};
struct FatHeader {
@@ -134,14 +139,12 @@ struct UniversalBinary {
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::LoadCommand)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Section)
-LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex8)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex64)
LLVM_YAML_IS_SEQUENCE_VECTOR(int64_t)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::RebaseOpcode)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::BindOpcode)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::ExportEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::NListEntry)
-LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::StringRef)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Object)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::FatArch)
diff --git a/include/llvm/Option/ArgList.h b/include/llvm/Option/ArgList.h
index 89771b5c3cf1..53cb0d8dec4d 100644
--- a/include/llvm/Option/ArgList.h
+++ b/include/llvm/Option/ArgList.h
@@ -259,6 +259,10 @@ public:
void AddLastArg(ArgStringList &Output, OptSpecifier Id0,
OptSpecifier Id1) const;
+ /// AddAllArgsExcept - Render all arguments matching any of the given ids
+ /// and not matching any of the excluded ids.
+ void AddAllArgsExcept(ArgStringList &Output, ArrayRef<OptSpecifier> Ids,
+ ArrayRef<OptSpecifier> ExcludeIds) const;
/// AddAllArgs - Render all arguments matching any of the given ids.
void AddAllArgs(ArgStringList &Output, ArrayRef<OptSpecifier> Ids) const;
diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h
index 2a21b82876bb..e9c8ca3072c7 100644
--- a/include/llvm/Pass.h
+++ b/include/llvm/Pass.h
@@ -97,7 +97,7 @@ public:
/// implemented in terms of the name that is registered by one of the
/// Registration templates, but can be overloaded directly.
///
- virtual const char *getPassName() const;
+ virtual StringRef getPassName() const;
/// getPassID - Return the PassID number that corresponds to this pass.
AnalysisID getPassID() const {
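
With getPassName() now returning StringRef, a legacy pass override looks like this (sketch; the pass itself is a do-nothing placeholder):

#include "llvm/ADT/StringRef.h"
#include "llvm/Pass.h"

namespace {
struct HelloSketchPass : public llvm::FunctionPass {
  static char ID;
  HelloSketchPass() : llvm::FunctionPass(ID) {}
  llvm::StringRef getPassName() const override { return "Hello sketch pass"; }
  bool runOnFunction(llvm::Function &) override { return false; }
};
char HelloSketchPass::ID = 0;
} // end anonymous namespace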
diff --git a/include/llvm/PassInfo.h b/include/llvm/PassInfo.h
index cee4ade323e4..21ade85b682f 100644
--- a/include/llvm/PassInfo.h
+++ b/include/llvm/PassInfo.h
@@ -13,6 +13,8 @@
#ifndef LLVM_PASSINFO_H
#define LLVM_PASSINFO_H
+#include "llvm/ADT/StringRef.h"
+
#include <cassert>
#include <vector>
@@ -33,8 +35,8 @@ public:
typedef Pass *(*TargetMachineCtor_t)(TargetMachine *);
private:
- const char *const PassName; // Nice name for Pass
- const char *const PassArgument; // Command Line argument to run this pass
+ StringRef PassName; // Nice name for Pass
+ StringRef PassArgument; // Command Line argument to run this pass
const void *PassID;
const bool IsCFGOnlyPass; // Pass only looks at the CFG.
const bool IsAnalysis; // True if an analysis pass.
@@ -47,8 +49,8 @@ private:
public:
/// PassInfo ctor - Do not call this directly, this should only be invoked
/// through RegisterPass.
- PassInfo(const char *name, const char *arg, const void *pi,
- NormalCtor_t normal, bool isCFGOnly, bool is_analysis,
+ PassInfo(StringRef name, StringRef arg, const void *pi, NormalCtor_t normal,
+ bool isCFGOnly, bool is_analysis,
TargetMachineCtor_t machine = nullptr)
: PassName(name), PassArgument(arg), PassID(pi), IsCFGOnlyPass(isCFGOnly),
IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal),
@@ -56,20 +58,20 @@ public:
/// PassInfo ctor - Do not call this directly, this should only be invoked
/// through RegisterPass. This version is for use by analysis groups; it
/// does not auto-register the pass.
- PassInfo(const char *name, const void *pi)
+ PassInfo(StringRef name, const void *pi)
: PassName(name), PassArgument(""), PassID(pi), IsCFGOnlyPass(false),
IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(nullptr),
TargetMachineCtor(nullptr) {}
/// getPassName - Return the friendly name for the pass, never returns null
///
- const char *getPassName() const { return PassName; }
+ StringRef getPassName() const { return PassName; }
/// getPassArgument - Return the command line option that may be passed to
/// 'opt' that will cause this pass to be run. This will return null if there
/// is no argument.
///
- const char *getPassArgument() const { return PassArgument; }
+ StringRef getPassArgument() const { return PassArgument; }
/// getTypeInfo - Return the id object for the pass...
/// TODO : Rename
diff --git a/include/llvm/PassSupport.h b/include/llvm/PassSupport.h
index ba6d84f04ba0..e77a0b9882b2 100644
--- a/include/llvm/PassSupport.h
+++ b/include/llvm/PassSupport.h
@@ -101,7 +101,7 @@ template <typename PassName> Pass *callTargetMachineCtor(TargetMachine *TM) {
///
template <typename passName> struct RegisterPass : public PassInfo {
// Register Pass using default constructor...
- RegisterPass(const char *PassArg, const char *Name, bool CFGOnly = false,
+ RegisterPass(StringRef PassArg, StringRef Name, bool CFGOnly = false,
bool is_analysis = false)
: PassInfo(Name, PassArg, &passName::ID,
PassInfo::NormalCtor_t(callDefaultCtor<passName>), CFGOnly,
@@ -131,7 +131,7 @@ template <typename passName> struct RegisterPass : public PassInfo {
///
class RegisterAGBase : public PassInfo {
public:
- RegisterAGBase(const char *Name, const void *InterfaceID,
+ RegisterAGBase(StringRef Name, const void *InterfaceID,
const void *PassID = nullptr, bool isDefault = false);
};
diff --git a/include/llvm/Passes/PassBuilder.h b/include/llvm/Passes/PassBuilder.h
index 9f0a9c6e1380..ba3238c86044 100644
--- a/include/llvm/Passes/PassBuilder.h
+++ b/include/llvm/Passes/PassBuilder.h
@@ -16,9 +16,11 @@
#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H
+#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
+#include <vector>
namespace llvm {
class StringRef;
@@ -163,35 +165,68 @@ public:
/// additional analyses.
void registerLoopAnalyses(LoopAnalysisManager &LAM);
- /// \brief Add a per-module default optimization pipeline to a pass manager.
+ /// Construct the core LLVM function canonicalization and simplification
+ /// pipeline.
+ ///
+ /// This is a long pipeline and uses most of the per-function optimization
+ /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
+ /// repeatedly over the IR and is not expected to destroy important
+ /// information about the semantics of the IR.
+ ///
+ /// Note that \p Level cannot be `O0` here. The pipelines produced are
+ /// only intended for use when attempting to optimize code. If frontends
+ /// require some transformations for semantic reasons, they should explicitly
+ /// build them.
+ FunctionPassManager
+ buildFunctionSimplificationPipeline(OptimizationLevel Level,
+ bool DebugLogging = false);
+
+ /// Build a per-module default optimization pipeline.
///
/// This provides a good default optimization pipeline for per-module
/// optimization and code generation without any link-time optimization. It
/// typically correspond to frontend "-O[123]" options for optimization
/// levels \c O1, \c O2 and \c O3 resp.
- void addPerModuleDefaultPipeline(ModulePassManager &MPM,
- OptimizationLevel Level,
- bool DebugLogging = false);
+ ///
+ /// Note that \p Level cannot be `O0` here. The pipelines produced are
+ /// only intended for use when attempting to optimize code. If frontends
+ /// require some transformations for semantic reasons, they should explicitly
+ /// build them.
+ ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
+ bool DebugLogging = false);
- /// \brief Add a pre-link, LTO-targeting default optimization pipeline to
- /// a pass manager.
+ /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
+ /// manager.
///
/// This adds the pre-link optimizations tuned to work well with a later LTO
/// run. It works to minimize the IR which needs to be analyzed without
/// making irreversible decisions which could be made better during the LTO
/// run.
- void addLTOPreLinkDefaultPipeline(ModulePassManager &MPM,
- OptimizationLevel Level,
- bool DebugLogging = false);
+ ///
+ /// Note that \p Level cannot be `O0` here. The pipelines produced are
+ /// only intended for use when attempting to optimize code. If frontends
+ /// require some transformations for semantic reasons, they should explicitly
+ /// build them.
+ ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
+ bool DebugLogging = false);
- /// \brief Add an LTO default optimization pipeline to a pass manager.
+ /// Build an LTO default optimization pipeline to a pass manager.
///
/// This provides a good default optimization pipeline for link-time
/// optimization and code generation. It is particularly tuned to fit well
/// when IR coming into the LTO phase was first run through \c
/// addPreLinkLTODefaultPipeline, and the two coordinate closely.
- void addLTODefaultPipeline(ModulePassManager &MPM, OptimizationLevel Level,
- bool DebugLogging = false);
+ ///
+ /// Note that \p Level cannot be `O0` here. The pipelines produced are
+ /// only intended for use when attempting to optimize code. If frontends
+ /// require some transformations for semantic reasons, they should explicitly
+ /// build them.
+ ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
+ bool DebugLogging = false);
+
+ /// Build the default `AAManager` with the default alias analysis pipeline
+ /// registered.
+ AAManager buildDefaultAAPipeline();
/// \brief Parse a textual pass pipeline description into a \c ModulePassManager.
///
@@ -242,20 +277,36 @@ public:
bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
private:
- bool parseModulePassName(ModulePassManager &MPM, StringRef Name,
- bool DebugLogging);
- bool parseCGSCCPassName(CGSCCPassManager &CGPM, StringRef Name);
- bool parseFunctionPassName(FunctionPassManager &FPM, StringRef Name);
- bool parseLoopPassName(LoopPassManager &LPM, StringRef Name);
+ /// A struct to capture parsed pass pipeline names.
+ struct PipelineElement {
+ StringRef Name;
+ std::vector<PipelineElement> InnerPipeline;
+ };
+
+ static Optional<std::vector<PipelineElement>>
+ parsePipelineText(StringRef Text);
+
+ bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
+ bool VerifyEachPass, bool DebugLogging);
+ bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
+ bool VerifyEachPass, bool DebugLogging);
+ bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
+ bool VerifyEachPass, bool DebugLogging);
+ bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
+ bool VerifyEachPass, bool DebugLogging);
bool parseAAPassName(AAManager &AA, StringRef Name);
- bool parseLoopPassPipeline(LoopPassManager &LPM, StringRef &PipelineText,
+
+ bool parseLoopPassPipeline(LoopPassManager &LPM,
+ ArrayRef<PipelineElement> Pipeline,
bool VerifyEachPass, bool DebugLogging);
bool parseFunctionPassPipeline(FunctionPassManager &FPM,
- StringRef &PipelineText, bool VerifyEachPass,
- bool DebugLogging);
- bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM, StringRef &PipelineText,
+ ArrayRef<PipelineElement> Pipeline,
+ bool VerifyEachPass, bool DebugLogging);
+ bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
+ ArrayRef<PipelineElement> Pipeline,
bool VerifyEachPass, bool DebugLogging);
- bool parseModulePassPipeline(ModulePassManager &MPM, StringRef &PipelineText,
+ bool parseModulePassPipeline(ModulePassManager &MPM,
+ ArrayRef<PipelineElement> Pipeline,
bool VerifyEachPass, bool DebugLogging);
};
}
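
A sketch of wiring the new pipeline-building entry points (illustrative; it assumes the analysis managers behind MAM have already been populated on PB via the register*Analyses calls above):

#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"

// Builds the default -O2 module pipeline and runs it over M.
void runDefaultO2(llvm::PassBuilder &PB, llvm::Module &M,
                  llvm::ModuleAnalysisManager &MAM) {
  llvm::ModulePassManager MPM =
      PB.buildPerModuleDefaultPipeline(llvm::PassBuilder::O2);
  MPM.run(M, MAM);
}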
diff --git a/include/llvm/ProfileData/Coverage/CoverageMapping.h b/include/llvm/ProfileData/Coverage/CoverageMapping.h
index 6afde56122f0..d6051ffb3f8d 100644
--- a/include/llvm/ProfileData/Coverage/CoverageMapping.h
+++ b/include/llvm/ProfileData/Coverage/CoverageMapping.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ProfileData/InstrProf.h"
@@ -75,6 +76,7 @@ class IndexedInstrProfReader;
namespace coverage {
class CoverageMappingReader;
+struct CoverageMappingRecord;
class CoverageMapping;
struct CounterExpressions;
@@ -244,22 +246,6 @@ struct CounterMappingRegion {
inline std::pair<unsigned, unsigned> endLoc() const {
return std::pair<unsigned, unsigned>(LineEnd, ColumnEnd);
}
-
- bool operator<(const CounterMappingRegion &Other) const {
- if (FileID != Other.FileID)
- return FileID < Other.FileID;
- return startLoc() < Other.startLoc();
- }
-
- bool contains(const CounterMappingRegion &Other) const {
- if (FileID != Other.FileID)
- return false;
- if (startLoc() > Other.startLoc())
- return false;
- if (endLoc() < Other.endLoc())
- return false;
- return true;
- }
};
/// \brief Associates a source range with an execution count.
@@ -305,6 +291,9 @@ struct FunctionRecord {
FunctionRecord(StringRef Name, ArrayRef<StringRef> Filenames)
: Name(Name), Filenames(Filenames.begin(), Filenames.end()) {}
+ FunctionRecord(FunctionRecord &&FR) = default;
+ FunctionRecord &operator=(FunctionRecord &&) = default;
+
void pushRegion(CounterMappingRegion Region, uint64_t Count) {
if (CountedRegions.empty())
ExecutionCount = Count;
@@ -411,19 +400,19 @@ public:
CoverageData(StringRef Filename) : Filename(Filename) {}
- CoverageData(CoverageData &&RHS)
- : Filename(std::move(RHS.Filename)), Segments(std::move(RHS.Segments)),
- Expansions(std::move(RHS.Expansions)) {}
-
/// \brief Get the name of the file this data covers.
StringRef getFilename() const { return Filename; }
- std::vector<CoverageSegment>::iterator begin() { return Segments.begin(); }
- std::vector<CoverageSegment>::iterator end() { return Segments.end(); }
- bool empty() { return Segments.empty(); }
+ std::vector<CoverageSegment>::const_iterator begin() const {
+ return Segments.begin();
+ }
+ std::vector<CoverageSegment>::const_iterator end() const {
+ return Segments.end();
+ }
+ bool empty() const { return Segments.empty(); }
/// \brief Expansions that can be further processed.
- ArrayRef<ExpansionRecord> getExpansions() { return Expansions; }
+ ArrayRef<ExpansionRecord> getExpansions() const { return Expansions; }
};
/// \brief The mapping of profile information to coverage data.
@@ -431,20 +420,38 @@ public:
/// This is the main interface to get coverage information, using a profile to
/// fill out execution counts.
class CoverageMapping {
+ StringSet<> FunctionNames;
std::vector<FunctionRecord> Functions;
unsigned MismatchedFunctionCount;
CoverageMapping() : MismatchedFunctionCount(0) {}
+ CoverageMapping(const CoverageMapping &) = delete;
+ const CoverageMapping &operator=(const CoverageMapping &) = delete;
+
+ /// \brief Add a function record corresponding to \p Record.
+ Error loadFunctionRecord(const CoverageMappingRecord &Record,
+ IndexedInstrProfReader &ProfileReader);
+
public:
/// \brief Load the coverage mapping using the given readers.
static Expected<std::unique_ptr<CoverageMapping>>
load(CoverageMappingReader &CoverageReader,
IndexedInstrProfReader &ProfileReader);
+ static Expected<std::unique_ptr<CoverageMapping>>
+ load(ArrayRef<std::unique_ptr<CoverageMappingReader>> CoverageReaders,
+ IndexedInstrProfReader &ProfileReader);
+
/// \brief Load the coverage mapping from the given files.
static Expected<std::unique_ptr<CoverageMapping>>
load(StringRef ObjectFilename, StringRef ProfileFilename,
+ StringRef Arch = StringRef()) {
+ return load(ArrayRef<StringRef>(ObjectFilename), ProfileFilename, Arch);
+ }
+
+ static Expected<std::unique_ptr<CoverageMapping>>
+ load(ArrayRef<StringRef> ObjectFilenames, StringRef ProfileFilename,
StringRef Arch = StringRef());
/// \brief The number of functions that couldn't have their profiles mapped.
@@ -453,7 +460,8 @@ public:
/// can't be associated with any coverage information.
unsigned getMismatchedCount() { return MismatchedFunctionCount; }
- /// \brief Returns the list of files that are covered.
+ /// \brief Returns a lexicographically sorted, unique list of files that are
+ /// covered.
std::vector<StringRef> getUniqueSourceFiles() const;
/// \brief Get the coverage for a particular file.
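A hedged sketch of the new multi-object load() overload together with the now-const CoverageData iteration; the object and profile paths are placeholders, and getCoverageForFile is assumed from the unchanged part of this header:

#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/Support/Error.h"
#include <memory>

static llvm::Error dumpCoveredFiles() {
  using namespace llvm::coverage;
  auto CovOrErr =
      CoverageMapping::load({"a.out", "libfoo.so"}, "default.profdata");
  if (!CovOrErr)
    return CovOrErr.takeError();
  std::unique_ptr<CoverageMapping> Cov = std::move(*CovOrErr);

  // getUniqueSourceFiles() is now documented to be sorted and de-duplicated.
  for (llvm::StringRef File : Cov->getUniqueSourceFiles()) {
    const CoverageData Data = Cov->getCoverageForFile(File);
    for (const CoverageSegment &Seg : Data) // const begin()/end() per this diff
      (void)Seg;
  }
  return llvm::Error::success();
}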
diff --git a/include/llvm/ProfileData/Coverage/CoverageMappingWriter.h b/include/llvm/ProfileData/Coverage/CoverageMappingWriter.h
index 10269cc50f35..24fb94647247 100644
--- a/include/llvm/ProfileData/Coverage/CoverageMappingWriter.h
+++ b/include/llvm/ProfileData/Coverage/CoverageMappingWriter.h
@@ -49,10 +49,6 @@ public:
: VirtualFileMapping(VirtualFileMapping), Expressions(Expressions),
MappingRegions(MappingRegions) {}
- CoverageMappingWriter(ArrayRef<CounterExpression> Expressions,
- MutableArrayRef<CounterMappingRegion> MappingRegions)
- : Expressions(Expressions), MappingRegions(MappingRegions) {}
-
/// \brief Write encoded coverage mapping data to the given output stream.
void write(raw_ostream &OS);
};
diff --git a/include/llvm/ProfileData/InstrProf.h b/include/llvm/ProfileData/InstrProf.h
index 75646b761659..094f3af005d3 100644
--- a/include/llvm/ProfileData/InstrProf.h
+++ b/include/llvm/ProfileData/InstrProf.h
@@ -55,6 +55,7 @@ inline StringRef getInstrProfNameSectionName(bool AddSegment) {
/// data.
inline StringRef getInstrProfDataSectionName(bool AddSegment) {
return AddSegment ? "__DATA," INSTR_PROF_DATA_SECT_NAME_STR
+ ",regular,live_support"
: INSTR_PROF_DATA_SECT_NAME_STR;
}
@@ -81,7 +82,7 @@ inline StringRef getInstrProfValueProfFuncName() {
/// Return the name of the section containing function coverage mapping
/// data.
inline StringRef getInstrProfCoverageSectionName(bool AddSegment) {
- return AddSegment ? "__DATA," INSTR_PROF_COVMAP_SECT_NAME_STR
+ return AddSegment ? "__LLVM_COV," INSTR_PROF_COVMAP_SECT_NAME_STR
: INSTR_PROF_COVMAP_SECT_NAME_STR;
}
@@ -155,7 +156,7 @@ inline StringRef getInstrProfInitFuncName() { return "__llvm_profile_init"; }
/// A reference to the variable causes the linker to link in the runtime
/// initialization module (which defines the hook variable).
inline StringRef getInstrProfRuntimeHookVarName() {
- return "__llvm_profile_runtime";
+ return INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_RUNTIME_VAR);
}
/// Return the name of the compiler generated function that references the
@@ -164,12 +165,6 @@ inline StringRef getInstrProfRuntimeHookVarUseFuncName() {
return "__llvm_profile_runtime_user";
}
-/// Return the name of the profile runtime interface that overrides the default
-/// profile data file name.
-inline StringRef getInstrProfFileOverriderFuncName() {
- return "__llvm_profile_override_default_filename";
-}
-
/// Return the marker used to separate PGO names during serialization.
inline StringRef getInstrProfNameSeparator() { return "\01"; }
@@ -231,7 +226,7 @@ Error collectPGOFuncNameStrings(const std::vector<GlobalVariable *> &NameVars,
std::string &Result, bool doCompression = true);
class InstrProfSymtab;
/// \c NameStrings is a string composed of one or more sub-strings encoded in
-/// the format described above. The substrings are seperated by 0 or more zero
+/// the format described above. The substrings are separated by 0 or more zero
/// bytes. This method decodes the string and populates the \c Symtab.
Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab);
@@ -274,6 +269,10 @@ MDNode *getPGOFuncNameMetadata(const Function &F);
/// declared by users only.
void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName);
+/// Check if we can use Comdat for profile variables. This will eliminate
+/// the duplicated profile variables for Comdat functions.
+bool needsComdatForCounter(const Function &F, const Module &M);
+
const std::error_category &instrprof_category();
enum class instrprof_error {
@@ -293,7 +292,8 @@ enum class instrprof_error {
counter_overflow,
value_site_count_mismatch,
compress_failed,
- uncompress_failed
+ uncompress_failed,
+ empty_raw_profile
};
inline std::error_code make_error_code(instrprof_error E) {
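A small sketch of the renamed section helpers and the new comdat query; the section-name expansions mentioned in the comments are best-effort readings of the INSTR_PROF_*_SECT_NAME_STR macros, not verbatim from this hunk:

#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/ProfileData/InstrProf.h"

static void inspectProfileLayout(const llvm::Function &F, const llvm::Module &M) {
  // On Mach-O the data section now carries ",regular,live_support" and the
  // coverage mapping lives in a dedicated __LLVM_COV segment.
  llvm::StringRef DataSect = llvm::getInstrProfDataSectionName(/*AddSegment=*/true);
  llvm::StringRef CovSect = llvm::getInstrProfCoverageSectionName(/*AddSegment=*/true);
  (void)DataSect;
  (void)CovSect;

  // Comdat functions can now share a single set of profile variables.
  bool UseComdat = llvm::needsComdatForCounter(F, M);
  (void)UseComdat;
}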
diff --git a/include/llvm/ProfileData/InstrProfData.inc b/include/llvm/ProfileData/InstrProfData.inc
index 4138e18fa22f..f7c22d10763c 100644
--- a/include/llvm/ProfileData/InstrProfData.inc
+++ b/include/llvm/ProfileData/InstrProfData.inc
@@ -603,7 +603,12 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define IR_LEVEL_PROF_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
+
+/* The variable that holds the name of the profile data
+ * specified via the command line. */
+#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
/* Runtime section names and name strings. */
#define INSTR_PROF_DATA_SECT_NAME __llvm_prf_data
diff --git a/include/llvm/ProfileData/InstrProfWriter.h b/include/llvm/ProfileData/InstrProfWriter.h
index 7d292731cccb..f7780fb45004 100644
--- a/include/llvm/ProfileData/InstrProfWriter.h
+++ b/include/llvm/ProfileData/InstrProfWriter.h
@@ -47,6 +47,8 @@ public:
/// for this function and the hash and number of counts match, each counter is
/// summed. Optionally scale counts by \p Weight.
Error addRecord(InstrProfRecord &&I, uint64_t Weight = 1);
+ /// Merge existing function counts from the given writer.
+ Error mergeRecordsFromWriter(InstrProfWriter &&IPW);
/// Write the profile to \c OS
void write(raw_fd_ostream &OS);
/// Write the profile in text format to \c OS
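A minimal sketch of the new writer-to-writer merge, using only the interfaces visible in this header; the helper name is hypothetical:

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

static llvm::Error mergeAndEmit(llvm::InstrProfWriter &Primary,
                                llvm::InstrProfWriter &&Secondary,
                                llvm::raw_fd_ostream &OS) {
  // Fold every record of Secondary into Primary, then write one indexed file.
  if (llvm::Error E = Primary.mergeRecordsFromWriter(std::move(Secondary)))
    return E;
  Primary.write(OS);
  return llvm::Error::success();
}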
diff --git a/include/llvm/ProfileData/ProfileCommon.h b/include/llvm/ProfileData/ProfileCommon.h
index ecb228ca59c4..e955755e5c9a 100644
--- a/include/llvm/ProfileData/ProfileCommon.h
+++ b/include/llvm/ProfileData/ProfileCommon.h
@@ -45,22 +45,21 @@ inline const char *getUnlikelySectionPrefix() { return ".unlikely"; }
class ProfileSummaryBuilder {
private:
- // We keep track of the number of times a count (block count or samples)
- // appears in the profile. The map is kept sorted in the descending order of
- // counts.
+ /// We keep track of the number of times a count (block count or samples)
+ /// appears in the profile. The map is kept sorted in the descending order of
+ /// counts.
std::map<uint64_t, uint32_t, std::greater<uint64_t>> CountFrequencies;
std::vector<uint32_t> DetailedSummaryCutoffs;
protected:
SummaryEntryVector DetailedSummary;
ProfileSummaryBuilder(std::vector<uint32_t> Cutoffs)
- : DetailedSummaryCutoffs(std::move(Cutoffs)), TotalCount(0), MaxCount(0),
- MaxFunctionCount(0), NumCounts(0), NumFunctions(0) {}
+ : DetailedSummaryCutoffs(std::move(Cutoffs)) {}
inline void addCount(uint64_t Count);
~ProfileSummaryBuilder() = default;
void computeDetailedSummary();
- uint64_t TotalCount, MaxCount, MaxFunctionCount;
- uint32_t NumCounts, NumFunctions;
+ uint64_t TotalCount = 0, MaxCount = 0, MaxFunctionCount = 0;
+ uint32_t NumCounts = 0, NumFunctions = 0;
public:
/// \brief A vector of useful cutoff values for detailed summary.
@@ -68,13 +67,13 @@ public:
};
class InstrProfSummaryBuilder final : public ProfileSummaryBuilder {
- uint64_t MaxInternalBlockCount;
+ uint64_t MaxInternalBlockCount = 0;
inline void addEntryCount(uint64_t Count);
inline void addInternalCount(uint64_t Count);
public:
InstrProfSummaryBuilder(std::vector<uint32_t> Cutoffs)
- : ProfileSummaryBuilder(std::move(Cutoffs)), MaxInternalBlockCount(0) {}
+ : ProfileSummaryBuilder(std::move(Cutoffs)) {}
void addRecord(const InstrProfRecord &);
std::unique_ptr<ProfileSummary> getSummary();
};
@@ -88,7 +87,7 @@ public:
std::unique_ptr<ProfileSummary> getSummary();
};
-// This is called when a count is seen in the profile.
+/// This is called when a count is seen in the profile.
void ProfileSummaryBuilder::addCount(uint64_t Count) {
TotalCount += Count;
if (Count > MaxCount)
diff --git a/include/llvm/ProfileData/SampleProf.h b/include/llvm/ProfileData/SampleProf.h
index 9fefefa627b1..a96f83620f8b 100644
--- a/include/llvm/ProfileData/SampleProf.h
+++ b/include/llvm/ProfileData/SampleProf.h
@@ -222,6 +222,21 @@ public:
return ret->second.getSamples();
}
+ /// Return the total number of call target samples collected at a given
+ /// location. Each location is specified by \p LineOffset and
+ /// \p Discriminator. If the location is not found in the profile, return an error.
+ ErrorOr<uint64_t> findCallSamplesAt(uint32_t LineOffset,
+ uint32_t Discriminator) const {
+ const auto &ret = BodySamples.find(LineLocation(LineOffset, Discriminator));
+ if (ret == BodySamples.end())
+ return std::error_code();
+ uint64_t T = 0;
+ for (const auto &t_c : ret->second.getCallTargets()) {
+ T += t_c.second;
+ }
+ return T;
+ }
+
/// Return the function samples at the given callsite location.
FunctionSamples &functionSamplesAt(const LineLocation &Loc) {
return CallsiteSamples[Loc];
diff --git a/include/llvm/Support/AArch64TargetParser.def b/include/llvm/Support/AArch64TargetParser.def
index 67f981b8f2fa..c4416f099de1 100644
--- a/include/llvm/Support/AArch64TargetParser.def
+++ b/include/llvm/Support/AArch64TargetParser.def
@@ -16,19 +16,20 @@
#ifndef AARCH64_ARCH
#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT)
#endif
+AARCH64_ARCH("invalid", AK_INVALID, nullptr, nullptr,
+ ARMBuildAttrs::CPUArch::v8_A, FK_NONE, AArch64::AEK_NONE)
AARCH64_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
- AArch64::AEK_SIMD | AArch64::AEK_FP16 | AArch64::AEK_PROFILE))
+ AArch64::AEK_SIMD | AArch64::AEK_LSE))
AARCH64_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
- AArch64::AEK_SIMD | AArch64::AEK_FP16 | AArch64::AEK_PROFILE))
+ AArch64::AEK_SIMD | AArch64::AEK_LSE))
AARCH64_ARCH("armv8.2-a", AK_ARMV8_2A, "8.2-A", "v8.2a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
- AArch64::AEK_SIMD | AArch64::AEK_FP16 | AArch64::AEK_PROFILE |
- AArch64::AEK_RAS))
+ AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE))
#undef AARCH64_ARCH
#ifndef AARCH64_ARCH_EXT_NAME
@@ -38,6 +39,7 @@ AARCH64_ARCH("armv8.2-a", AK_ARMV8_2A, "8.2-A", "v8.2a",
AARCH64_ARCH_EXT_NAME("invalid", AArch64::AEK_INVALID, nullptr, nullptr)
AARCH64_ARCH_EXT_NAME("none", AArch64::AEK_NONE, nullptr, nullptr)
AARCH64_ARCH_EXT_NAME("crc", AArch64::AEK_CRC, "+crc", "-crc")
+AARCH64_ARCH_EXT_NAME("lse", AArch64::AEK_LSE, "+lse", "-lse")
AARCH64_ARCH_EXT_NAME("crypto", AArch64::AEK_CRYPTO, "+crypto","-crypto")
AARCH64_ARCH_EXT_NAME("fp", AArch64::AEK_FP, "+fp-armv8", "-fp-armv8")
AARCH64_ARCH_EXT_NAME("simd", AArch64::AEK_SIMD, "+neon", "-neon")
@@ -63,6 +65,12 @@ AARCH64_CPU_NAME("cyclone", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRYPTO))
AARCH64_CPU_NAME("exynos-m1", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+AARCH64_CPU_NAME("exynos-m2", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+ (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+AARCH64_CPU_NAME("exynos-m3", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+ (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+AARCH64_CPU_NAME("falkor", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
+ (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
AARCH64_CPU_NAME("kryo", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
AARCH64_CPU_NAME("vulcan", AK_ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
diff --git a/include/llvm/Support/ARMBuildAttributes.h b/include/llvm/Support/ARMBuildAttributes.h
index f447cd072b5f..e25445790b0c 100644
--- a/include/llvm/Support/ARMBuildAttributes.h
+++ b/include/llvm/Support/ARMBuildAttributes.h
@@ -108,6 +108,7 @@ enum CPUArch {
v6S_M = 12, // v6_M with the System extensions
v7E_M = 13, // v7_M with DSP extensions
v8_A = 14, // v8_A AArch32
+ v8_R = 15, // e.g. Cortex R52
v8_M_Base= 16, // v8_M_Base AArch32
v8_M_Main= 17, // v8_M_Main AArch32
};
diff --git a/include/llvm/Support/ARMTargetParser.def b/include/llvm/Support/ARMTargetParser.def
index 195f7112d6a0..58cb6381a9ab 100644
--- a/include/llvm/Support/ARMTargetParser.def
+++ b/include/llvm/Support/ARMTargetParser.def
@@ -89,11 +89,15 @@ ARM_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
ARM_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
+ ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8.2-a", AK_ARMV8_2A, "8.2-A", "v8.2a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC))
+ ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
+ARM_ARCH("armv8-r", AK_ARMV8R, "8-R", "v8r", ARMBuildAttrs::CPUArch::v8_R,
+ FK_NEON_FP_ARMV8,
+ (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIV |
+ ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8-m.base", AK_ARMV8MBaseline, "8-M.Baseline", "v8m.base",
ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIV)
ARM_ARCH("armv8-m.main", AK_ARMV8MMainline, "8-M.Mainline", "v8m.main",
@@ -220,6 +224,7 @@ ARM_CPU_NAME("cortex-r7", AK_ARMV7R, FK_VFPV3_D16_FP16, false,
(ARM::AEK_MP | ARM::AEK_HWDIVARM))
ARM_CPU_NAME("cortex-r8", AK_ARMV7R, FK_VFPV3_D16_FP16, false,
(ARM::AEK_MP | ARM::AEK_HWDIVARM))
+ARM_CPU_NAME("cortex-r52", AK_ARMV8R, FK_NEON_FP_ARMV8, true, ARM::AEK_NONE)
ARM_CPU_NAME("sc300", AK_ARMV7M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m3", AK_ARMV7M, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m4", AK_ARMV7EM, FK_FPV4_SP_D16, true, ARM::AEK_NONE)
@@ -232,6 +237,8 @@ ARM_CPU_NAME("cortex-a72", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_C
ARM_CPU_NAME("cortex-a73", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cyclone", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m1", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("exynos-m2", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
+ARM_CPU_NAME("exynos-m3", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
// Non-standard Arch names.
ARM_CPU_NAME("iwmmxt", AK_IWMMXT, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("xscale", AK_XSCALE, FK_NONE, true, ARM::AEK_NONE)
diff --git a/include/llvm/Support/AlignOf.h b/include/llvm/Support/AlignOf.h
index af7f20028b6d..abd19afa22f0 100644
--- a/include/llvm/Support/AlignOf.h
+++ b/include/llvm/Support/AlignOf.h
@@ -7,8 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the AlignOf function that computes alignments for
-// arbitrary types.
+// This file defines the AlignedCharArray and AlignedCharArrayUnion classes.
//
//===----------------------------------------------------------------------===//
@@ -17,100 +16,15 @@
#include "llvm/Support/Compiler.h"
#include <cstddef>
-#include <type_traits>
namespace llvm {
-namespace detail {
-
-// For everything other than an abstract class we can calulate alignment by
-// building a class with a single character and a member of the given type.
-template <typename T, bool = std::is_abstract<T>::value>
-struct AlignmentCalcImpl {
- char x;
-#if defined(_MSC_VER)
-// Disables "structure was padded due to __declspec(align())" warnings that are
-// generated by any class using AlignOf<T> with a manually specified alignment.
-// Although the warning is disabled in the LLVM project we need this pragma
-// as AlignOf.h is a published support header that's available for use
-// out-of-tree, and we would like that to compile cleanly at /W4.
-#pragma warning(suppress : 4324)
-#endif
- T t;
-private:
- AlignmentCalcImpl() = delete;
-};
-
-// Abstract base class helper, this will have the minimal alignment and size
-// for any abstract class. We don't even define its destructor because this
-// type should never be used in a way that requires it.
-struct AlignmentCalcImplBase {
- virtual ~AlignmentCalcImplBase() = 0;
-};
-
-// When we have an abstract class type, specialize the alignment computation
-// engine to create another abstract class that derives from both an empty
-// abstract base class and the provided type. This has the same effect as the
-// above except that it handles the fact that we can't actually create a member
-// of type T.
-template <typename T>
-struct AlignmentCalcImpl<T, true> : AlignmentCalcImplBase, T {
- ~AlignmentCalcImpl() override = 0;
-};
-
-} // End detail namespace.
-
-/// AlignOf - A templated class that contains an enum value representing
-/// the alignment of the template argument. For example,
-/// AlignOf<int>::Alignment represents the alignment of type "int". The
-/// alignment calculated is the minimum alignment, and not necessarily
-/// the "desired" alignment returned by GCC's __alignof__ (for example). Note
-/// that because the alignment is an enum value, it can be used as a
-/// compile-time constant (e.g., for template instantiation).
-template <typename T>
-struct AlignOf {
-#ifndef _MSC_VER
- // Avoid warnings from GCC like:
- // comparison between 'enum llvm::AlignOf<X>::<anonymous>' and 'enum
- // llvm::AlignOf<Y>::<anonymous>' [-Wenum-compare]
- // by using constexpr instead of enum.
- // (except on MSVC, since it doesn't support constexpr yet).
- static constexpr unsigned Alignment = static_cast<unsigned int>(
- sizeof(detail::AlignmentCalcImpl<T>) - sizeof(T));
-#else
- enum {
- Alignment = static_cast<unsigned int>(
- sizeof(::llvm::detail::AlignmentCalcImpl<T>) - sizeof(T))
- };
-#endif
- enum { Alignment_GreaterEqual_2Bytes = Alignment >= 2 ? 1 : 0 };
- enum { Alignment_GreaterEqual_4Bytes = Alignment >= 4 ? 1 : 0 };
- enum { Alignment_GreaterEqual_8Bytes = Alignment >= 8 ? 1 : 0 };
- enum { Alignment_GreaterEqual_16Bytes = Alignment >= 16 ? 1 : 0 };
-
- enum { Alignment_LessEqual_2Bytes = Alignment <= 2 ? 1 : 0 };
- enum { Alignment_LessEqual_4Bytes = Alignment <= 4 ? 1 : 0 };
- enum { Alignment_LessEqual_8Bytes = Alignment <= 8 ? 1 : 0 };
- enum { Alignment_LessEqual_16Bytes = Alignment <= 16 ? 1 : 0 };
-};
-
-#ifndef _MSC_VER
-template <typename T> constexpr unsigned AlignOf<T>::Alignment;
-#endif
-
-/// alignOf - A templated function that returns the minimum alignment of
-/// of a type. This provides no extra functionality beyond the AlignOf
-/// class besides some cosmetic cleanliness. Example usage:
-/// alignOf<int>() returns the alignment of an int.
-template <typename T>
-inline unsigned alignOf() { return AlignOf<T>::Alignment; }
-
/// \struct AlignedCharArray
/// \brief Helper for building an aligned character array type.
///
/// This template is used to explicitly build up a collection of aligned
/// character array types. We have to build these up using a macro and explicit
-/// specialization to cope with old versions of MSVC and GCC where only an
+/// specialization to cope with MSVC (at least till 2015) where only an
/// integer literal can be used to specify an alignment constraint. Once built
/// up here, we can then begin to indirect between these using normal C++
/// template parameters.
@@ -118,38 +32,11 @@ inline unsigned alignOf() { return AlignOf<T>::Alignment; }
// MSVC requires special handling here.
#ifndef _MSC_VER
-#if __has_feature(cxx_alignas)
template<std::size_t Alignment, std::size_t Size>
struct AlignedCharArray {
- alignas(Alignment) char buffer[Size];
+ LLVM_ALIGNAS(Alignment) char buffer[Size];
};
-#elif defined(__GNUC__) || defined(__IBM_ATTRIBUTES)
-/// \brief Create a type with an aligned char buffer.
-template<std::size_t Alignment, std::size_t Size>
-struct AlignedCharArray;
-
-#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
- template<std::size_t Size> \
- struct AlignedCharArray<x, Size> { \
- __attribute__((aligned(x))) char buffer[Size]; \
- };
-
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1)
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2)
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4)
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8)
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
-LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
-
-#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
-
-#else
-# error No supported align as directive.
-#endif
-
#else // _MSC_VER
/// \brief Create a type with an aligned char buffer.
@@ -249,8 +136,8 @@ template <typename T1,
typename T5 = char, typename T6 = char, typename T7 = char,
typename T8 = char, typename T9 = char, typename T10 = char>
struct AlignedCharArrayUnion : llvm::AlignedCharArray<
- AlignOf<llvm::detail::AlignerImpl<T1, T2, T3, T4, T5,
- T6, T7, T8, T9, T10> >::Alignment,
+ alignof(llvm::detail::AlignerImpl<T1, T2, T3, T4, T5,
+ T6, T7, T8, T9, T10>),
sizeof(::llvm::detail::SizerImpl<T1, T2, T3, T4, T5,
T6, T7, T8, T9, T10>)> {
};
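With the AlignOf<>/alignOf<>() machinery gone, AlignedCharArrayUnion keeps working on top of plain alignof; a small usage sketch:

#include "llvm/Support/AlignOf.h"
#include <new>

static void useAlignedStorage() {
  // Storage sized and aligned for the largest of the listed types.
  llvm::AlignedCharArrayUnion<int, double, void *> Storage;
  double *D = new (Storage.buffer) double(3.14); // placement-construct
  *D += 1.0;
  D->~double(); // manual destruction; the raw buffer needs no cleanup
}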
diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h
index 1c9508661d6f..c71759abd7d2 100644
--- a/include/llvm/Support/Allocator.h
+++ b/include/llvm/Support/Allocator.h
@@ -22,14 +22,16 @@
#define LLVM_SUPPORT_ALLOCATOR_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/AlignOf.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Memory.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
+#include <cstdint>
#include <cstdlib>
+#include <iterator>
+#include <type_traits>
+#include <utility>
namespace llvm {
@@ -74,7 +76,7 @@ public:
/// \brief Allocate space for a sequence of objects without constructing them.
template <typename T> T *Allocate(size_t Num = 1) {
- return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
+ return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
}
/// \brief Deallocate space for a sequence of objects without constructing them.
@@ -114,7 +116,8 @@ namespace detail {
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
size_t TotalMemory);
-} // End namespace detail.
+
+} // end namespace detail
/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
///
@@ -366,7 +369,7 @@ template <typename T> class SpecificBumpPtrAllocator {
BumpPtrAllocator Allocator;
public:
- SpecificBumpPtrAllocator() : Allocator() {}
+ SpecificBumpPtrAllocator() = default;
SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
: Allocator(std::move(Old.Allocator)) {}
~SpecificBumpPtrAllocator() { DestroyAll(); }
@@ -381,7 +384,7 @@ public:
/// all memory allocated so far.
void DestroyAll() {
auto DestroyElements = [](char *Begin, char *End) {
- assert(Begin == (char*)alignAddr(Begin, alignOf<T>()));
+ assert(Begin == (char *)alignAddr(Begin, alignof(T)));
for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
reinterpret_cast<T *>(Ptr)->~T();
};
@@ -390,7 +393,7 @@ public:
++I) {
size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
std::distance(Allocator.Slabs.begin(), I));
- char *Begin = (char*)alignAddr(*I, alignOf<T>());
+ char *Begin = (char *)alignAddr(*I, alignof(T));
char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
: (char *)*I + AllocatedSlabSize;
@@ -400,7 +403,7 @@ public:
for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
void *Ptr = PtrAndSize.first;
size_t Size = PtrAndSize.second;
- DestroyElements((char*)alignAddr(Ptr, alignOf<T>()), (char *)Ptr + Size);
+ DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
}
Allocator.Reset();
@@ -410,7 +413,7 @@ public:
T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
-} // end namespace llvm
+} // end namespace llvm
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
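A brief sketch of the allocators after the switch to alignof: BumpPtrAllocator hands back uninitialized storage, while SpecificBumpPtrAllocator destroys its objects in DestroyAll():

#include "llvm/Support/Allocator.h"
#include <new>
#include <string>

static void bumpAllocate() {
  llvm::BumpPtrAllocator Alloc;
  int *Ints = Alloc.Allocate<int>(16); // uninitialized storage, alignof(int)
  for (int I = 0; I != 16; ++I)
    Ints[I] = I;

  llvm::SpecificBumpPtrAllocator<std::string> StrAlloc;
  std::string *S = new (StrAlloc.Allocate()) std::string("bump-allocated");
  (void)S; // destroyed by StrAlloc's destructor via DestroyAll()
}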
diff --git a/include/llvm/Support/ArrayRecycler.h b/include/llvm/Support/ArrayRecycler.h
index 36f644af2880..4698f12b3bbc 100644
--- a/include/llvm/Support/ArrayRecycler.h
+++ b/include/llvm/Support/ArrayRecycler.h
@@ -26,15 +26,14 @@ namespace llvm {
/// Arrays are allocated in a small number of fixed sizes. For each supported
/// array size, the ArrayRecycler keeps a free list of available arrays.
///
-template<class T, size_t Align = AlignOf<T>::Alignment>
-class ArrayRecycler {
+template <class T, size_t Align = alignof(T)> class ArrayRecycler {
// The free list for a given array size is a simple singly linked list.
// We can't use iplist or Recycler here since those classes can't be copied.
struct FreeList {
FreeList *Next;
};
- static_assert(Align >= AlignOf<FreeList>::Alignment, "Object underaligned");
+ static_assert(Align >= alignof(FreeList), "Object underaligned");
static_assert(sizeof(T) >= sizeof(FreeList), "Objects are too small");
// Keep a free list for each array size.
diff --git a/include/llvm/Support/AtomicOrdering.h b/include/llvm/Support/AtomicOrdering.h
index 8837fab19575..001804248b85 100644
--- a/include/llvm/Support/AtomicOrdering.h
+++ b/include/llvm/Support/AtomicOrdering.h
@@ -73,8 +73,8 @@ bool operator>=(AtomicOrdering, AtomicOrdering) = delete;
// Validate an integral value which isn't known to fit within the enum's range
// is a valid AtomicOrdering.
template <typename Int> static inline bool isValidAtomicOrdering(Int I) {
- return (Int)AtomicOrdering::NotAtomic <= I &&
- I <= (Int)AtomicOrdering::SequentiallyConsistent;
+ return static_cast<Int>(AtomicOrdering::NotAtomic) <= I &&
+ I <= static_cast<Int>(AtomicOrdering::SequentiallyConsistent);
}
/// String used by LLVM IR to represent atomic ordering.
@@ -82,40 +82,40 @@ static inline const char *toIRString(AtomicOrdering ao) {
static const char *names[8] = {"not_atomic", "unordered", "monotonic",
"consume", "acquire", "release",
"acq_rel", "seq_cst"};
- return names[(size_t)ao];
+ return names[static_cast<size_t>(ao)];
}
/// Returns true if ao is stronger than other as defined by the AtomicOrdering
/// lattice, which is based on C++'s definition.
static inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
static const bool lookup[8][8] = {
- // NA UN RX CO AC RE AR SC
- /* NotAtomic */ {0, 0, 0, 0, 0, 0, 0, 0},
- /* Unordered */ {1, 0, 0, 0, 0, 0, 0, 0},
- /* relaxed */ {1, 1, 0, 0, 0, 0, 0, 0},
- /* consume */ {1, 1, 1, 0, 0, 0, 0, 0},
- /* acquire */ {1, 1, 1, 1, 0, 0, 0, 0},
- /* release */ {1, 1, 1, 0, 0, 0, 0, 0},
- /* acq_rel */ {1, 1, 1, 1, 1, 1, 0, 0},
- /* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 0},
+ // NA UN RX CO AC RE AR SC
+ /* NotAtomic */ {false, false, false, false, false, false, false, false},
+ /* Unordered */ { true, false, false, false, false, false, false, false},
+ /* relaxed */ { true, true, false, false, false, false, false, false},
+ /* consume */ { true, true, true, false, false, false, false, false},
+ /* acquire */ { true, true, true, true, false, false, false, false},
+ /* release */ { true, true, true, false, false, false, false, false},
+ /* acq_rel */ { true, true, true, true, true, true, false, false},
+ /* seq_cst */ { true, true, true, true, true, true, true, false},
};
- return lookup[(size_t)ao][(size_t)other];
+ return lookup[static_cast<size_t>(ao)][static_cast<size_t>(other)];
}
static inline bool isAtLeastOrStrongerThan(AtomicOrdering ao,
AtomicOrdering other) {
static const bool lookup[8][8] = {
- // NA UN RX CO AC RE AR SC
- /* NotAtomic */ {1, 0, 0, 0, 0, 0, 0, 0},
- /* Unordered */ {1, 1, 0, 0, 0, 0, 0, 0},
- /* relaxed */ {1, 1, 1, 0, 0, 0, 0, 0},
- /* consume */ {1, 1, 1, 1, 0, 0, 0, 0},
- /* acquire */ {1, 1, 1, 1, 1, 0, 0, 0},
- /* release */ {1, 1, 1, 0, 0, 1, 0, 0},
- /* acq_rel */ {1, 1, 1, 1, 1, 1, 1, 0},
- /* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 1},
+ // NA UN RX CO AC RE AR SC
+ /* NotAtomic */ { true, false, false, false, false, false, false, false},
+ /* Unordered */ { true, true, false, false, false, false, false, false},
+ /* relaxed */ { true, true, true, false, false, false, false, false},
+ /* consume */ { true, true, true, true, false, false, false, false},
+ /* acquire */ { true, true, true, true, true, false, false, false},
+ /* release */ { true, true, true, false, false, true, false, false},
+ /* acq_rel */ { true, true, true, true, true, true, true, false},
+ /* seq_cst */ { true, true, true, true, true, true, true, true},
};
- return lookup[(size_t)ao][(size_t)other];
+ return lookup[static_cast<size_t>(ao)][static_cast<size_t>(other)];
}
static inline bool isStrongerThanUnordered(AtomicOrdering ao) {
@@ -145,7 +145,7 @@ static inline AtomicOrderingCABI toCABI(AtomicOrdering ao) {
/* acq_rel */ AtomicOrderingCABI::acq_rel,
/* seq_cst */ AtomicOrderingCABI::seq_cst,
};
- return lookup[(size_t)ao];
+ return lookup[static_cast<size_t>(ao)];
}
} // end namespace llvm
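The boolean tables encode the usual C++-style ordering lattice; a quick sanity sketch:

#include "llvm/Support/AtomicOrdering.h"
#include <cassert>

static void checkLattice() {
  using llvm::AtomicOrdering;
  // Acquire is stronger than Monotonic, but Acquire and Release are not
  // ordered relative to each other in the lattice.
  assert(llvm::isStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Monotonic));
  assert(!llvm::isStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Release));
  assert(!llvm::isStrongerThan(AtomicOrdering::Release, AtomicOrdering::Acquire));
}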
diff --git a/include/llvm/Support/COFF.h b/include/llvm/Support/COFF.h
index 7dad3e82bda6..19223306bd07 100644
--- a/include/llvm/Support/COFF.h
+++ b/include/llvm/Support/COFF.h
@@ -41,6 +41,11 @@ namespace COFF {
'\xaf', '\x20', '\xfa', '\xf6', '\x6a', '\xa4', '\xdc', '\xb8',
};
+ static const char ClGlObjMagic[] = {
+ '\x38', '\xfe', '\xb3', '\x0c', '\xa5', '\xd9', '\xab', '\x4d',
+ '\xac', '\x9b', '\xd6', '\xb6', '\x22', '\x26', '\x53', '\xc2',
+ };
+
// Sizes in bytes of various things in the COFF format.
enum {
Header16Size = 20,
@@ -657,7 +662,7 @@ namespace COFF {
}
ImportNameType getNameType() const {
- return static_cast<ImportNameType>((TypeInfo & 0x1C) >> 3);
+ return static_cast<ImportNameType>((TypeInfo & 0x1C) >> 2);
}
};
diff --git a/include/llvm/Support/CachePruning.h b/include/llvm/Support/CachePruning.h
index 383414119139..954fd8ae7ffb 100644
--- a/include/llvm/Support/CachePruning.h
+++ b/include/llvm/Support/CachePruning.h
@@ -16,6 +16,7 @@
#define LLVM_SUPPORT_CACHE_PRUNING_H
#include "llvm/ADT/StringRef.h"
+#include <chrono>
namespace llvm {
@@ -29,7 +30,7 @@ public:
/// Define the pruning interval. This is intended to be used to avoid scanning
/// the directory too often. It does not impact the decision of which file to
/// prune. A value of 0 forces the scan to occur.
- CachePruning &setPruningInterval(int PruningInterval) {
+ CachePruning &setPruningInterval(std::chrono::seconds PruningInterval) {
Interval = PruningInterval;
return *this;
}
@@ -37,7 +38,7 @@ public:
/// Define the expiration for a file. When a file hasn't been accessed for
/// \p ExpireAfter seconds, it is removed from the cache. A value of 0 disables
/// the expiration-based pruning.
- CachePruning &setEntryExpiration(unsigned ExpireAfter) {
+ CachePruning &setEntryExpiration(std::chrono::seconds ExpireAfter) {
Expiration = ExpireAfter;
return *this;
}
@@ -59,11 +60,11 @@ public:
private:
// Options that matches the setters above.
std::string Path;
- unsigned Expiration = 0;
- unsigned Interval = 0;
+ std::chrono::seconds Expiration = std::chrono::seconds::zero();
+ std::chrono::seconds Interval = std::chrono::seconds::zero();
unsigned PercentageOfAvailableSpace = 0;
};
} // namespace llvm
-#endif
\ No newline at end of file
+#endif
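A sketch of the chrono-based setters; the constructor taking the cache directory is assumed from the unchanged part of this header, and coarser durations convert implicitly to seconds:

#include "llvm/Support/CachePruning.h"
#include <chrono>

static void configurePruning(llvm::StringRef CacheDir) {
  llvm::CachePruning Pruner(CacheDir);
  Pruner.setPruningInterval(std::chrono::minutes(20))
        .setEntryExpiration(std::chrono::hours(24 * 7));
}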
diff --git a/include/llvm/Support/Casting.h b/include/llvm/Support/Casting.h
index 6ba5efa47554..a73047b2b557 100644
--- a/include/llvm/Support/Casting.h
+++ b/include/llvm/Support/Casting.h
@@ -128,8 +128,7 @@ struct isa_impl_wrap<To, FromTy, FromTy> {
//
// if (isa<Type>(myVal)) { ... }
//
-template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline bool isa(const Y &Val) {
+template <class X, class Y> LLVM_NODISCARD inline bool isa(const Y &Val) {
return isa_impl_wrap<X, const Y,
typename simplify_type<const Y>::SimpleType>::doit(Val);
}
@@ -243,9 +242,10 @@ inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
// accepted.
//
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
- !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
-cast_or_null(const Y &Val) {
+LLVM_NODISCARD inline
+ typename std::enable_if<!is_simple_type<Y>::value,
+ typename cast_retty<X, const Y>::ret_type>::type
+ cast_or_null(const Y &Val) {
if (!Val)
return nullptr;
assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
@@ -253,9 +253,10 @@ cast_or_null(const Y &Val) {
}
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
- !is_simple_type<Y>::value, typename cast_retty<X, Y>::ret_type>::type
-cast_or_null(Y &Val) {
+LLVM_NODISCARD inline
+ typename std::enable_if<!is_simple_type<Y>::value,
+ typename cast_retty<X, Y>::ret_type>::type
+ cast_or_null(Y &Val) {
if (!Val)
return nullptr;
assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
@@ -263,7 +264,7 @@ cast_or_null(Y &Val) {
}
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
+LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
cast_or_null(Y *Val) {
if (!Val) return nullptr;
assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
@@ -280,21 +281,20 @@ cast_or_null(Y *Val) {
//
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
- !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
-dyn_cast(const Y &Val) {
+LLVM_NODISCARD inline
+ typename std::enable_if<!is_simple_type<Y>::value,
+ typename cast_retty<X, const Y>::ret_type>::type
+ dyn_cast(const Y &Val) {
return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y>::ret_type
-dyn_cast(Y &Val) {
+LLVM_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
-dyn_cast(Y *Val) {
+LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
@@ -302,21 +302,23 @@ dyn_cast(Y *Val) {
// value is accepted.
//
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
- !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
-dyn_cast_or_null(const Y &Val) {
+LLVM_NODISCARD inline
+ typename std::enable_if<!is_simple_type<Y>::value,
+ typename cast_retty<X, const Y>::ret_type>::type
+ dyn_cast_or_null(const Y &Val) {
return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
- !is_simple_type<Y>::value, typename cast_retty<X, Y>::ret_type>::type
-dyn_cast_or_null(Y &Val) {
+LLVM_NODISCARD inline
+ typename std::enable_if<!is_simple_type<Y>::value,
+ typename cast_retty<X, Y>::ret_type>::type
+ dyn_cast_or_null(Y &Val) {
return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
-LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
+LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
dyn_cast_or_null(Y *Val) {
return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}
diff --git a/include/llvm/Support/Chrono.h b/include/llvm/Support/Chrono.h
new file mode 100644
index 000000000000..203439cab919
--- /dev/null
+++ b/include/llvm/Support/Chrono.h
@@ -0,0 +1,55 @@
+//===- llvm/Support/Chrono.h - Utilities for Timing Manipulation-*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CHRONO_H
+#define LLVM_SUPPORT_CHRONO_H
+
+#include "llvm/Support/Compiler.h"
+
+#include <chrono>
+#include <ctime>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace sys {
+
+/// A time point on the system clock. This is provided for two reasons:
+/// - to insulate us against subtle differences in behavior caused by
+/// differences in system clock precision (which is implementation-defined
+/// and differs between platforms).
+/// - to shorten the type name
+/// The default precision is nanoseconds. If you need a specific precision,
+/// specify it explicitly. If unsure, use the default. If you need a time point
+/// on a clock other than the system_clock, use std::chrono directly.
+template <typename D = std::chrono::nanoseconds>
+using TimePoint = std::chrono::time_point<std::chrono::system_clock, D>;
+
+/// Convert a TimePoint to std::time_t
+LLVM_ATTRIBUTE_ALWAYS_INLINE inline std::time_t toTimeT(TimePoint<> TP) {
+ using namespace std::chrono;
+ return system_clock::to_time_t(
+ time_point_cast<system_clock::time_point::duration>(TP));
+}
+
+/// Convert a std::time_t to a TimePoint
+LLVM_ATTRIBUTE_ALWAYS_INLINE inline TimePoint<std::chrono::seconds>
+toTimePoint(std::time_t T) {
+ using namespace std::chrono;
+ return time_point_cast<seconds>(system_clock::from_time_t(T));
+}
+
+} // namespace sys
+
+raw_ostream &operator<<(raw_ostream &OS, sys::TimePoint<> TP);
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_CHRONO_H
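A round-trip sketch using the two conversion helpers; going through time_t drops sub-second precision, which is why toTimePoint returns TimePoint<std::chrono::seconds>:

#include "llvm/Support/Chrono.h"

static void roundTripTime() {
  llvm::sys::TimePoint<> Now = std::chrono::system_clock::now();
  std::time_t T = llvm::sys::toTimeT(Now);
  llvm::sys::TimePoint<std::chrono::seconds> Coarse = llvm::sys::toTimePoint(T);
  (void)Coarse;
}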
diff --git a/include/llvm/Support/CodeGen.h b/include/llvm/Support/CodeGen.h
index e19abf8271eb..941c112b0dd2 100644
--- a/include/llvm/Support/CodeGen.h
+++ b/include/llvm/Support/CodeGen.h
@@ -19,7 +19,7 @@ namespace llvm {
// Relocation model types.
namespace Reloc {
- enum Model { Static, PIC_, DynamicNoPIC };
+ enum Model { Static, PIC_, DynamicNoPIC, ROPI, RWPI, ROPI_RWPI };
}
// Code model types.
diff --git a/include/llvm/Support/CommandLine.h b/include/llvm/Support/CommandLine.h
index 70465a0e3fd3..204672f88dd9 100644
--- a/include/llvm/Support/CommandLine.h
+++ b/include/llvm/Support/CommandLine.h
@@ -21,16 +21,21 @@
#define LLVM_SUPPORT_COMMANDLINE_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include <cassert>
#include <climits>
-#include <cstdarg>
-#include <utility>
+#include <cstddef>
+#include <initializer_list>
+#include <string>
+#include <type_traits>
#include <vector>
namespace llvm {
@@ -46,7 +51,7 @@ namespace cl {
// ParseCommandLineOptions - Command line option processing entry point.
//
bool ParseCommandLineOptions(int argc, const char *const *argv,
- const char *Overview = nullptr,
+ StringRef Overview = "",
bool IgnoreErrors = false);
//===----------------------------------------------------------------------===//
@@ -54,7 +59,7 @@ bool ParseCommandLineOptions(int argc, const char *const *argv,
// entry point.
//
void ParseEnvironmentOptions(const char *progName, const char *envvar,
- const char *Overview = nullptr);
+ const char *Overview = "");
///===---------------------------------------------------------------------===//
/// SetVersionPrinter - Override the default (LLVM specific) version printer
@@ -88,7 +93,7 @@ class Option;
///
/// Literal options are used by some parsers to register special option values.
/// This is how the PassNameParser registers pass names for opt.
-void AddLiteralOption(Option &O, const char *Name);
+void AddLiteralOption(Option &O, StringRef Name);
//===----------------------------------------------------------------------===//
// Flags permitted to be passed to command line arguments
@@ -156,18 +161,20 @@ enum MiscFlags { // Miscellaneous flags to adjust argument
//
class OptionCategory {
private:
- const char *const Name;
- const char *const Description;
+ StringRef const Name;
+ StringRef const Description;
+
void registerCategory();
public:
- OptionCategory(const char *const Name,
- const char *const Description = nullptr)
+ OptionCategory(StringRef const Name,
+ StringRef const Description = "")
: Name(Name), Description(Description) {
registerCategory();
}
- const char *getName() const { return Name; }
- const char *getDescription() const { return Description; }
+
+ StringRef getName() const { return Name; }
+ StringRef getDescription() const { return Description; }
};
// The general Option Category (used as default category).
@@ -178,26 +185,26 @@ extern OptionCategory GeneralCategory;
//
class SubCommand {
private:
- const char *const Name = nullptr;
- const char *const Description = nullptr;
+ StringRef Name;
+ StringRef Description;
protected:
void registerSubCommand();
void unregisterSubCommand();
public:
- SubCommand(const char *const Name, const char *const Description = nullptr)
+ SubCommand(StringRef Name, StringRef Description = "")
: Name(Name), Description(Description) {
- registerSubCommand();
+ registerSubCommand();
}
- SubCommand() {}
+ SubCommand() = default;
void reset();
operator bool() const;
- const char *getName() const { return Name; }
- const char *getDescription() const { return Description; }
+ StringRef getName() const { return Name; }
+ StringRef getDescription() const { return Description; }
SmallVector<Option *, 4> PositionalOpts;
SmallVector<Option *, 4> SinkOpts;
@@ -215,7 +222,6 @@ extern ManagedStatic<SubCommand> AllSubCommands;
//===----------------------------------------------------------------------===//
// Option Base class
//
-class alias;
class Option {
friend class alias;
@@ -257,15 +263,19 @@ public:
inline enum NumOccurrencesFlag getNumOccurrencesFlag() const {
return (enum NumOccurrencesFlag)Occurrences;
}
+
inline enum ValueExpected getValueExpectedFlag() const {
return Value ? ((enum ValueExpected)Value) : getValueExpectedFlagDefault();
}
+
inline enum OptionHidden getOptionHiddenFlag() const {
return (enum OptionHidden)HiddenFlag;
}
+
inline enum FormattingFlags getFormattingFlag() const {
return (enum FormattingFlags)Formatting;
}
+
inline unsigned getMiscFlags() const { return Misc; }
inline unsigned getPosition() const { return Position; }
inline unsigned getNumAdditionalVals() const { return AdditionalVals; }
@@ -274,11 +284,13 @@ public:
bool hasArgStr() const { return !ArgStr.empty(); }
bool isPositional() const { return getFormattingFlag() == cl::Positional; }
bool isSink() const { return getMiscFlags() & cl::Sink; }
+
bool isConsumeAfter() const {
return getNumOccurrencesFlag() == cl::ConsumeAfter;
}
+
bool isInAllSubCommands() const {
- return std::any_of(Subs.begin(), Subs.end(), [](const SubCommand *SC) {
+ return any_of(Subs, [](const SubCommand *SC) {
return SC == &*AllSubCommands;
});
}
@@ -303,12 +315,14 @@ protected:
enum OptionHidden Hidden)
: NumOccurrences(0), Occurrences(OccurrencesFlag), Value(0),
HiddenFlag(Hidden), Formatting(NormalFormatting), Misc(0), Position(0),
- AdditionalVals(0), ArgStr(""), HelpStr(""), ValueStr(""),
- Category(&GeneralCategory), FullyInitialized(false) {}
+ AdditionalVals(0), Category(&GeneralCategory), FullyInitialized(false) {
+ }
inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; }
public:
+ virtual ~Option() = default;
+
// addArgument - Register this argument with the commandline system.
//
void addArgument();
@@ -339,10 +353,8 @@ public:
// Prints option name followed by message. Always returns true.
bool error(const Twine &Message, StringRef ArgName = StringRef());
-public:
inline int getNumOccurrences() const { return NumOccurrences; }
inline void reset() { NumOccurrences = 0; }
- virtual ~Option() {}
};
//===----------------------------------------------------------------------===//
@@ -352,16 +364,20 @@ public:
// desc - Modifier to set the description shown in the -help output...
struct desc {
- const char *Desc;
- desc(const char *Str) : Desc(Str) {}
+ StringRef Desc;
+
+ desc(StringRef Str) : Desc(Str) {}
+
void apply(Option &O) const { O.setDescription(Desc); }
};
// value_desc - Modifier to set the value description shown in the -help
// output...
struct value_desc {
- const char *Desc;
- value_desc(const char *Str) : Desc(Str) {}
+ StringRef Desc;
+
+ value_desc(StringRef Str) : Desc(Str) {}
+
void apply(Option &O) const { O.setValueStr(Desc); }
};
@@ -386,6 +402,7 @@ template <class Ty> initializer<Ty> init(const Ty &Val) {
//
template <class Ty> struct LocationClass {
Ty &Loc;
+
LocationClass(Ty &L) : Loc(L) {}
template <class Opt> void apply(Opt &O) const { O.setLocation(O, Loc); }
@@ -399,6 +416,7 @@ template <class Ty> LocationClass<Ty> location(Ty &L) {
// to.
struct cat {
OptionCategory &Category;
+
cat(OptionCategory &c) : Category(c) {}
template <class Opt> void apply(Opt &O) const { O.setCategory(Category); }
@@ -407,6 +425,7 @@ struct cat {
// sub - Specify the subcommand that this option belongs to.
struct sub {
SubCommand &Sub;
+
sub(SubCommand &S) : Sub(S) {}
template <class Opt> void apply(Opt &O) const { O.addSubCommand(Sub); }
@@ -420,9 +439,9 @@ struct GenericOptionValue {
virtual bool compare(const GenericOptionValue &V) const = 0;
protected:
- ~GenericOptionValue() = default;
GenericOptionValue() = default;
GenericOptionValue(const GenericOptionValue&) = default;
+ ~GenericOptionValue() = default;
GenericOptionValue &operator=(const GenericOptionValue &) = default;
private:
@@ -458,15 +477,15 @@ protected:
// Simple copy of the option value.
template <class DataType> class OptionValueCopy : public GenericOptionValue {
DataType Value;
- bool Valid;
+ bool Valid = false;
protected:
- ~OptionValueCopy() = default;
OptionValueCopy(const OptionValueCopy&) = default;
+ ~OptionValueCopy() = default;
OptionValueCopy &operator=(const OptionValueCopy&) = default;
public:
- OptionValueCopy() : Valid(false) {}
+ OptionValueCopy() = default;
bool hasValue() const { return Valid; }
@@ -497,9 +516,9 @@ struct OptionValueBase<DataType, false> : OptionValueCopy<DataType> {
typedef DataType WrapperType;
protected:
- ~OptionValueBase() = default;
OptionValueBase() = default;
OptionValueBase(const OptionValueBase&) = default;
+ ~OptionValueBase() = default;
OptionValueBase &operator=(const OptionValueBase&) = default;
};
@@ -510,6 +529,7 @@ struct OptionValue final
OptionValue() = default;
OptionValue(const DataType &V) { this->setValue(V); }
+
// Some options may take their value from a different data type.
template <class DT> OptionValue<DataType> &operator=(const DT &V) {
this->setValue(V);
@@ -524,9 +544,10 @@ struct OptionValue<cl::boolOrDefault> final
: OptionValueCopy<cl::boolOrDefault> {
typedef cl::boolOrDefault WrapperType;
- OptionValue() {}
+ OptionValue() = default;
OptionValue(const cl::boolOrDefault &V) { this->setValue(V); }
+
OptionValue<cl::boolOrDefault> &operator=(const cl::boolOrDefault &V) {
setValue(V);
return *this;
@@ -540,9 +561,10 @@ template <>
struct OptionValue<std::string> final : OptionValueCopy<std::string> {
typedef StringRef WrapperType;
- OptionValue() {}
+ OptionValue() = default;
OptionValue(const std::string &V) { this->setValue(V); }
+
OptionValue<std::string> &operator=(const std::string &V) {
setValue(V);
return *this;
@@ -555,52 +577,43 @@ private:
//===----------------------------------------------------------------------===//
// Enum valued command line option
//
-#define clEnumVal(ENUMVAL, DESC) #ENUMVAL, int(ENUMVAL), DESC
-#define clEnumValN(ENUMVAL, FLAGNAME, DESC) FLAGNAME, int(ENUMVAL), DESC
-#define clEnumValEnd (reinterpret_cast<void *>(0))
+
+// This represents a single enum value, using "int" as the underlying type.
+struct OptionEnumValue {
+ StringRef Name;
+ int Value;
+ StringRef Description;
+};
+
+#define clEnumVal(ENUMVAL, DESC) \
+ llvm::cl::OptionEnumValue { #ENUMVAL, int(ENUMVAL), DESC }
+#define clEnumValN(ENUMVAL, FLAGNAME, DESC) \
+ llvm::cl::OptionEnumValue { FLAGNAME, int(ENUMVAL), DESC }
// values - For custom data types, allow specifying a group of values together
-// as the values that go into the mapping that the option handler uses. Note
-// that the values list must always have a 0 at the end of the list to indicate
-// that the list has ended.
+// as the values that go into the mapping that the option handler uses.
//
-template <class DataType> class ValuesClass {
+class ValuesClass {
// Use a vector instead of a map, because the lists should be short,
// the overhead is less, and most importantly, it keeps them in the order
// inserted so we can print our option out nicely.
- SmallVector<std::pair<const char *, std::pair<int, const char *>>, 4> Values;
- void processValues(va_list Vals);
+ SmallVector<OptionEnumValue, 4> Values;
public:
- ValuesClass(const char *EnumName, DataType Val, const char *Desc,
- va_list ValueArgs) {
- // Insert the first value, which is required.
- Values.push_back(std::make_pair(EnumName, std::make_pair(Val, Desc)));
-
- // Process the varargs portion of the values...
- while (const char *enumName = va_arg(ValueArgs, const char *)) {
- DataType EnumVal = static_cast<DataType>(va_arg(ValueArgs, int));
- const char *EnumDesc = va_arg(ValueArgs, const char *);
- Values.push_back(std::make_pair(enumName, // Add value to value map
- std::make_pair(EnumVal, EnumDesc)));
- }
- }
+ ValuesClass(std::initializer_list<OptionEnumValue> Options)
+ : Values(Options) {}
template <class Opt> void apply(Opt &O) const {
- for (size_t i = 0, e = Values.size(); i != e; ++i)
- O.getParser().addLiteralOption(Values[i].first, Values[i].second.first,
- Values[i].second.second);
+ for (auto Value : Values)
+ O.getParser().addLiteralOption(Value.Name, Value.Value,
+ Value.Description);
}
};
-template <class DataType>
-ValuesClass<DataType> LLVM_END_WITH_NULL
-values(const char *Arg, DataType Val, const char *Desc, ...) {
- va_list ValueArgs;
- va_start(ValueArgs, Desc);
- ValuesClass<DataType> Vals(Arg, Val, Desc, ValueArgs);
- va_end(ValueArgs);
- return Vals;
+/// Helper to build a ValuesClass by forwarding a variable number of arguments
+/// as an initializer list to the ValuesClass constructor.
+template <typename... OptsTy> ValuesClass values(OptsTy... Options) {
+ return ValuesClass({Options...});
}
//===----------------------------------------------------------------------===//
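A sketch of the new initializer-list form of values(): the trailing clEnumValEnd sentinel is gone and each clEnumVal/clEnumValN now yields an OptionEnumValue. The enum, option name, and descriptions below are hypothetical:

#include "llvm/Support/CommandLine.h"

enum DebugLevel { DL_None, DL_Basic, DL_Verbose };

static llvm::cl::opt<DebugLevel> DebugOutput(
    "debug-level", llvm::cl::desc("Set the debug verbosity:"),
    llvm::cl::init(DL_None),
    llvm::cl::values(clEnumValN(DL_None, "none", "no debug output"),
                     clEnumVal(DL_Basic, "basic debug output"),
                     clEnumVal(DL_Verbose, "verbose debug output")));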
@@ -619,16 +632,17 @@ class generic_parser_base {
protected:
class GenericOptionInfo {
public:
- GenericOptionInfo(const char *name, const char *helpStr)
+ GenericOptionInfo(StringRef name, StringRef helpStr)
: Name(name), HelpStr(helpStr) {}
- const char *Name;
- const char *HelpStr;
+ StringRef Name;
+ StringRef HelpStr;
};
public:
generic_parser_base(Option &O) : Owner(O) {}
- virtual ~generic_parser_base() {} // Base class should have virtual-dtor
+ virtual ~generic_parser_base() = default;
+ // Base class should have virtual-destructor
// getNumOptions - Virtual function implemented by generic subclass to
// indicate how many entries are in Values.
@@ -636,10 +650,10 @@ public:
virtual unsigned getNumOptions() const = 0;
// getOption - Return option name N.
- virtual const char *getOption(unsigned N) const = 0;
+ virtual StringRef getOption(unsigned N) const = 0;
// getDescription - Return description N
- virtual const char *getDescription(unsigned N) const = 0;
+ virtual StringRef getDescription(unsigned N) const = 0;
// Return the width of the option tag for printing...
virtual size_t getOptionWidth(const Option &O) const;
@@ -698,7 +712,7 @@ public:
// findOption - Return the option number corresponding to the specified
// argument string. If the option is not found, getNumOptions() is returned.
//
- unsigned findOption(const char *Name);
+ unsigned findOption(StringRef Name);
protected:
Option &Owner;
@@ -714,7 +728,7 @@ template <class DataType> class parser : public generic_parser_base {
protected:
class OptionInfo : public GenericOptionInfo {
public:
- OptionInfo(const char *name, DataType v, const char *helpStr)
+ OptionInfo(StringRef name, DataType v, StringRef helpStr)
: GenericOptionInfo(name, helpStr), V(v) {}
OptionValue<DataType> V;
};
@@ -726,8 +740,8 @@ public:
// Implement virtual functions needed by generic_parser_base
unsigned getNumOptions() const override { return unsigned(Values.size()); }
- const char *getOption(unsigned N) const override { return Values[N].Name; }
- const char *getDescription(unsigned N) const override {
+ StringRef getOption(unsigned N) const override { return Values[N].Name; }
+ StringRef getDescription(unsigned N) const override {
return Values[N].HelpStr;
}
@@ -756,7 +770,7 @@ public:
/// addLiteralOption - Add an entry to the mapping table.
///
template <class DT>
- void addLiteralOption(const char *Name, const DT &V, const char *HelpStr) {
+ void addLiteralOption(StringRef Name, const DT &V, StringRef HelpStr) {
assert(findOption(Name) == Values.size() && "Option already exists!");
OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
Values.push_back(X);
@@ -765,7 +779,7 @@ public:
/// removeLiteralOption - Remove the specified option.
///
- void removeLiteralOption(const char *Name) {
+ void removeLiteralOption(StringRef Name) {
unsigned N = findOption(Name);
assert(N != Values.size() && "Option not found!");
Values.erase(Values.begin() + N);
@@ -779,7 +793,6 @@ class basic_parser_impl { // non-template implementation of basic_parser<t>
public:
basic_parser_impl(Option &) {}
-
enum ValueExpected getValueExpectedFlagDefault() const {
return ValueRequired;
}
@@ -801,13 +814,14 @@ public:
void printOptionNoValue(const Option &O, size_t GlobalWidth) const;
// getValueName - Overload in subclass to provide a better default value.
- virtual const char *getValueName() const { return "value"; }
+ virtual StringRef getValueName() const { return "value"; }
// An out-of-line virtual method to provide a 'home' for this class.
virtual void anchor();
protected:
~basic_parser_impl() = default;
+
// A helper for basic_parser::printOptionDiff.
void printOptionName(const Option &O, size_t GlobalWidth) const;
};
@@ -818,12 +832,12 @@ protected:
template <class DataType> class basic_parser : public basic_parser_impl {
public:
basic_parser(Option &O) : basic_parser_impl(O) {}
+
typedef DataType parser_data_type;
typedef OptionValue<DataType> OptVal;
protected:
- // Workaround Clang PR22793
- ~basic_parser() {}
+ ~basic_parser() = default;
};
//--------------------------------------------------
@@ -843,7 +857,7 @@ public:
}
// getValueName - Do not print =<value> at all.
- const char *getValueName() const override { return nullptr; }
+ StringRef getValueName() const override { return StringRef(); }
void printOptionDiff(const Option &O, bool V, OptVal Default,
size_t GlobalWidth) const;
@@ -869,7 +883,7 @@ public:
}
// getValueName - Do not print =<value> at all.
- const char *getValueName() const override { return nullptr; }
+ StringRef getValueName() const override { return StringRef(); }
void printOptionDiff(const Option &O, boolOrDefault V, OptVal Default,
size_t GlobalWidth) const;
@@ -891,7 +905,7 @@ public:
bool parse(Option &O, StringRef ArgName, StringRef Arg, int &Val);
// getValueName - Overload in subclass to provide a better default value.
- const char *getValueName() const override { return "int"; }
+ StringRef getValueName() const override { return "int"; }
void printOptionDiff(const Option &O, int V, OptVal Default,
size_t GlobalWidth) const;
@@ -913,7 +927,7 @@ public:
bool parse(Option &O, StringRef ArgName, StringRef Arg, unsigned &Val);
// getValueName - Overload in subclass to provide a better default value.
- const char *getValueName() const override { return "uint"; }
+ StringRef getValueName() const override { return "uint"; }
void printOptionDiff(const Option &O, unsigned V, OptVal Default,
size_t GlobalWidth) const;
@@ -938,7 +952,7 @@ public:
unsigned long long &Val);
// getValueName - Overload in subclass to provide a better default value.
- const char *getValueName() const override { return "uint"; }
+ StringRef getValueName() const override { return "uint"; }
void printOptionDiff(const Option &O, unsigned long long V, OptVal Default,
size_t GlobalWidth) const;
@@ -960,7 +974,7 @@ public:
bool parse(Option &O, StringRef ArgName, StringRef Arg, double &Val);
// getValueName - Overload in subclass to provide a better default value.
- const char *getValueName() const override { return "number"; }
+ StringRef getValueName() const override { return "number"; }
void printOptionDiff(const Option &O, double V, OptVal Default,
size_t GlobalWidth) const;
@@ -982,7 +996,7 @@ public:
bool parse(Option &O, StringRef ArgName, StringRef Arg, float &Val);
// getValueName - Overload in subclass to provide a better default value.
- const char *getValueName() const override { return "number"; }
+ StringRef getValueName() const override { return "number"; }
void printOptionDiff(const Option &O, float V, OptVal Default,
size_t GlobalWidth) const;
@@ -1007,7 +1021,7 @@ public:
}
// getValueName - Overload in subclass to provide a better default value.
- const char *getValueName() const override { return "string"; }
+ StringRef getValueName() const override { return "string"; }
void printOptionDiff(const Option &O, StringRef V, const OptVal &Default,
size_t GlobalWidth) const;
@@ -1032,7 +1046,7 @@ public:
}
// getValueName - Overload in subclass to provide a better default value.
- const char *getValueName() const override { return "char"; }
+ StringRef getValueName() const override { return "char"; }
void printOptionDiff(const Option &O, char V, OptVal Default,
size_t GlobalWidth) const;
@@ -1100,17 +1114,17 @@ template <class Mod> struct applicator {
// Handle const char* as a special case...
template <unsigned n> struct applicator<char[n]> {
- template <class Opt> static void opt(const char *Str, Opt &O) {
+ template <class Opt> static void opt(StringRef Str, Opt &O) {
O.setArgStr(Str);
}
};
template <unsigned n> struct applicator<const char[n]> {
- template <class Opt> static void opt(const char *Str, Opt &O) {
+ template <class Opt> static void opt(StringRef Str, Opt &O) {
O.setArgStr(Str);
}
};
-template <> struct applicator<const char *> {
- template <class Opt> static void opt(const char *Str, Opt &O) {
+template <> struct applicator<StringRef> {
+ template <class Opt> static void opt(StringRef Str, Opt &O) {
O.setArgStr(Str);
}
};
@@ -1120,15 +1134,19 @@ template <> struct applicator<NumOccurrencesFlag> {
O.setNumOccurrencesFlag(N);
}
};
+
template <> struct applicator<ValueExpected> {
static void opt(ValueExpected VE, Option &O) { O.setValueExpectedFlag(VE); }
};
+
template <> struct applicator<OptionHidden> {
static void opt(OptionHidden OH, Option &O) { O.setHiddenFlag(OH); }
};
+
template <> struct applicator<FormattingFlags> {
static void opt(FormattingFlags FF, Option &O) { O.setFormattingFlag(FF); }
};
+
template <> struct applicator<MiscFlags> {
static void opt(MiscFlags MF, Option &O) { O.setMiscFlag(MF); }
};
@@ -1153,7 +1171,7 @@ template <class Opt, class Mod> void apply(Opt *O, const Mod &M) {
//
template <class DataType, bool ExternalStorage, bool isClass>
class opt_storage {
- DataType *Location; // Where to store the object...
+ DataType *Location = nullptr; // Where to store the object...
OptionValue<DataType> Default;
void check_location() const {
@@ -1163,7 +1181,7 @@ class opt_storage {
}
public:
- opt_storage() : Location(nullptr) {}
+ opt_storage() = default;
bool setLocation(Option &O, DataType &L) {
if (Location)
@@ -1292,11 +1310,11 @@ class opt : public Option,
Parser.initialize();
}
+public:
// Command line options should not be copyable
opt(const opt &) = delete;
opt &operator=(const opt &) = delete;
-public:
// setInitialValue - Used by the cl::init modifier...
void setInitialValue(const DataType &V) { this->setValue(V, true); }
@@ -1329,10 +1347,10 @@ extern template class opt<bool>;
// cl::location(x) modifier.
//
template <class DataType, class StorageClass> class list_storage {
- StorageClass *Location; // Where to store the object...
+ StorageClass *Location = nullptr; // Where to store the object...
public:
- list_storage() : Location(0) {}
+ list_storage() = default;
bool setLocation(Option &O, StorageClass &L) {
if (Location)
@@ -1462,11 +1480,11 @@ class list : public Option, public list_storage<DataType, StorageClass> {
Parser.initialize();
}
+public:
// Command line options should not be copyable
list(const list &) = delete;
list &operator=(const list &) = delete;
-public:
ParserClass &getParser() { return Parser; }
unsigned getPosition(unsigned optnum) const {
@@ -1503,7 +1521,7 @@ struct multi_val {
// cl::location(x) modifier.
//
template <class DataType, class StorageClass> class bits_storage {
- unsigned *Location; // Where to store the bits...
+ unsigned *Location = nullptr; // Where to store the bits...
template <class T> static unsigned Bit(const T &V) {
unsigned BitPos = reinterpret_cast<unsigned>(V);
@@ -1513,7 +1531,7 @@ template <class DataType, class StorageClass> class bits_storage {
}
public:
- bits_storage() : Location(nullptr) {}
+ bits_storage() = default;
bool setLocation(Option &O, unsigned &L) {
if (Location)
@@ -1601,11 +1619,11 @@ class bits : public Option, public bits_storage<DataType, Storage> {
Parser.initialize();
}
+public:
// Command line options should not be copyable
bits(const bits &) = delete;
bits &operator=(const bits &) = delete;
-public:
ParserClass &getParser() { return Parser; }
unsigned getPosition(unsigned optnum) const {
@@ -1627,14 +1645,17 @@ public:
class alias : public Option {
Option *AliasFor;
+
bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
StringRef Arg) override {
return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg);
}
+
bool addOccurrence(unsigned pos, StringRef /*ArgName*/, StringRef Value,
bool MultiArg = false) override {
return AliasFor->addOccurrence(pos, AliasFor->ArgStr, Value, MultiArg);
}
+
// Handle printing stuff...
size_t getOptionWidth() const override;
void printOptionInfo(size_t GlobalWidth) const override;
@@ -1656,11 +1677,11 @@ class alias : public Option {
addArgument();
}
+public:
// Command line options should not be copyable
alias(const alias &) = delete;
alias &operator=(const alias &) = delete;
-public:
void setAliasFor(Option &O) {
if (AliasFor)
error("cl::alias must only have one cl::aliasopt(...) specified!");
@@ -1678,7 +1699,9 @@ public:
// aliasfor - Modifier to set the option an alias aliases.
struct aliasopt {
Option &Opt;
+
explicit aliasopt(Option &O) : Opt(O) {}
+
void apply(alias &A) const { A.setAliasFor(Opt); }
};
@@ -1687,8 +1710,9 @@ struct aliasopt {
// printed to stderr at the end of the regular help, just before
// exit is called.
struct extrahelp {
- const char *morehelp;
- explicit extrahelp(const char *help);
+ StringRef morehelp;
+
+ explicit extrahelp(StringRef help);
};
void PrintVersionMessage();
@@ -1730,11 +1754,33 @@ void PrintHelpMessage(bool Hidden = false, bool Categorized = false);
/// the control of the client. The options should be modified before calling
/// llvm::cl::ParseCommandLineOptions().
///
-/// Hopefully this API can be depricated soon. Any situation where options need
+/// Hopefully this API can be deprecated soon. Any situation where options need
/// to be modified by tools or libraries should be handled by sane APIs rather
/// than just handing around a global list.
StringMap<Option *> &getRegisteredOptions(SubCommand &Sub = *TopLevelSubCommand);
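For illustration only (not from the patch): one way a tool might use getRegisteredOptions() to adjust an option before parsing. The option name "color" is a guess at an existing flag.

```cpp
#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Hide a registered option from -help output before parsing the command line.
void hideColorOption() {
  StringMap<cl::Option *> &Opts = cl::getRegisteredOptions();
  auto It = Opts.find("color"); // option name is illustrative
  if (It != Opts.end())
    It->second->setHiddenFlag(cl::ReallyHidden);
}
```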
+/// \brief Use this to get all registered SubCommands from the provided parser.
+///
+/// \return A range of all SubCommand pointers registered with the parser.
+///
+/// Typical usage:
+/// \code
+/// int main(int argc, char *argv[]) {
+/// llvm::cl::ParseCommandLineOptions(argc, argv);
+/// for (auto* S : llvm::cl::getRegisteredSubcommands()) {
+/// if (*S) {
+/// std::cout << "Executing subcommand: " << S->getName() << std::endl;
+/// // Execute some function based on the name...
+/// }
+/// }
+/// }
+/// \endcode
+///
+/// This interface is useful for defining subcommands in libraries and
+/// dispatching them from a single point (such as in the main function).
+iterator_range<typename SmallPtrSet<SubCommand *, 4>::iterator>
+getRegisteredSubcommands();
+
//===----------------------------------------------------------------------===//
// Standalone command line processing utilities.
//
@@ -1789,10 +1835,12 @@ typedef void (*TokenizerCallback)(StringRef Source, StringSaver &Saver,
/// \param [in,out] Argv Command line into which to expand response files.
/// \param [in] MarkEOLs Mark end of lines and the end of the response file
/// with nullptrs in the Argv vector.
+/// \param [in] RelativeNames true if names of nested response files must be
+/// resolved relative to the including file.
/// \return true if all @files were expanded successfully or there were none.
bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
SmallVectorImpl<const char *> &Argv,
- bool MarkEOLs = false);
+ bool MarkEOLs = false, bool RelativeNames = false);
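A hedged sketch of calling the extended ExpandResponseFiles(). The GNU tokenizer is one of the callbacks declared in this header; the wrapper function name is made up.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/StringSaver.h"
using namespace llvm;

// Expand @file arguments in-place, resolving nested response files relative
// to the file that references them (the new RelativeNames behaviour).
bool expandArgs(StringSaver &Saver, SmallVectorImpl<const char *> &Argv) {
  return cl::ExpandResponseFiles(Saver, cl::TokenizeGNUCommandLine, Argv,
                                 /*MarkEOLs=*/false, /*RelativeNames=*/true);
}
```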
/// \brief Mark all options not part of this category as cl::ReallyHidden.
///
@@ -1825,8 +1873,7 @@ void ResetAllOptionOccurrences();
/// where no options are supported.
void ResetCommandLineParser();
-} // End namespace cl
-
-} // End namespace llvm
+} // end namespace cl
+} // end namespace llvm
-#endif
+#endif // LLVM_SUPPORT_COMMANDLINE_H
diff --git a/include/llvm/Support/Compiler.h b/include/llvm/Support/Compiler.h
index fae0d8f4419e..55148a490c29 100644
--- a/include/llvm/Support/Compiler.h
+++ b/include/llvm/Support/Compiler.h
@@ -33,6 +33,10 @@
# define __has_attribute(x) 0
#endif
+#ifndef __has_cpp_attribute
+# define __has_cpp_attribute(x) 0
+#endif
+
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif
@@ -56,26 +60,19 @@
/// \macro LLVM_MSC_PREREQ
/// \brief Is the compiler MSVC of at least the specified version?
/// The common \param version values to check for are:
-/// * 1800: Microsoft Visual Studio 2013 / 12.0
/// * 1900: Microsoft Visual Studio 2015 / 14.0
#ifdef _MSC_VER
#define LLVM_MSC_PREREQ(version) (_MSC_VER >= (version))
-// We require at least MSVC 2013.
-#if !LLVM_MSC_PREREQ(1800)
-#error LLVM requires at least MSVC 2013.
+// We require at least MSVC 2015.
+#if !LLVM_MSC_PREREQ(1900)
+#error LLVM requires at least MSVC 2015.
#endif
#else
#define LLVM_MSC_PREREQ(version) 0
#endif
-#if !defined(_MSC_VER) || defined(__clang__) || LLVM_MSC_PREREQ(1900)
-#define LLVM_NOEXCEPT noexcept
-#else
-#define LLVM_NOEXCEPT throw()
-#endif
-
/// \brief Does the compiler support ref-qualifiers for *this?
///
/// Sadly, this is separate from just rvalue reference support because GCC
@@ -96,12 +93,6 @@
#define LLVM_LVALUE_FUNCTION
#endif
-#if __has_feature(cxx_constexpr) || defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1900)
-# define LLVM_CONSTEXPR constexpr
-#else
-# define LLVM_CONSTEXPR
-#endif
-
/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it. Can also be used to mark variables and
@@ -114,6 +105,12 @@
#define LLVM_LIBRARY_VISIBILITY
#endif
+#if defined(__GNUC__)
+#define LLVM_PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
+#else
+#define LLVM_PREFETCH(addr, rw, locality)
+#endif
+
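An illustrative use of the new LLVM_PREFETCH macro (the loop is not from the patch); it compiles away on toolchains without __builtin_prefetch.

```cpp
#include "llvm/Support/Compiler.h"

// Prefetch the next element for reading while processing the current one.
int sumWithPrefetch(const int *Data, int N) {
  int Total = 0;
  for (int I = 0; I < N; ++I) {
    LLVM_PREFETCH(&Data[I + 1], /*rw=*/0, /*locality=*/1);
    Total += Data[I];
  }
  return Total;
}
```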
#if __has_attribute(sentinel) || LLVM_GNUC_PREREQ(3, 0, 0)
#define LLVM_END_WITH_NULL __attribute__((sentinel))
#else
@@ -126,12 +123,17 @@
#define LLVM_ATTRIBUTE_USED
#endif
-#if __has_attribute(warn_unused_result) || LLVM_GNUC_PREREQ(3, 4, 0)
-#define LLVM_ATTRIBUTE_UNUSED_RESULT __attribute__((__warn_unused_result__))
-#elif defined(_MSC_VER)
-#define LLVM_ATTRIBUTE_UNUSED_RESULT _Check_return_
+/// LLVM_NODISCARD - Warn if a type or return value is discarded.
+#if __cplusplus > 201402L && __has_cpp_attribute(nodiscard)
+#define LLVM_NODISCARD [[nodiscard]]
+#elif !__cplusplus
+// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
+// error when __has_cpp_attribute is given a scoped attribute in C mode.
+#define LLVM_NODISCARD
+#elif __has_cpp_attribute(clang::warn_unused_result)
+#define LLVM_NODISCARD [[clang::warn_unused_result]]
#else
-#define LLVM_ATTRIBUTE_UNUSED_RESULT
+#define LLVM_NODISCARD
#endif
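A small sketch of what the LLVM_ATTRIBUTE_UNUSED_RESULT to LLVM_NODISCARD rename means for users; the function here is hypothetical.

```cpp
#include "llvm/Support/Compiler.h"

// Callers that drop the return value get a warning wherever [[nodiscard]]
// or [[clang::warn_unused_result]] is available.
LLVM_NODISCARD bool writeOutput(const char *Path) { return Path != nullptr; }

void run() {
  (void)writeOutput("out.o"); // the explicit cast documents the intent
}
```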
// Some compilers warn about unused functions. When a function is sometimes
@@ -228,6 +230,19 @@
#define LLVM_ATTRIBUTE_RETURNS_NOALIAS
#endif
+/// LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
+#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
+#define LLVM_FALLTHROUGH [[fallthrough]]
+#elif !__cplusplus
+// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
+// error when __has_cpp_attribute is given a scoped attribute in C mode.
+#define LLVM_FALLTHROUGH
+#elif __has_cpp_attribute(clang::fallthrough)
+#define LLVM_FALLTHROUGH [[clang::fallthrough]]
+#else
+#define LLVM_FALLTHROUGH
+#endif
+
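A hedged example of annotating an intentional fallthrough with the new macro; the switch itself is made up.

```cpp
#include "llvm/Support/Compiler.h"

// Silences -Wimplicit-fallthrough where [[fallthrough]] or
// [[clang::fallthrough]] is understood; expands to nothing elsewhere.
int widthOf(char Kind) {
  switch (Kind) {
  case 'q':
    return 8;
  case 'l':
    LLVM_FALLTHROUGH; // 'l' and 'i' are deliberately treated the same
  case 'i':
    return 4;
  default:
    return 1;
  }
}
```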
/// LLVM_EXTENSION - Support compilers where we have a keyword to suppress
/// pedantic diagnostics.
#ifdef __GNUC__
@@ -304,15 +319,8 @@
#endif
/// \macro LLVM_ALIGNAS
-/// \brief Used to specify a minimum alignment for a structure or variable. The
-/// alignment must be a constant integer. Use LLVM_PTR_SIZE to compute
-/// alignments in terms of the size of a pointer.
-///
-/// Note that __declspec(align) has special quirks, it's not legal to pass a
-/// structure with __declspec(align) as a formal parameter.
-#ifdef _MSC_VER
-# define LLVM_ALIGNAS(x) __declspec(align(x))
-#elif __GNUC__ && !__has_feature(cxx_alignas) && !LLVM_GNUC_PREREQ(4, 8, 0)
+/// \brief Used to specify a minimum alignment for a structure or variable.
+#if __GNUC__ && !__has_feature(cxx_alignas) && !LLVM_GNUC_PREREQ(4, 8, 1)
# define LLVM_ALIGNAS(x) __attribute__((aligned(x)))
#else
# define LLVM_ALIGNAS(x) alignas(x)
@@ -362,15 +370,6 @@
# define LLVM_PTR_SIZE sizeof(void *)
#endif
-/// \macro LLVM_FUNCTION_NAME
-/// \brief Expands to __func__ on compilers which support it. Otherwise,
-/// expands to a compiler-dependent replacement.
-#if defined(_MSC_VER)
-# define LLVM_FUNCTION_NAME __FUNCTION__
-#else
-# define LLVM_FUNCTION_NAME __func__
-#endif
-
/// \macro LLVM_MEMORY_SANITIZER_BUILD
/// \brief Whether LLVM itself is built with MemorySanitizer instrumentation.
#if __has_feature(memory_sanitizer)
@@ -405,12 +404,16 @@
// Thread Sanitizer is a tool that finds races in code.
// See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations .
// tsan detects these exact functions by name.
+#ifdef __cplusplus
extern "C" {
+#endif
void AnnotateHappensAfter(const char *file, int line, const volatile void *cv);
void AnnotateHappensBefore(const char *file, int line, const volatile void *cv);
void AnnotateIgnoreWritesBegin(const char *file, int line);
void AnnotateIgnoreWritesEnd(const char *file, int line);
+#ifdef __cplusplus
}
+#endif
// This marker is used to define a happens-before arc. The race detector will
// infer an arc from the begin to the end when they share the same pointer
@@ -449,6 +452,19 @@ void AnnotateIgnoreWritesEnd(const char *file, int line);
#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE
#endif
+/// \macro LLVM_PRETTY_FUNCTION
+/// \brief Gets a user-friendly looking function signature for the current scope
+/// using the best available method on each platform. The exact format of the
+/// resulting string is implementation specific and non-portable, so this should
+/// only be used, for example, for logging or diagnostics.
+#if defined(_MSC_VER)
+#define LLVM_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__) || defined(__clang__)
+#define LLVM_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#define LLVM_PRETTY_FUNCTION __func__
+#endif
+
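One plausible use of LLVM_PRETTY_FUNCTION for logging (not from the patch).

```cpp
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"

// Prints __FUNCSIG__ on MSVC, __PRETTY_FUNCTION__ on GCC/Clang, and
// __func__ everywhere else.
void traceEnter() {
  llvm::errs() << "entering " << LLVM_PRETTY_FUNCTION << "\n";
}
```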
/// \macro LLVM_THREAD_LOCAL
/// \brief A thread-local storage specifier which can be used with globals,
/// extern globals, and static globals.
diff --git a/include/llvm/Support/Compression.h b/include/llvm/Support/Compression.h
index 28274d67aad2..5bf7031fe9f9 100644
--- a/include/llvm/Support/Compression.h
+++ b/include/llvm/Support/Compression.h
@@ -43,6 +43,9 @@ bool isAvailable();
Status compress(StringRef InputBuffer, SmallVectorImpl<char> &CompressedBuffer,
CompressionLevel Level = DefaultCompression);
+Status uncompress(StringRef InputBuffer, char *UncompressedBuffer,
+ size_t &UncompressedSize);
+
Status uncompress(StringRef InputBuffer,
SmallVectorImpl<char> &UncompressedBuffer,
size_t UncompressedSize);
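A sketch of the new raw-buffer uncompress overload, assuming it returns the same zlib::Status values as the existing overload; the wrapper name is made up.

```cpp
#include "llvm/Support/Compression.h"
using namespace llvm;

// Size carries the capacity of Buf on entry and the decompressed size on return.
bool decompressInto(StringRef Compressed, char *Buf, size_t &Size) {
  return zlib::uncompress(Compressed, Buf, Size) == zlib::StatusOK;
}
```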
diff --git a/include/llvm/Support/ConvertUTF.h b/include/llvm/Support/ConvertUTF.h
index 5de5774f9db5..f714c0ed997e 100644
--- a/include/llvm/Support/ConvertUTF.h
+++ b/include/llvm/Support/ConvertUTF.h
@@ -90,6 +90,14 @@
#ifndef LLVM_SUPPORT_CONVERTUTF_H
#define LLVM_SUPPORT_CONVERTUTF_H
+#include <string>
+#include <cstddef>
+
+// Wrap everything in namespace llvm so that programs can link with llvm and
+// their own version of the unicode libraries.
+
+namespace llvm {
+
/* ---------------------------------------------------------------------
The following 4 definitions are compiler-specific.
The C standard does not guarantee that wchar_t has at least
@@ -127,11 +135,6 @@ typedef enum {
lenientConversion
} ConversionFlags;
-/* This is for C++ and does no harm in C */
-#ifdef __cplusplus
-extern "C" {
-#endif
-
ConversionResult ConvertUTF8toUTF16 (
const UTF8** sourceStart, const UTF8* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
@@ -174,16 +177,9 @@ Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);
unsigned getNumBytesForUTF8(UTF8 firstByte);
-#ifdef __cplusplus
-}
-
/*************************************************************************/
/* Below are LLVM-specific wrappers of the functions above. */
-#include <string>
-#include <cstddef>
-
-namespace llvm {
template <typename T> class ArrayRef;
template <typename T> class SmallVectorImpl;
class StringRef;
@@ -293,7 +289,3 @@ bool convertUTF8ToUTF16String(StringRef SrcUTF8,
} /* end namespace llvm */
#endif
-
-/* --------------------------------------------------------------------- */
-
-#endif
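With the conversion routines now wrapped in namespace llvm, the existing wrappers are reached through llvm::. A minimal sketch; the SmallVectorImpl<UTF16> parameter type is an assumption based on the declaration above.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ConvertUTF.h"

// Convert UTF-8 text to UTF-16 code units using the llvm-namespaced wrapper.
bool toUTF16(llvm::StringRef Src, llvm::SmallVectorImpl<llvm::UTF16> &Dst) {
  return llvm::convertUTF8ToUTF16String(Src, Dst);
}
```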
diff --git a/include/llvm/Support/DataExtractor.h b/include/llvm/Support/DataExtractor.h
index 3ffa9bc90dd1..2d1180c228e3 100644
--- a/include/llvm/Support/DataExtractor.h
+++ b/include/llvm/Support/DataExtractor.h
@@ -29,7 +29,7 @@ public:
/// \brief Get the data pointed to by this extractor.
StringRef getData() const { return Data; }
- /// \brief Get the endianess for this extractor.
+ /// \brief Get the endianness for this extractor.
bool isLittleEndian() const { return IsLittleEndian; }
/// \brief Get the address size for this extractor.
uint8_t getAddressSize() const { return AddressSize; }
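For context, a tiny example of DataExtractor honouring the endianness queried by isLittleEndian(); the field layout is invented.

```cpp
#include "llvm/Support/DataExtractor.h"
using namespace llvm;

// Read a 32-bit magic value followed by a ULEB128-encoded length.
uint32_t readMagic(StringRef Bytes) {
  DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
  uint32_t Offset = 0;
  uint32_t Magic = DE.getU32(&Offset);
  (void)DE.getULEB128(&Offset); // skip the length field
  return Magic;
}
```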
diff --git a/include/llvm/Support/DataStream.h b/include/llvm/Support/DataStream.h
deleted file mode 100644
index a544316f430d..000000000000
--- a/include/llvm/Support/DataStream.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===---- llvm/Support/DataStream.h - Lazy bitcode streaming ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This header defines DataStreamer, which fetches bytes of data from
-// a stream source. It provides support for streaming (lazy reading) of
-// data, e.g. bitcode
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef LLVM_SUPPORT_DATASTREAM_H
-#define LLVM_SUPPORT_DATASTREAM_H
-
-#include <memory>
-#include <string>
-
-namespace llvm {
-
-class DataStreamer {
-public:
- /// Fetch bytes [start-end) from the stream, and write them to the
- /// buffer pointed to by buf. Returns the number of bytes actually written.
- virtual size_t GetBytes(unsigned char *buf, size_t len) = 0;
-
- virtual ~DataStreamer();
-};
-
-std::unique_ptr<DataStreamer> getDataFileStreamer(const std::string &Filename,
- std::string *Err);
-}
-
-#endif // LLVM_SUPPORT_DATASTREAM_H_
diff --git a/include/llvm/Support/Debug.h b/include/llvm/Support/Debug.h
index 6e213477d710..3465c403361f 100644
--- a/include/llvm/Support/Debug.h
+++ b/include/llvm/Support/Debug.h
@@ -29,6 +29,7 @@
#define LLVM_SUPPORT_DEBUG_H
namespace llvm {
+
class raw_ostream;
#ifndef NDEBUG
@@ -50,6 +51,12 @@ bool isCurrentDebugType(const char *Type);
///
void setCurrentDebugType(const char *Type);
+/// setCurrentDebugTypes - Set the current debug types, as if the
+/// -debug-only=X,Y,Z option were specified. Note that DebugFlag
+/// also needs to be set to true for debug output to be produced.
+///
+void setCurrentDebugTypes(const char **Types, unsigned Count);
+
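A sketch of driving the new setCurrentDebugTypes() programmatically; the DEBUG_TYPE strings are placeholders.

```cpp
#include "llvm/Support/Debug.h"

// Equivalent to passing -debug -debug-only=isel,regalloc on the command line.
void enableSelectedDebugOutput() {
#ifndef NDEBUG
  static const char *Types[] = {"isel", "regalloc"};
  llvm::DebugFlag = true;
  llvm::setCurrentDebugTypes(Types, 2);
#endif
}
```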
/// DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug
/// information. If the '-debug' option is specified on the command line, and if
/// this is a debug build, then the code specified as the option to the macro
@@ -61,12 +68,13 @@ void setCurrentDebugType(const char *Type);
/// is not specified, or is specified as "bitset".
#define DEBUG_WITH_TYPE(TYPE, X) \
do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(TYPE)) { X; } \
- } while (0)
+ } while (false)
#else
#define isCurrentDebugType(X) (false)
#define setCurrentDebugType(X)
-#define DEBUG_WITH_TYPE(TYPE, X) do { } while (0)
+#define setCurrentDebugTypes(X, N)
+#define DEBUG_WITH_TYPE(TYPE, X) do { } while (false)
#endif
/// EnableDebugBuffering - This defaults to false. If true, the debug
@@ -91,6 +99,6 @@ raw_ostream &dbgs();
//
#define DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_SUPPORT_DEBUG_H
diff --git a/include/llvm/Support/Dwarf.def b/include/llvm/Support/Dwarf.def
index b73f2aed5311..841fc7d4ae22 100644
--- a/include/llvm/Support/Dwarf.def
+++ b/include/llvm/Support/Dwarf.def
@@ -12,9 +12,14 @@
//===----------------------------------------------------------------------===//
// TODO: Add other DW-based macros.
-#if !(defined HANDLE_DW_TAG || defined HANDLE_DW_OP || \
+#if !(defined HANDLE_DW_TAG || defined HANDLE_DW_AT || \
+ defined HANDLE_DW_FORM || defined HANDLE_DW_OP || \
defined HANDLE_DW_LANG || defined HANDLE_DW_ATE || \
- defined HANDLE_DW_VIRTUALITY || defined HANDLE_DW_CC)
+ defined HANDLE_DW_VIRTUALITY || defined HANDLE_DW_DEFAULTED || \
+ defined HANDLE_DW_CC || defined HANDLE_DW_LNS || \
+ defined HANDLE_DW_LNE || defined HANDLE_DW_LNCT || \
+ defined HANDLE_DW_MACRO || defined HANDLE_DW_RLE || \
+ defined HANDLE_DW_CFA || defined HANDLE_DW_APPLE_PROPERTY)
#error "Missing macro definition of HANDLE_DW*"
#endif
@@ -22,6 +27,14 @@
#define HANDLE_DW_TAG(ID, NAME)
#endif
+#ifndef HANDLE_DW_AT
+#define HANDLE_DW_AT(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_FORM
+#define HANDLE_DW_FORM(ID, NAME)
+#endif
+
#ifndef HANDLE_DW_OP
#define HANDLE_DW_OP(ID, NAME)
#endif
@@ -38,11 +51,43 @@
#define HANDLE_DW_VIRTUALITY(ID, NAME)
#endif
+#ifndef HANDLE_DW_DEFAULTED
+#define HANDLE_DW_DEFAULTED(ID, NAME)
+#endif
+
#ifndef HANDLE_DW_CC
#define HANDLE_DW_CC(ID, NAME)
#endif
+#ifndef HANDLE_DW_LNS
+#define HANDLE_DW_LNS(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_LNE
+#define HANDLE_DW_LNE(ID, NAME)
+#endif
+#ifndef HANDLE_DW_LNCT
+#define HANDLE_DW_LNCT(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_MACRO
+#define HANDLE_DW_MACRO(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_RLE
+#define HANDLE_DW_RLE(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_CFA
+#define HANDLE_DW_CFA(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_APPLE_PROPERTY
+#define HANDLE_DW_APPLE_PROPERTY(ID, NAME)
+#endif
+
+HANDLE_DW_TAG(0x0000, null)
HANDLE_DW_TAG(0x0001, array_type)
HANDLE_DW_TAG(0x0002, class_type)
HANDLE_DW_TAG(0x0003, entry_point)
@@ -108,6 +153,11 @@ HANDLE_DW_TAG(0x0043, template_alias)
HANDLE_DW_TAG(0x0044, coarray_type)
HANDLE_DW_TAG(0x0045, generic_subrange)
HANDLE_DW_TAG(0x0046, dynamic_type)
+HANDLE_DW_TAG(0x0047, atomic_type)
+HANDLE_DW_TAG(0x0048, call_site)
+HANDLE_DW_TAG(0x0049, call_site_parameter)
+HANDLE_DW_TAG(0x004a, skeleton_unit)
+HANDLE_DW_TAG(0x004b, immutable_type)
// User-defined tags.
HANDLE_DW_TAG(0x4081, MIPS_loop)
@@ -124,6 +174,260 @@ HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array)
HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set)
HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant)
+// Attributes.
+HANDLE_DW_AT(0x01, sibling)
+HANDLE_DW_AT(0x02, location)
+HANDLE_DW_AT(0x03, name)
+HANDLE_DW_AT(0x09, ordering)
+HANDLE_DW_AT(0x0b, byte_size)
+HANDLE_DW_AT(0x0c, bit_offset)
+HANDLE_DW_AT(0x0d, bit_size)
+HANDLE_DW_AT(0x10, stmt_list)
+HANDLE_DW_AT(0x11, low_pc)
+HANDLE_DW_AT(0x12, high_pc)
+HANDLE_DW_AT(0x13, language)
+HANDLE_DW_AT(0x15, discr)
+HANDLE_DW_AT(0x16, discr_value)
+HANDLE_DW_AT(0x17, visibility)
+HANDLE_DW_AT(0x18, import)
+HANDLE_DW_AT(0x19, string_length)
+HANDLE_DW_AT(0x1a, common_reference)
+HANDLE_DW_AT(0x1b, comp_dir)
+HANDLE_DW_AT(0x1c, const_value)
+HANDLE_DW_AT(0x1d, containing_type)
+HANDLE_DW_AT(0x1e, default_value)
+HANDLE_DW_AT(0x20, inline)
+HANDLE_DW_AT(0x21, is_optional)
+HANDLE_DW_AT(0x22, lower_bound)
+HANDLE_DW_AT(0x25, producer)
+HANDLE_DW_AT(0x27, prototyped)
+HANDLE_DW_AT(0x2a, return_addr)
+HANDLE_DW_AT(0x2c, start_scope)
+HANDLE_DW_AT(0x2e, bit_stride)
+HANDLE_DW_AT(0x2f, upper_bound)
+HANDLE_DW_AT(0x31, abstract_origin)
+HANDLE_DW_AT(0x32, accessibility)
+HANDLE_DW_AT(0x33, address_class)
+HANDLE_DW_AT(0x34, artificial)
+HANDLE_DW_AT(0x35, base_types)
+HANDLE_DW_AT(0x36, calling_convention)
+HANDLE_DW_AT(0x37, count)
+HANDLE_DW_AT(0x38, data_member_location)
+HANDLE_DW_AT(0x39, decl_column)
+HANDLE_DW_AT(0x3a, decl_file)
+HANDLE_DW_AT(0x3b, decl_line)
+HANDLE_DW_AT(0x3c, declaration)
+HANDLE_DW_AT(0x3d, discr_list)
+HANDLE_DW_AT(0x3e, encoding)
+HANDLE_DW_AT(0x3f, external)
+HANDLE_DW_AT(0x40, frame_base)
+HANDLE_DW_AT(0x41, friend)
+HANDLE_DW_AT(0x42, identifier_case)
+HANDLE_DW_AT(0x43, macro_info)
+HANDLE_DW_AT(0x44, namelist_item)
+HANDLE_DW_AT(0x45, priority)
+HANDLE_DW_AT(0x46, segment)
+HANDLE_DW_AT(0x47, specification)
+HANDLE_DW_AT(0x48, static_link)
+HANDLE_DW_AT(0x49, type)
+HANDLE_DW_AT(0x4a, use_location)
+HANDLE_DW_AT(0x4b, variable_parameter)
+HANDLE_DW_AT(0x4c, virtuality)
+HANDLE_DW_AT(0x4d, vtable_elem_location)
+HANDLE_DW_AT(0x4e, allocated)
+HANDLE_DW_AT(0x4f, associated)
+HANDLE_DW_AT(0x50, data_location)
+HANDLE_DW_AT(0x51, byte_stride)
+HANDLE_DW_AT(0x52, entry_pc)
+HANDLE_DW_AT(0x53, use_UTF8)
+HANDLE_DW_AT(0x54, extension)
+HANDLE_DW_AT(0x55, ranges)
+HANDLE_DW_AT(0x56, trampoline)
+HANDLE_DW_AT(0x57, call_column)
+HANDLE_DW_AT(0x58, call_file)
+HANDLE_DW_AT(0x59, call_line)
+HANDLE_DW_AT(0x5a, description)
+HANDLE_DW_AT(0x5b, binary_scale)
+HANDLE_DW_AT(0x5c, decimal_scale)
+HANDLE_DW_AT(0x5d, small)
+HANDLE_DW_AT(0x5e, decimal_sign)
+HANDLE_DW_AT(0x5f, digit_count)
+HANDLE_DW_AT(0x60, picture_string)
+HANDLE_DW_AT(0x61, mutable)
+HANDLE_DW_AT(0x62, threads_scaled)
+HANDLE_DW_AT(0x63, explicit)
+HANDLE_DW_AT(0x64, object_pointer)
+HANDLE_DW_AT(0x65, endianity)
+HANDLE_DW_AT(0x66, elemental)
+HANDLE_DW_AT(0x67, pure)
+HANDLE_DW_AT(0x68, recursive)
+HANDLE_DW_AT(0x69, signature)
+HANDLE_DW_AT(0x6a, main_subprogram)
+HANDLE_DW_AT(0x6b, data_bit_offset)
+HANDLE_DW_AT(0x6c, const_expr)
+HANDLE_DW_AT(0x6d, enum_class)
+HANDLE_DW_AT(0x6e, linkage_name)
+
+// New in DWARF 5:
+HANDLE_DW_AT(0x6f, string_length_bit_size)
+HANDLE_DW_AT(0x70, string_length_byte_size)
+HANDLE_DW_AT(0x71, rank)
+HANDLE_DW_AT(0x72, str_offsets_base)
+HANDLE_DW_AT(0x73, addr_base)
+HANDLE_DW_AT(0x74, rnglists_base)
+HANDLE_DW_AT(0x75, dwo_id) ///< Retracted from DWARF 5.
+HANDLE_DW_AT(0x76, dwo_name)
+HANDLE_DW_AT(0x77, reference)
+HANDLE_DW_AT(0x78, rvalue_reference)
+HANDLE_DW_AT(0x79, macros)
+HANDLE_DW_AT(0x7a, call_all_calls)
+HANDLE_DW_AT(0x7b, call_all_source_calls)
+HANDLE_DW_AT(0x7c, call_all_tail_calls)
+HANDLE_DW_AT(0x7d, call_return_pc)
+HANDLE_DW_AT(0x7e, call_value)
+HANDLE_DW_AT(0x7f, call_origin)
+HANDLE_DW_AT(0x80, call_parameter)
+HANDLE_DW_AT(0x81, call_pc)
+HANDLE_DW_AT(0x82, call_tail_call)
+HANDLE_DW_AT(0x83, call_target)
+HANDLE_DW_AT(0x84, call_target_clobbered)
+HANDLE_DW_AT(0x85, call_data_location)
+HANDLE_DW_AT(0x86, call_data_value)
+HANDLE_DW_AT(0x87, noreturn)
+HANDLE_DW_AT(0x88, alignment)
+HANDLE_DW_AT(0x89, export_symbols)
+HANDLE_DW_AT(0x8a, deleted)
+HANDLE_DW_AT(0x8b, defaulted)
+HANDLE_DW_AT(0x8c, loclists_base)
+
+HANDLE_DW_AT(0x2002, MIPS_loop_begin)
+HANDLE_DW_AT(0x2003, MIPS_tail_loop_begin)
+HANDLE_DW_AT(0x2004, MIPS_epilog_begin)
+HANDLE_DW_AT(0x2005, MIPS_loop_unroll_factor)
+HANDLE_DW_AT(0x2006, MIPS_software_pipeline_depth)
+HANDLE_DW_AT(0x2007, MIPS_linkage_name)
+HANDLE_DW_AT(0x2008, MIPS_stride)
+HANDLE_DW_AT(0x2009, MIPS_abstract_name)
+HANDLE_DW_AT(0x200a, MIPS_clone_origin)
+HANDLE_DW_AT(0x200b, MIPS_has_inlines)
+HANDLE_DW_AT(0x200c, MIPS_stride_byte)
+HANDLE_DW_AT(0x200d, MIPS_stride_elem)
+HANDLE_DW_AT(0x200e, MIPS_ptr_dopetype)
+HANDLE_DW_AT(0x200f, MIPS_allocatable_dopetype)
+HANDLE_DW_AT(0x2010, MIPS_assumed_shape_dopetype)
+
+// This one appears to have only been implemented by Open64 for
+// fortran and may conflict with other extensions.
+HANDLE_DW_AT(0x2011, MIPS_assumed_size)
+
+// GNU extensions
+HANDLE_DW_AT(0x2101, sf_names)
+HANDLE_DW_AT(0x2102, src_info)
+HANDLE_DW_AT(0x2103, mac_info)
+HANDLE_DW_AT(0x2104, src_coords)
+HANDLE_DW_AT(0x2105, body_begin)
+HANDLE_DW_AT(0x2106, body_end)
+HANDLE_DW_AT(0x2107, GNU_vector)
+HANDLE_DW_AT(0x2110, GNU_template_name)
+
+HANDLE_DW_AT(0x210f, GNU_odr_signature)
+HANDLE_DW_AT(0x2119, GNU_macros)
+
+// Extensions for Fission proposal.
+HANDLE_DW_AT(0x2130, GNU_dwo_name)
+HANDLE_DW_AT(0x2131, GNU_dwo_id)
+HANDLE_DW_AT(0x2132, GNU_ranges_base)
+HANDLE_DW_AT(0x2133, GNU_addr_base)
+HANDLE_DW_AT(0x2134, GNU_pubnames)
+HANDLE_DW_AT(0x2135, GNU_pubtypes)
+HANDLE_DW_AT(0x2136, GNU_discriminator)
+
+// Borland extensions.
+HANDLE_DW_AT(0x3b11, BORLAND_property_read)
+HANDLE_DW_AT(0x3b12, BORLAND_property_write)
+HANDLE_DW_AT(0x3b13, BORLAND_property_implements)
+HANDLE_DW_AT(0x3b14, BORLAND_property_index)
+HANDLE_DW_AT(0x3b15, BORLAND_property_default)
+HANDLE_DW_AT(0x3b20, BORLAND_Delphi_unit)
+HANDLE_DW_AT(0x3b21, BORLAND_Delphi_class)
+HANDLE_DW_AT(0x3b22, BORLAND_Delphi_record)
+HANDLE_DW_AT(0x3b23, BORLAND_Delphi_metaclass)
+HANDLE_DW_AT(0x3b24, BORLAND_Delphi_constructor)
+HANDLE_DW_AT(0x3b25, BORLAND_Delphi_destructor)
+HANDLE_DW_AT(0x3b26, BORLAND_Delphi_anonymous_method)
+HANDLE_DW_AT(0x3b27, BORLAND_Delphi_interface)
+HANDLE_DW_AT(0x3b28, BORLAND_Delphi_ABI)
+HANDLE_DW_AT(0x3b29, BORLAND_Delphi_return)
+HANDLE_DW_AT(0x3b30, BORLAND_Delphi_frameptr)
+HANDLE_DW_AT(0x3b31, BORLAND_closure)
+
+// LLVM project extensions.
+HANDLE_DW_AT(0x3e00, LLVM_include_path)
+HANDLE_DW_AT(0x3e01, LLVM_config_macros)
+HANDLE_DW_AT(0x3e02, LLVM_isysroot)
+
+// Apple extensions.
+HANDLE_DW_AT(0x3fe1, APPLE_optimized)
+HANDLE_DW_AT(0x3fe2, APPLE_flags)
+HANDLE_DW_AT(0x3fe3, APPLE_isa)
+HANDLE_DW_AT(0x3fe4, APPLE_block)
+HANDLE_DW_AT(0x3fe5, APPLE_major_runtime_vers)
+HANDLE_DW_AT(0x3fe6, APPLE_runtime_class)
+HANDLE_DW_AT(0x3fe7, APPLE_omit_frame_ptr)
+HANDLE_DW_AT(0x3fe8, APPLE_property_name)
+HANDLE_DW_AT(0x3fe9, APPLE_property_getter)
+HANDLE_DW_AT(0x3fea, APPLE_property_setter)
+HANDLE_DW_AT(0x3feb, APPLE_property_attribute)
+HANDLE_DW_AT(0x3fec, APPLE_objc_complete_type)
+HANDLE_DW_AT(0x3fed, APPLE_property)
+
+// Attribute form encodings.
+HANDLE_DW_FORM(0x01, addr)
+HANDLE_DW_FORM(0x03, block2)
+HANDLE_DW_FORM(0x04, block4)
+HANDLE_DW_FORM(0x05, data2)
+HANDLE_DW_FORM(0x06, data4)
+HANDLE_DW_FORM(0x07, data8)
+HANDLE_DW_FORM(0x08, string)
+HANDLE_DW_FORM(0x09, block)
+HANDLE_DW_FORM(0x0a, block1)
+HANDLE_DW_FORM(0x0b, data1)
+HANDLE_DW_FORM(0x0c, flag)
+HANDLE_DW_FORM(0x0d, sdata)
+HANDLE_DW_FORM(0x0e, strp)
+HANDLE_DW_FORM(0x0f, udata)
+HANDLE_DW_FORM(0x10, ref_addr)
+HANDLE_DW_FORM(0x11, ref1)
+HANDLE_DW_FORM(0x12, ref2)
+HANDLE_DW_FORM(0x13, ref4)
+HANDLE_DW_FORM(0x14, ref8)
+HANDLE_DW_FORM(0x15, ref_udata)
+HANDLE_DW_FORM(0x16, indirect)
+HANDLE_DW_FORM(0x17, sec_offset)
+HANDLE_DW_FORM(0x18, exprloc)
+HANDLE_DW_FORM(0x19, flag_present)
+
+// New in DWARF v5.
+HANDLE_DW_FORM(0x1a, strx)
+HANDLE_DW_FORM(0x1b, addrx)
+HANDLE_DW_FORM(0x1c, ref_sup)
+HANDLE_DW_FORM(0x1d, strp_sup)
+HANDLE_DW_FORM(0x1e, data16)
+HANDLE_DW_FORM(0x1f, line_strp)
+HANDLE_DW_FORM(0x20, ref_sig8)
+HANDLE_DW_FORM(0x21, implicit_const)
+HANDLE_DW_FORM(0x22, loclistx)
+HANDLE_DW_FORM(0x23, rnglistx)
+
+// Extensions for Fission proposal
+HANDLE_DW_FORM(0x1f01, GNU_addr_index)
+HANDLE_DW_FORM(0x1f02, GNU_str_index)
+
+// Alternate debug sections proposal (output of "dwz" tool).
+HANDLE_DW_FORM(0x1f20, GNU_ref_alt)
+HANDLE_DW_FORM(0x1f21, GNU_strp_alt)
+
+// DWARF Expression operators.
HANDLE_DW_OP(0x03, addr)
HANDLE_DW_OP(0x06, deref)
HANDLE_DW_OP(0x08, const1u)
@@ -151,7 +455,7 @@ HANDLE_DW_OP(0x1d, mod)
HANDLE_DW_OP(0x1e, mul)
HANDLE_DW_OP(0x1f, neg)
HANDLE_DW_OP(0x20, not)
-HANDLE_DW_OP(0x21, or )
+HANDLE_DW_OP(0x21, or)
HANDLE_DW_OP(0x22, plus)
HANDLE_DW_OP(0x23, plus_uconst)
HANDLE_DW_OP(0x24, shl)
@@ -278,7 +582,18 @@ HANDLE_DW_OP(0x9c, call_frame_cfa)
HANDLE_DW_OP(0x9d, bit_piece)
HANDLE_DW_OP(0x9e, implicit_value)
HANDLE_DW_OP(0x9f, stack_value)
+HANDLE_DW_OP(0xa0, implicit_pointer)
+HANDLE_DW_OP(0xa1, addrx)
+HANDLE_DW_OP(0xa2, constx)
+HANDLE_DW_OP(0xa3, entry_value)
+HANDLE_DW_OP(0xa4, const_type)
+HANDLE_DW_OP(0xa5, regval_type)
+HANDLE_DW_OP(0xa6, deref_type)
+HANDLE_DW_OP(0xa7, xderef_type)
+HANDLE_DW_OP(0xa8, convert)
+HANDLE_DW_OP(0xa9, reinterpret)
+// Vendor extensions.
// Extensions for GNU-style thread-local storage.
HANDLE_DW_OP(0xe0, GNU_push_tls_address)
@@ -324,6 +639,9 @@ HANDLE_DW_LANG(0x0020, Dylan)
HANDLE_DW_LANG(0x0021, C_plus_plus_14)
HANDLE_DW_LANG(0x0022, Fortran03)
HANDLE_DW_LANG(0x0023, Fortran08)
+HANDLE_DW_LANG(0x0024, RenderScript)
+
+// Vendor extensions.
HANDLE_DW_LANG(0x8001, Mips_Assembler)
HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript)
HANDLE_DW_LANG(0xb000, BORLAND_Delphi)
@@ -345,16 +663,25 @@ HANDLE_DW_ATE(0x0d, signed_fixed)
HANDLE_DW_ATE(0x0e, unsigned_fixed)
HANDLE_DW_ATE(0x0f, decimal_float)
HANDLE_DW_ATE(0x10, UTF)
+HANDLE_DW_ATE(0x11, UCS)
+HANDLE_DW_ATE(0x12, ASCII)
// DWARF virtuality codes.
HANDLE_DW_VIRTUALITY(0x00, none)
HANDLE_DW_VIRTUALITY(0x01, virtual)
HANDLE_DW_VIRTUALITY(0x02, pure_virtual)
+// DWARF v5 Defaulted Member Encodings.
+HANDLE_DW_DEFAULTED(0x00, no)
+HANDLE_DW_DEFAULTED(0x01, in_class)
+HANDLE_DW_DEFAULTED(0x02, out_of_class)
+
// DWARF calling convention codes.
HANDLE_DW_CC(0x01, normal)
HANDLE_DW_CC(0x02, program)
HANDLE_DW_CC(0x03, nocall)
+HANDLE_DW_CC(0x04, pass_by_reference)
+HANDLE_DW_CC(0x05, pass_by_value)
HANDLE_DW_CC(0x41, GNU_borland_fastcall_i386)
HANDLE_DW_CC(0xb0, BORLAND_safecall)
HANDLE_DW_CC(0xb1, BORLAND_stdcall)
@@ -365,9 +692,120 @@ HANDLE_DW_CC(0xb5, BORLAND_thiscall)
HANDLE_DW_CC(0xb6, BORLAND_fastcall)
HANDLE_DW_CC(0xc0, LLVM_vectorcall)
+// Line Number Extended Opcode Encodings
+HANDLE_DW_LNE(0x01, end_sequence)
+HANDLE_DW_LNE(0x02, set_address)
+HANDLE_DW_LNE(0x03, define_file)
+HANDLE_DW_LNE(0x04, set_discriminator)
+
+// Line Number Standard Opcode Encodings.
+HANDLE_DW_LNS(0x00, extended_op)
+HANDLE_DW_LNS(0x01, copy)
+HANDLE_DW_LNS(0x02, advance_pc)
+HANDLE_DW_LNS(0x03, advance_line)
+HANDLE_DW_LNS(0x04, set_file)
+HANDLE_DW_LNS(0x05, set_column)
+HANDLE_DW_LNS(0x06, negate_stmt)
+HANDLE_DW_LNS(0x07, set_basic_block)
+HANDLE_DW_LNS(0x08, const_add_pc)
+HANDLE_DW_LNS(0x09, fixed_advance_pc)
+HANDLE_DW_LNS(0x0a, set_prologue_end)
+HANDLE_DW_LNS(0x0b, set_epilogue_begin)
+HANDLE_DW_LNS(0x0c, set_isa)
+
+// DWARF v5 Line number header entry format.
+HANDLE_DW_LNCT(0x01, path)
+HANDLE_DW_LNCT(0x02, directory_index)
+HANDLE_DW_LNCT(0x03, timestamp)
+HANDLE_DW_LNCT(0x04, size)
+HANDLE_DW_LNCT(0x05, MD5)
+
+HANDLE_DW_MACRO(0x01, define)
+HANDLE_DW_MACRO(0x02, undef)
+HANDLE_DW_MACRO(0x03, start_file)
+HANDLE_DW_MACRO(0x04, end_file)
+HANDLE_DW_MACRO(0x05, define_strp)
+HANDLE_DW_MACRO(0x06, undef_strp)
+HANDLE_DW_MACRO(0x07, import)
+HANDLE_DW_MACRO(0x08, define_sup)
+HANDLE_DW_MACRO(0x09, undef_sup)
+HANDLE_DW_MACRO(0x0a, import_sup)
+HANDLE_DW_MACRO(0x0b, define_strx)
+HANDLE_DW_MACRO(0x0c, undef_strx)
+
+// Range list entry encoding values.
+HANDLE_DW_RLE(0x00, end_of_list)
+HANDLE_DW_RLE(0x01, base_addressx)
+HANDLE_DW_RLE(0x02, startx_endx)
+HANDLE_DW_RLE(0x03, startx_length)
+HANDLE_DW_RLE(0x04, offset_pair)
+HANDLE_DW_RLE(0x05, base_address)
+HANDLE_DW_RLE(0x06, start_end)
+HANDLE_DW_RLE(0x07, start_length)
+
+// Call frame instruction encodings.
+HANDLE_DW_CFA(0x00, nop)
+HANDLE_DW_CFA(0x40, advance_loc)
+HANDLE_DW_CFA(0x80, offset)
+HANDLE_DW_CFA(0xc0, restore)
+HANDLE_DW_CFA(0x01, set_loc)
+HANDLE_DW_CFA(0x02, advance_loc1)
+HANDLE_DW_CFA(0x03, advance_loc2)
+HANDLE_DW_CFA(0x04, advance_loc4)
+HANDLE_DW_CFA(0x05, offset_extended)
+HANDLE_DW_CFA(0x06, restore_extended)
+HANDLE_DW_CFA(0x07, undefined)
+HANDLE_DW_CFA(0x08, same_value)
+HANDLE_DW_CFA(0x09, register)
+HANDLE_DW_CFA(0x0a, remember_state)
+HANDLE_DW_CFA(0x0b, restore_state)
+HANDLE_DW_CFA(0x0c, def_cfa)
+HANDLE_DW_CFA(0x0d, def_cfa_register)
+HANDLE_DW_CFA(0x0e, def_cfa_offset)
+HANDLE_DW_CFA(0x0f, def_cfa_expression)
+HANDLE_DW_CFA(0x10, expression)
+HANDLE_DW_CFA(0x11, offset_extended_sf)
+HANDLE_DW_CFA(0x12, def_cfa_sf)
+HANDLE_DW_CFA(0x13, def_cfa_offset_sf)
+HANDLE_DW_CFA(0x14, val_offset)
+HANDLE_DW_CFA(0x15, val_offset_sf)
+HANDLE_DW_CFA(0x16, val_expression)
+HANDLE_DW_CFA(0x1d, MIPS_advance_loc8)
+HANDLE_DW_CFA(0x2d, GNU_window_save)
+HANDLE_DW_CFA(0x2e, GNU_args_size)
+
+// Apple Objective-C Property Attributes.
+// Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind!
+HANDLE_DW_APPLE_PROPERTY(0x01, readonly)
+HANDLE_DW_APPLE_PROPERTY(0x02, getter)
+HANDLE_DW_APPLE_PROPERTY(0x04, assign)
+HANDLE_DW_APPLE_PROPERTY(0x08, readwrite)
+HANDLE_DW_APPLE_PROPERTY(0x10, retain)
+HANDLE_DW_APPLE_PROPERTY(0x20, copy)
+HANDLE_DW_APPLE_PROPERTY(0x40, nonatomic)
+HANDLE_DW_APPLE_PROPERTY(0x80, setter)
+HANDLE_DW_APPLE_PROPERTY(0x100, atomic)
+HANDLE_DW_APPLE_PROPERTY(0x200, weak)
+HANDLE_DW_APPLE_PROPERTY(0x400, strong)
+HANDLE_DW_APPLE_PROPERTY(0x800, unsafe_unretained)
+HANDLE_DW_APPLE_PROPERTY(0x1000, nullability)
+HANDLE_DW_APPLE_PROPERTY(0x2000, null_resettable)
+HANDLE_DW_APPLE_PROPERTY(0x4000, class)
+
+
#undef HANDLE_DW_TAG
+#undef HANDLE_DW_AT
+#undef HANDLE_DW_FORM
#undef HANDLE_DW_OP
#undef HANDLE_DW_LANG
#undef HANDLE_DW_ATE
#undef HANDLE_DW_VIRTUALITY
+#undef HANDLE_DW_DEFAULTED
#undef HANDLE_DW_CC
+#undef HANDLE_DW_LNS
+#undef HANDLE_DW_LNE
+#undef HANDLE_DW_LNCT
+#undef HANDLE_DW_MACRO
+#undef HANDLE_DW_RLE
+#undef HANDLE_DW_CFA
+#undef HANDLE_DW_APPLE_PROPERTY
diff --git a/include/llvm/Support/Dwarf.h b/include/llvm/Support/Dwarf.h
index 86b19676c345..1a984037da09 100644
--- a/include/llvm/Support/Dwarf.h
+++ b/include/llvm/Support/Dwarf.h
@@ -90,241 +90,26 @@ inline bool isType(Tag T) {
}
}
+/// Attributes.
enum Attribute : uint16_t {
- // Attributes
- DW_AT_sibling = 0x01,
- DW_AT_location = 0x02,
- DW_AT_name = 0x03,
- DW_AT_ordering = 0x09,
- DW_AT_byte_size = 0x0b,
- DW_AT_bit_offset = 0x0c,
- DW_AT_bit_size = 0x0d,
- DW_AT_stmt_list = 0x10,
- DW_AT_low_pc = 0x11,
- DW_AT_high_pc = 0x12,
- DW_AT_language = 0x13,
- DW_AT_discr = 0x15,
- DW_AT_discr_value = 0x16,
- DW_AT_visibility = 0x17,
- DW_AT_import = 0x18,
- DW_AT_string_length = 0x19,
- DW_AT_common_reference = 0x1a,
- DW_AT_comp_dir = 0x1b,
- DW_AT_const_value = 0x1c,
- DW_AT_containing_type = 0x1d,
- DW_AT_default_value = 0x1e,
- DW_AT_inline = 0x20,
- DW_AT_is_optional = 0x21,
- DW_AT_lower_bound = 0x22,
- DW_AT_producer = 0x25,
- DW_AT_prototyped = 0x27,
- DW_AT_return_addr = 0x2a,
- DW_AT_start_scope = 0x2c,
- DW_AT_bit_stride = 0x2e,
- DW_AT_upper_bound = 0x2f,
- DW_AT_abstract_origin = 0x31,
- DW_AT_accessibility = 0x32,
- DW_AT_address_class = 0x33,
- DW_AT_artificial = 0x34,
- DW_AT_base_types = 0x35,
- DW_AT_calling_convention = 0x36,
- DW_AT_count = 0x37,
- DW_AT_data_member_location = 0x38,
- DW_AT_decl_column = 0x39,
- DW_AT_decl_file = 0x3a,
- DW_AT_decl_line = 0x3b,
- DW_AT_declaration = 0x3c,
- DW_AT_discr_list = 0x3d,
- DW_AT_encoding = 0x3e,
- DW_AT_external = 0x3f,
- DW_AT_frame_base = 0x40,
- DW_AT_friend = 0x41,
- DW_AT_identifier_case = 0x42,
- DW_AT_macro_info = 0x43,
- DW_AT_namelist_item = 0x44,
- DW_AT_priority = 0x45,
- DW_AT_segment = 0x46,
- DW_AT_specification = 0x47,
- DW_AT_static_link = 0x48,
- DW_AT_type = 0x49,
- DW_AT_use_location = 0x4a,
- DW_AT_variable_parameter = 0x4b,
- DW_AT_virtuality = 0x4c,
- DW_AT_vtable_elem_location = 0x4d,
- DW_AT_allocated = 0x4e,
- DW_AT_associated = 0x4f,
- DW_AT_data_location = 0x50,
- DW_AT_byte_stride = 0x51,
- DW_AT_entry_pc = 0x52,
- DW_AT_use_UTF8 = 0x53,
- DW_AT_extension = 0x54,
- DW_AT_ranges = 0x55,
- DW_AT_trampoline = 0x56,
- DW_AT_call_column = 0x57,
- DW_AT_call_file = 0x58,
- DW_AT_call_line = 0x59,
- DW_AT_description = 0x5a,
- DW_AT_binary_scale = 0x5b,
- DW_AT_decimal_scale = 0x5c,
- DW_AT_small = 0x5d,
- DW_AT_decimal_sign = 0x5e,
- DW_AT_digit_count = 0x5f,
- DW_AT_picture_string = 0x60,
- DW_AT_mutable = 0x61,
- DW_AT_threads_scaled = 0x62,
- DW_AT_explicit = 0x63,
- DW_AT_object_pointer = 0x64,
- DW_AT_endianity = 0x65,
- DW_AT_elemental = 0x66,
- DW_AT_pure = 0x67,
- DW_AT_recursive = 0x68,
- DW_AT_signature = 0x69,
- DW_AT_main_subprogram = 0x6a,
- DW_AT_data_bit_offset = 0x6b,
- DW_AT_const_expr = 0x6c,
- DW_AT_enum_class = 0x6d,
- DW_AT_linkage_name = 0x6e,
-
- // New in DWARF 5:
- DW_AT_string_length_bit_size = 0x6f,
- DW_AT_string_length_byte_size = 0x70,
- DW_AT_rank = 0x71,
- DW_AT_str_offsets_base = 0x72,
- DW_AT_addr_base = 0x73,
- DW_AT_ranges_base = 0x74,
- DW_AT_dwo_id = 0x75,
- DW_AT_dwo_name = 0x76,
- DW_AT_reference = 0x77,
- DW_AT_rvalue_reference = 0x78,
- DW_AT_macros = 0x79,
-
+#define HANDLE_DW_AT(ID, NAME) DW_AT_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
DW_AT_lo_user = 0x2000,
DW_AT_hi_user = 0x3fff,
-
- DW_AT_MIPS_loop_begin = 0x2002,
- DW_AT_MIPS_tail_loop_begin = 0x2003,
- DW_AT_MIPS_epilog_begin = 0x2004,
- DW_AT_MIPS_loop_unroll_factor = 0x2005,
- DW_AT_MIPS_software_pipeline_depth = 0x2006,
- DW_AT_MIPS_linkage_name = 0x2007,
- DW_AT_MIPS_stride = 0x2008,
- DW_AT_MIPS_abstract_name = 0x2009,
- DW_AT_MIPS_clone_origin = 0x200a,
- DW_AT_MIPS_has_inlines = 0x200b,
- DW_AT_MIPS_stride_byte = 0x200c,
- DW_AT_MIPS_stride_elem = 0x200d,
- DW_AT_MIPS_ptr_dopetype = 0x200e,
- DW_AT_MIPS_allocatable_dopetype = 0x200f,
- DW_AT_MIPS_assumed_shape_dopetype = 0x2010,
-
- // This one appears to have only been implemented by Open64 for
- // fortran and may conflict with other extensions.
- DW_AT_MIPS_assumed_size = 0x2011,
-
- // GNU extensions
- DW_AT_sf_names = 0x2101,
- DW_AT_src_info = 0x2102,
- DW_AT_mac_info = 0x2103,
- DW_AT_src_coords = 0x2104,
- DW_AT_body_begin = 0x2105,
- DW_AT_body_end = 0x2106,
- DW_AT_GNU_vector = 0x2107,
- DW_AT_GNU_template_name = 0x2110,
-
- DW_AT_GNU_odr_signature = 0x210f,
- DW_AT_GNU_macros = 0x2119,
-
- // Extensions for Fission proposal.
- DW_AT_GNU_dwo_name = 0x2130,
- DW_AT_GNU_dwo_id = 0x2131,
- DW_AT_GNU_ranges_base = 0x2132,
- DW_AT_GNU_addr_base = 0x2133,
- DW_AT_GNU_pubnames = 0x2134,
- DW_AT_GNU_pubtypes = 0x2135,
- DW_AT_GNU_discriminator = 0x2136,
-
- // Borland extensions.
- DW_AT_BORLAND_property_read = 0x3b11,
- DW_AT_BORLAND_property_write = 0x3b12,
- DW_AT_BORLAND_property_implements = 0x3b13,
- DW_AT_BORLAND_property_index = 0x3b14,
- DW_AT_BORLAND_property_default = 0x3b15,
- DW_AT_BORLAND_Delphi_unit = 0x3b20,
- DW_AT_BORLAND_Delphi_class = 0x3b21,
- DW_AT_BORLAND_Delphi_record = 0x3b22,
- DW_AT_BORLAND_Delphi_metaclass = 0x3b23,
- DW_AT_BORLAND_Delphi_constructor = 0x3b24,
- DW_AT_BORLAND_Delphi_destructor = 0x3b25,
- DW_AT_BORLAND_Delphi_anonymous_method = 0x3b26,
- DW_AT_BORLAND_Delphi_interface = 0x3b27,
- DW_AT_BORLAND_Delphi_ABI = 0x3b28,
- DW_AT_BORLAND_Delphi_return = 0x3b29,
- DW_AT_BORLAND_Delphi_frameptr = 0x3b30,
- DW_AT_BORLAND_closure = 0x3b31,
-
- // LLVM project extensions.
- DW_AT_LLVM_include_path = 0x3e00,
- DW_AT_LLVM_config_macros = 0x3e01,
- DW_AT_LLVM_isysroot = 0x3e02,
-
- // Apple extensions.
- DW_AT_APPLE_optimized = 0x3fe1,
- DW_AT_APPLE_flags = 0x3fe2,
- DW_AT_APPLE_isa = 0x3fe3,
- DW_AT_APPLE_block = 0x3fe4,
- DW_AT_APPLE_major_runtime_vers = 0x3fe5,
- DW_AT_APPLE_runtime_class = 0x3fe6,
- DW_AT_APPLE_omit_frame_ptr = 0x3fe7,
- DW_AT_APPLE_property_name = 0x3fe8,
- DW_AT_APPLE_property_getter = 0x3fe9,
- DW_AT_APPLE_property_setter = 0x3fea,
- DW_AT_APPLE_property_attribute = 0x3feb,
- DW_AT_APPLE_objc_complete_type = 0x3fec,
- DW_AT_APPLE_property = 0x3fed
};
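Not part of the patch: a minimal sketch of consuming the new HANDLE_DW_AT entries in Dwarf.def directly, the same x-macro pattern the enum above relies on; the function name is hypothetical.

```cpp
#include "llvm/ADT/StringRef.h"

// Expand Dwarf.def once, turning every HANDLE_DW_AT entry into a switch case
// that maps the numeric attribute code to its "DW_AT_*" spelling.
llvm::StringRef attributeName(unsigned Attr) {
  switch (Attr) {
#define HANDLE_DW_AT(ID, NAME)                                                 \
  case ID:                                                                     \
    return "DW_AT_" #NAME;
#include "llvm/Support/Dwarf.def"
  default:
    return llvm::StringRef();
  }
}
```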
enum Form : uint16_t {
- // Attribute form encodings
- DW_FORM_addr = 0x01,
- DW_FORM_block2 = 0x03,
- DW_FORM_block4 = 0x04,
- DW_FORM_data2 = 0x05,
- DW_FORM_data4 = 0x06,
- DW_FORM_data8 = 0x07,
- DW_FORM_string = 0x08,
- DW_FORM_block = 0x09,
- DW_FORM_block1 = 0x0a,
- DW_FORM_data1 = 0x0b,
- DW_FORM_flag = 0x0c,
- DW_FORM_sdata = 0x0d,
- DW_FORM_strp = 0x0e,
- DW_FORM_udata = 0x0f,
- DW_FORM_ref_addr = 0x10,
- DW_FORM_ref1 = 0x11,
- DW_FORM_ref2 = 0x12,
- DW_FORM_ref4 = 0x13,
- DW_FORM_ref8 = 0x14,
- DW_FORM_ref_udata = 0x15,
- DW_FORM_indirect = 0x16,
- DW_FORM_sec_offset = 0x17,
- DW_FORM_exprloc = 0x18,
- DW_FORM_flag_present = 0x19,
- DW_FORM_ref_sig8 = 0x20,
-
- // Extensions for Fission proposal
- DW_FORM_GNU_addr_index = 0x1f01,
- DW_FORM_GNU_str_index = 0x1f02,
-
- // Alternate debug sections proposal (output of "dwz" tool).
- DW_FORM_GNU_ref_alt = 0x1f20,
- DW_FORM_GNU_strp_alt = 0x1f21
+#define HANDLE_DW_FORM(ID, NAME) DW_FORM_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_FORM_lo_user = 0x1f00, ///< Not specified by DWARF.
};
enum LocationAtom {
#define HANDLE_DW_OP(ID, NAME) DW_OP_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_OP_lo_user = 0xe0,
- DW_OP_hi_user = 0xff
+ DW_OP_hi_user = 0xff,
+ DW_OP_LLVM_fragment = 0x1000 ///< Only used in LLVM metadata.
};
enum TypeKind {
@@ -372,6 +157,12 @@ enum VirtualityAttribute {
DW_VIRTUALITY_max = 0x02
};
+enum DefaultedMemberAttribute {
+#define HANDLE_DW_DEFAULTED(ID, NAME) DW_DEFAULTED_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_DEFAULTED_max = 0x02
+};
+
enum SourceLanguage {
#define HANDLE_DW_LANG(ID, NAME) DW_LANG_##NAME = ID,
#include "llvm/Support/Dwarf.def"
@@ -415,33 +206,27 @@ enum DiscriminantList {
DW_DSC_range = 0x01
};
+/// Line Number Standard Opcode Encodings.
enum LineNumberOps {
- // Line Number Standard Opcode Encodings
- DW_LNS_extended_op = 0x00,
- DW_LNS_copy = 0x01,
- DW_LNS_advance_pc = 0x02,
- DW_LNS_advance_line = 0x03,
- DW_LNS_set_file = 0x04,
- DW_LNS_set_column = 0x05,
- DW_LNS_negate_stmt = 0x06,
- DW_LNS_set_basic_block = 0x07,
- DW_LNS_const_add_pc = 0x08,
- DW_LNS_fixed_advance_pc = 0x09,
- DW_LNS_set_prologue_end = 0x0a,
- DW_LNS_set_epilogue_begin = 0x0b,
- DW_LNS_set_isa = 0x0c
+#define HANDLE_DW_LNS(ID, NAME) DW_LNS_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
};
+/// Line Number Extended Opcode Encodings.
enum LineNumberExtendedOps {
- // Line Number Extended Opcode Encodings
- DW_LNE_end_sequence = 0x01,
- DW_LNE_set_address = 0x02,
- DW_LNE_define_file = 0x03,
- DW_LNE_set_discriminator = 0x04,
+#define HANDLE_DW_LNE(ID, NAME) DW_LNE_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
DW_LNE_lo_user = 0x80,
DW_LNE_hi_user = 0xff
};
+enum LineNumberEntryFormat {
+#define HANDLE_DW_LNCT(ID, NAME) DW_LNCT_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_LNCT_lo_user = 0x2000,
+ DW_LNCT_hi_user = 0x3fff,
+};
+
enum MacinfoRecordType {
// Macinfo Type Encodings
DW_MACINFO_define = 0x01,
@@ -451,56 +236,27 @@ enum MacinfoRecordType {
DW_MACINFO_vendor_ext = 0xff
};
+/// DWARF v5 macro information entry type encodings.
enum MacroEntryType {
- // Macro Information Entry Type Encodings
- DW_MACRO_define = 0x01,
- DW_MACRO_undef = 0x02,
- DW_MACRO_start_file = 0x03,
- DW_MACRO_end_file = 0x04,
- DW_MACRO_define_indirect = 0x05,
- DW_MACRO_undef_indirect = 0x06,
- DW_MACRO_transparent_include = 0x07,
- DW_MACRO_define_indirect_sup = 0x08,
- DW_MACRO_undef_indirect_sup = 0x09,
- DW_MACRO_transparent_include_sup = 0x0a,
- DW_MACRO_define_indirectx = 0x0b,
- DW_MACRO_undef_indirectx = 0x0c,
+#define HANDLE_DW_MACRO(ID, NAME) DW_MACRO_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
DW_MACRO_lo_user = 0xe0,
DW_MACRO_hi_user = 0xff
};
+/// DWARF v5 range list entry encoding values.
+enum RangeListEntries {
+#define HANDLE_DW_RLE(ID, NAME) DW_RLE_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+};
+
+
+/// Call frame instruction encodings.
enum CallFrameInfo {
- // Call frame instruction encodings
+#define HANDLE_DW_CFA(ID, NAME) DW_CFA_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
DW_CFA_extended = 0x00,
- DW_CFA_nop = 0x00,
- DW_CFA_advance_loc = 0x40,
- DW_CFA_offset = 0x80,
- DW_CFA_restore = 0xc0,
- DW_CFA_set_loc = 0x01,
- DW_CFA_advance_loc1 = 0x02,
- DW_CFA_advance_loc2 = 0x03,
- DW_CFA_advance_loc4 = 0x04,
- DW_CFA_offset_extended = 0x05,
- DW_CFA_restore_extended = 0x06,
- DW_CFA_undefined = 0x07,
- DW_CFA_same_value = 0x08,
- DW_CFA_register = 0x09,
- DW_CFA_remember_state = 0x0a,
- DW_CFA_restore_state = 0x0b,
- DW_CFA_def_cfa = 0x0c,
- DW_CFA_def_cfa_register = 0x0d,
- DW_CFA_def_cfa_offset = 0x0e,
- DW_CFA_def_cfa_expression = 0x0f,
- DW_CFA_expression = 0x10,
- DW_CFA_offset_extended_sf = 0x11,
- DW_CFA_def_cfa_sf = 0x12,
- DW_CFA_def_cfa_offset_sf = 0x13,
- DW_CFA_val_offset = 0x14,
- DW_CFA_val_offset_sf = 0x15,
- DW_CFA_val_expression = 0x16,
- DW_CFA_MIPS_advance_loc8 = 0x1d,
- DW_CFA_GNU_window_save = 0x2d,
- DW_CFA_GNU_args_size = 0x2e,
+
DW_CFA_lo_user = 0x1c,
DW_CFA_hi_user = 0x3f
};
@@ -529,34 +285,24 @@ enum Constants {
DW_EH_PE_indirect = 0x80
};
-// Constants for debug_loc.dwo in the DWARF5 Split Debug Info Proposal
+/// Constants for location lists in DWARF v5.
enum LocationListEntry : unsigned char {
- DW_LLE_end_of_list_entry,
- DW_LLE_base_address_selection_entry,
- DW_LLE_start_end_entry,
- DW_LLE_start_length_entry,
- DW_LLE_offset_pair_entry
+ DW_LLE_end_of_list = 0x00,
+ DW_LLE_base_addressx = 0x01,
+ DW_LLE_startx_endx = 0x02,
+ DW_LLE_startx_length = 0x03,
+ DW_LLE_offset_pair = 0x04,
+ DW_LLE_default_location = 0x05,
+ DW_LLE_base_address = 0x06,
+ DW_LLE_start_end = 0x07,
+ DW_LLE_start_length = 0x08
};
/// Constants for the DW_APPLE_PROPERTY_attributes attribute.
-/// Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind.
+/// Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind!
enum ApplePropertyAttributes {
- // Apple Objective-C Property Attributes
- DW_APPLE_PROPERTY_readonly = 0x01,
- DW_APPLE_PROPERTY_getter = 0x02,
- DW_APPLE_PROPERTY_assign = 0x04,
- DW_APPLE_PROPERTY_readwrite = 0x08,
- DW_APPLE_PROPERTY_retain = 0x10,
- DW_APPLE_PROPERTY_copy = 0x20,
- DW_APPLE_PROPERTY_nonatomic = 0x40,
- DW_APPLE_PROPERTY_setter = 0x80,
- DW_APPLE_PROPERTY_atomic = 0x100,
- DW_APPLE_PROPERTY_weak = 0x200,
- DW_APPLE_PROPERTY_strong = 0x400,
- DW_APPLE_PROPERTY_unsafe_unretained = 0x800,
- DW_APPLE_PROPERTY_nullability = 0x1000,
- DW_APPLE_PROPERTY_null_resettable = 0x2000,
- DW_APPLE_PROPERTY_class = 0x4000
+#define HANDLE_DW_APPLE_PROPERTY(ID, NAME) DW_APPLE_PROPERTY_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
};
// Constants for the DWARF5 Accelerator Table Proposal
@@ -605,31 +351,31 @@ enum GDBIndexEntryLinkage {
/// known.
///
/// @{
-const char *TagString(unsigned Tag);
-const char *ChildrenString(unsigned Children);
-const char *AttributeString(unsigned Attribute);
-const char *FormEncodingString(unsigned Encoding);
-const char *OperationEncodingString(unsigned Encoding);
-const char *AttributeEncodingString(unsigned Encoding);
-const char *DecimalSignString(unsigned Sign);
-const char *EndianityString(unsigned Endian);
-const char *AccessibilityString(unsigned Access);
-const char *VisibilityString(unsigned Visibility);
-const char *VirtualityString(unsigned Virtuality);
-const char *LanguageString(unsigned Language);
-const char *CaseString(unsigned Case);
-const char *ConventionString(unsigned Convention);
-const char *InlineCodeString(unsigned Code);
-const char *ArrayOrderString(unsigned Order);
-const char *DiscriminantString(unsigned Discriminant);
-const char *LNStandardString(unsigned Standard);
-const char *LNExtendedString(unsigned Encoding);
-const char *MacinfoString(unsigned Encoding);
-const char *CallFrameString(unsigned Encoding);
-const char *ApplePropertyString(unsigned);
-const char *AtomTypeString(unsigned Atom);
-const char *GDBIndexEntryKindString(GDBIndexEntryKind Kind);
-const char *GDBIndexEntryLinkageString(GDBIndexEntryLinkage Linkage);
+StringRef TagString(unsigned Tag);
+StringRef ChildrenString(unsigned Children);
+StringRef AttributeString(unsigned Attribute);
+StringRef FormEncodingString(unsigned Encoding);
+StringRef OperationEncodingString(unsigned Encoding);
+StringRef AttributeEncodingString(unsigned Encoding);
+StringRef DecimalSignString(unsigned Sign);
+StringRef EndianityString(unsigned Endian);
+StringRef AccessibilityString(unsigned Access);
+StringRef VisibilityString(unsigned Visibility);
+StringRef VirtualityString(unsigned Virtuality);
+StringRef LanguageString(unsigned Language);
+StringRef CaseString(unsigned Case);
+StringRef ConventionString(unsigned Convention);
+StringRef InlineCodeString(unsigned Code);
+StringRef ArrayOrderString(unsigned Order);
+StringRef DiscriminantString(unsigned Discriminant);
+StringRef LNStandardString(unsigned Standard);
+StringRef LNExtendedString(unsigned Encoding);
+StringRef MacinfoString(unsigned Encoding);
+StringRef CallFrameString(unsigned Encoding);
+StringRef ApplePropertyString(unsigned);
+StringRef AtomTypeString(unsigned Atom);
+StringRef GDBIndexEntryKindString(GDBIndexEntryKind Kind);
+StringRef GDBIndexEntryLinkageString(GDBIndexEntryLinkage Linkage);
/// @}
/// \defgroup DwarfConstantsParsing Dwarf constants parsing functions
@@ -653,7 +399,7 @@ unsigned getMacinfo(StringRef MacinfoString);
/// \brief Returns the symbolic string representing Val when used as a value
/// for attribute Attr.
-const char *AttributeValueString(uint16_t Attr, unsigned Val);
+StringRef AttributeValueString(uint16_t Attr, unsigned Val);
/// \brief Describes an entry of the various gnu_pub* debug sections.
///
@@ -677,7 +423,9 @@ struct PubIndexEntryDescriptor {
KIND_OFFSET)),
Linkage(static_cast<GDBIndexEntryLinkage>((Value & LINKAGE_MASK) >>
LINKAGE_OFFSET)) {}
- uint8_t toBits() { return Kind << KIND_OFFSET | Linkage << LINKAGE_OFFSET; }
+ uint8_t toBits() const {
+ return Kind << KIND_OFFSET | Linkage << LINKAGE_OFFSET;
+ }
private:
enum {
@@ -688,6 +436,9 @@ private:
};
};
+/// Constants that define the DWARF format as 32 or 64 bit.
+enum DwarfFormat { DWARF32, DWARF64 };
+
} // End of namespace dwarf
} // End of namespace llvm
diff --git a/include/llvm/Support/ELF.h b/include/llvm/Support/ELF.h
index 70b9daab8360..3ea4da81ad94 100644
--- a/include/llvm/Support/ELF.h
+++ b/include/llvm/Support/ELF.h
@@ -32,49 +32,49 @@ typedef uint32_t Elf32_Addr; // Program address
typedef uint32_t Elf32_Off; // File offset
typedef uint16_t Elf32_Half;
typedef uint32_t Elf32_Word;
-typedef int32_t Elf32_Sword;
+typedef int32_t Elf32_Sword;
typedef uint64_t Elf64_Addr;
typedef uint64_t Elf64_Off;
typedef uint16_t Elf64_Half;
typedef uint32_t Elf64_Word;
-typedef int32_t Elf64_Sword;
+typedef int32_t Elf64_Sword;
typedef uint64_t Elf64_Xword;
-typedef int64_t Elf64_Sxword;
+typedef int64_t Elf64_Sxword;
// Object file magic string.
-static const char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' };
+static const char ElfMagic[] = {0x7f, 'E', 'L', 'F', '\0'};
// e_ident size and indices.
enum {
- EI_MAG0 = 0, // File identification index.
- EI_MAG1 = 1, // File identification index.
- EI_MAG2 = 2, // File identification index.
- EI_MAG3 = 3, // File identification index.
- EI_CLASS = 4, // File class.
- EI_DATA = 5, // Data encoding.
- EI_VERSION = 6, // File version.
- EI_OSABI = 7, // OS/ABI identification.
- EI_ABIVERSION = 8, // ABI version.
- EI_PAD = 9, // Start of padding bytes.
- EI_NIDENT = 16 // Number of bytes in e_ident.
+ EI_MAG0 = 0, // File identification index.
+ EI_MAG1 = 1, // File identification index.
+ EI_MAG2 = 2, // File identification index.
+ EI_MAG3 = 3, // File identification index.
+ EI_CLASS = 4, // File class.
+ EI_DATA = 5, // Data encoding.
+ EI_VERSION = 6, // File version.
+ EI_OSABI = 7, // OS/ABI identification.
+ EI_ABIVERSION = 8, // ABI version.
+ EI_PAD = 9, // Start of padding bytes.
+ EI_NIDENT = 16 // Number of bytes in e_ident.
};
struct Elf32_Ehdr {
unsigned char e_ident[EI_NIDENT]; // ELF Identification bytes
- Elf32_Half e_type; // Type of file (see ET_* below)
- Elf32_Half e_machine; // Required architecture for this file (see EM_*)
- Elf32_Word e_version; // Must be equal to 1
- Elf32_Addr e_entry; // Address to jump to in order to start program
- Elf32_Off e_phoff; // Program header table's file offset, in bytes
- Elf32_Off e_shoff; // Section header table's file offset, in bytes
- Elf32_Word e_flags; // Processor-specific flags
- Elf32_Half e_ehsize; // Size of ELF header, in bytes
- Elf32_Half e_phentsize; // Size of an entry in the program header table
- Elf32_Half e_phnum; // Number of entries in the program header table
- Elf32_Half e_shentsize; // Size of an entry in the section header table
- Elf32_Half e_shnum; // Number of entries in the section header table
- Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
+ Elf32_Half e_type; // Type of file (see ET_* below)
+ Elf32_Half e_machine; // Required architecture for this file (see EM_*)
+ Elf32_Word e_version; // Must be equal to 1
+ Elf32_Addr e_entry; // Address to jump to in order to start program
+ Elf32_Off e_phoff; // Program header table's file offset, in bytes
+ Elf32_Off e_shoff; // Section header table's file offset, in bytes
+ Elf32_Word e_flags; // Processor-specific flags
+ Elf32_Half e_ehsize; // Size of ELF header, in bytes
+ Elf32_Half e_phentsize; // Size of an entry in the program header table
+ Elf32_Half e_phnum; // Number of entries in the program header table
+ Elf32_Half e_shentsize; // Size of an entry in the section header table
+ Elf32_Half e_shnum; // Number of entries in the section header table
+ Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
bool checkMagic() const {
return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
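checkMagic() above only compares the leading e_ident bytes against ElfMagic; a hedged sketch of how a caller might apply it to a raw buffer (the helper name and the size check are illustrative, not part of the header):

#include <cstddef>
#include <cstring>
#include "llvm/Support/ELF.h"

// Copies the candidate header out of the buffer and checks the magic bytes.
bool looksLikeElf32(const unsigned char *Buf, std::size_t Size) {
  if (Size < sizeof(llvm::ELF::Elf32_Ehdr))
    return false;
  llvm::ELF::Elf32_Ehdr Hdr;
  std::memcpy(&Hdr, Buf, sizeof(Hdr));
  return Hdr.checkMagic();
}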
@@ -86,19 +86,19 @@ struct Elf32_Ehdr {
// types (see above).
struct Elf64_Ehdr {
unsigned char e_ident[EI_NIDENT];
- Elf64_Half e_type;
- Elf64_Half e_machine;
- Elf64_Word e_version;
- Elf64_Addr e_entry;
- Elf64_Off e_phoff;
- Elf64_Off e_shoff;
- Elf64_Word e_flags;
- Elf64_Half e_ehsize;
- Elf64_Half e_phentsize;
- Elf64_Half e_phnum;
- Elf64_Half e_shentsize;
- Elf64_Half e_shnum;
- Elf64_Half e_shstrndx;
+ Elf64_Half e_type;
+ Elf64_Half e_machine;
+ Elf64_Word e_version;
+ Elf64_Addr e_entry;
+ Elf64_Off e_phoff;
+ Elf64_Off e_shoff;
+ Elf64_Word e_flags;
+ Elf64_Half e_ehsize;
+ Elf64_Half e_phentsize;
+ Elf64_Half e_phnum;
+ Elf64_Half e_shentsize;
+ Elf64_Half e_shnum;
+ Elf64_Half e_shstrndx;
bool checkMagic() const {
return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
@@ -108,215 +108,213 @@ struct Elf64_Ehdr {
// File types
enum {
- ET_NONE = 0, // No file type
- ET_REL = 1, // Relocatable file
- ET_EXEC = 2, // Executable file
- ET_DYN = 3, // Shared object file
- ET_CORE = 4, // Core file
+ ET_NONE = 0, // No file type
+ ET_REL = 1, // Relocatable file
+ ET_EXEC = 2, // Executable file
+ ET_DYN = 3, // Shared object file
+ ET_CORE = 4, // Core file
ET_LOPROC = 0xff00, // Beginning of processor-specific codes
ET_HIPROC = 0xffff // Processor-specific
};
// Versioning
-enum {
- EV_NONE = 0,
- EV_CURRENT = 1
-};
+enum { EV_NONE = 0, EV_CURRENT = 1 };
// Machine architectures
// See current registered ELF machine architectures at:
// http://www.uxsglobal.com/developers/gabi/latest/ch4.eheader.html
enum {
- EM_NONE = 0, // No machine
- EM_M32 = 1, // AT&T WE 32100
- EM_SPARC = 2, // SPARC
- EM_386 = 3, // Intel 386
- EM_68K = 4, // Motorola 68000
- EM_88K = 5, // Motorola 88000
- EM_IAMCU = 6, // Intel MCU
- EM_860 = 7, // Intel 80860
- EM_MIPS = 8, // MIPS R3000
- EM_S370 = 9, // IBM System/370
- EM_MIPS_RS3_LE = 10, // MIPS RS3000 Little-endian
- EM_PARISC = 15, // Hewlett-Packard PA-RISC
- EM_VPP500 = 17, // Fujitsu VPP500
- EM_SPARC32PLUS = 18, // Enhanced instruction set SPARC
- EM_960 = 19, // Intel 80960
- EM_PPC = 20, // PowerPC
- EM_PPC64 = 21, // PowerPC64
- EM_S390 = 22, // IBM System/390
- EM_SPU = 23, // IBM SPU/SPC
- EM_V800 = 36, // NEC V800
- EM_FR20 = 37, // Fujitsu FR20
- EM_RH32 = 38, // TRW RH-32
- EM_RCE = 39, // Motorola RCE
- EM_ARM = 40, // ARM
- EM_ALPHA = 41, // DEC Alpha
- EM_SH = 42, // Hitachi SH
- EM_SPARCV9 = 43, // SPARC V9
- EM_TRICORE = 44, // Siemens TriCore
- EM_ARC = 45, // Argonaut RISC Core
- EM_H8_300 = 46, // Hitachi H8/300
- EM_H8_300H = 47, // Hitachi H8/300H
- EM_H8S = 48, // Hitachi H8S
- EM_H8_500 = 49, // Hitachi H8/500
- EM_IA_64 = 50, // Intel IA-64 processor architecture
- EM_MIPS_X = 51, // Stanford MIPS-X
- EM_COLDFIRE = 52, // Motorola ColdFire
- EM_68HC12 = 53, // Motorola M68HC12
- EM_MMA = 54, // Fujitsu MMA Multimedia Accelerator
- EM_PCP = 55, // Siemens PCP
- EM_NCPU = 56, // Sony nCPU embedded RISC processor
- EM_NDR1 = 57, // Denso NDR1 microprocessor
- EM_STARCORE = 58, // Motorola Star*Core processor
- EM_ME16 = 59, // Toyota ME16 processor
- EM_ST100 = 60, // STMicroelectronics ST100 processor
- EM_TINYJ = 61, // Advanced Logic Corp. TinyJ embedded processor family
- EM_X86_64 = 62, // AMD x86-64 architecture
- EM_PDSP = 63, // Sony DSP Processor
- EM_PDP10 = 64, // Digital Equipment Corp. PDP-10
- EM_PDP11 = 65, // Digital Equipment Corp. PDP-11
- EM_FX66 = 66, // Siemens FX66 microcontroller
- EM_ST9PLUS = 67, // STMicroelectronics ST9+ 8/16 bit microcontroller
- EM_ST7 = 68, // STMicroelectronics ST7 8-bit microcontroller
- EM_68HC16 = 69, // Motorola MC68HC16 Microcontroller
- EM_68HC11 = 70, // Motorola MC68HC11 Microcontroller
- EM_68HC08 = 71, // Motorola MC68HC08 Microcontroller
- EM_68HC05 = 72, // Motorola MC68HC05 Microcontroller
- EM_SVX = 73, // Silicon Graphics SVx
- EM_ST19 = 74, // STMicroelectronics ST19 8-bit microcontroller
- EM_VAX = 75, // Digital VAX
- EM_CRIS = 76, // Axis Communications 32-bit embedded processor
- EM_JAVELIN = 77, // Infineon Technologies 32-bit embedded processor
- EM_FIREPATH = 78, // Element 14 64-bit DSP Processor
- EM_ZSP = 79, // LSI Logic 16-bit DSP Processor
- EM_MMIX = 80, // Donald Knuth's educational 64-bit processor
- EM_HUANY = 81, // Harvard University machine-independent object files
- EM_PRISM = 82, // SiTera Prism
- EM_AVR = 83, // Atmel AVR 8-bit microcontroller
- EM_FR30 = 84, // Fujitsu FR30
- EM_D10V = 85, // Mitsubishi D10V
- EM_D30V = 86, // Mitsubishi D30V
- EM_V850 = 87, // NEC v850
- EM_M32R = 88, // Mitsubishi M32R
- EM_MN10300 = 89, // Matsushita MN10300
- EM_MN10200 = 90, // Matsushita MN10200
- EM_PJ = 91, // picoJava
- EM_OPENRISC = 92, // OpenRISC 32-bit embedded processor
- EM_ARC_COMPACT = 93, // ARC International ARCompact processor (old
+ EM_NONE = 0, // No machine
+ EM_M32 = 1, // AT&T WE 32100
+ EM_SPARC = 2, // SPARC
+ EM_386 = 3, // Intel 386
+ EM_68K = 4, // Motorola 68000
+ EM_88K = 5, // Motorola 88000
+ EM_IAMCU = 6, // Intel MCU
+ EM_860 = 7, // Intel 80860
+ EM_MIPS = 8, // MIPS R3000
+ EM_S370 = 9, // IBM System/370
+ EM_MIPS_RS3_LE = 10, // MIPS RS3000 Little-endian
+ EM_PARISC = 15, // Hewlett-Packard PA-RISC
+ EM_VPP500 = 17, // Fujitsu VPP500
+ EM_SPARC32PLUS = 18, // Enhanced instruction set SPARC
+ EM_960 = 19, // Intel 80960
+ EM_PPC = 20, // PowerPC
+ EM_PPC64 = 21, // PowerPC64
+ EM_S390 = 22, // IBM System/390
+ EM_SPU = 23, // IBM SPU/SPC
+ EM_V800 = 36, // NEC V800
+ EM_FR20 = 37, // Fujitsu FR20
+ EM_RH32 = 38, // TRW RH-32
+ EM_RCE = 39, // Motorola RCE
+ EM_ARM = 40, // ARM
+ EM_ALPHA = 41, // DEC Alpha
+ EM_SH = 42, // Hitachi SH
+ EM_SPARCV9 = 43, // SPARC V9
+ EM_TRICORE = 44, // Siemens TriCore
+ EM_ARC = 45, // Argonaut RISC Core
+ EM_H8_300 = 46, // Hitachi H8/300
+ EM_H8_300H = 47, // Hitachi H8/300H
+ EM_H8S = 48, // Hitachi H8S
+ EM_H8_500 = 49, // Hitachi H8/500
+ EM_IA_64 = 50, // Intel IA-64 processor architecture
+ EM_MIPS_X = 51, // Stanford MIPS-X
+ EM_COLDFIRE = 52, // Motorola ColdFire
+ EM_68HC12 = 53, // Motorola M68HC12
+ EM_MMA = 54, // Fujitsu MMA Multimedia Accelerator
+ EM_PCP = 55, // Siemens PCP
+ EM_NCPU = 56, // Sony nCPU embedded RISC processor
+ EM_NDR1 = 57, // Denso NDR1 microprocessor
+ EM_STARCORE = 58, // Motorola Star*Core processor
+ EM_ME16 = 59, // Toyota ME16 processor
+ EM_ST100 = 60, // STMicroelectronics ST100 processor
+ EM_TINYJ = 61, // Advanced Logic Corp. TinyJ embedded processor family
+ EM_X86_64 = 62, // AMD x86-64 architecture
+ EM_PDSP = 63, // Sony DSP Processor
+ EM_PDP10 = 64, // Digital Equipment Corp. PDP-10
+ EM_PDP11 = 65, // Digital Equipment Corp. PDP-11
+ EM_FX66 = 66, // Siemens FX66 microcontroller
+ EM_ST9PLUS = 67, // STMicroelectronics ST9+ 8/16 bit microcontroller
+ EM_ST7 = 68, // STMicroelectronics ST7 8-bit microcontroller
+ EM_68HC16 = 69, // Motorola MC68HC16 Microcontroller
+ EM_68HC11 = 70, // Motorola MC68HC11 Microcontroller
+ EM_68HC08 = 71, // Motorola MC68HC08 Microcontroller
+ EM_68HC05 = 72, // Motorola MC68HC05 Microcontroller
+ EM_SVX = 73, // Silicon Graphics SVx
+ EM_ST19 = 74, // STMicroelectronics ST19 8-bit microcontroller
+ EM_VAX = 75, // Digital VAX
+ EM_CRIS = 76, // Axis Communications 32-bit embedded processor
+ EM_JAVELIN = 77, // Infineon Technologies 32-bit embedded processor
+ EM_FIREPATH = 78, // Element 14 64-bit DSP Processor
+ EM_ZSP = 79, // LSI Logic 16-bit DSP Processor
+ EM_MMIX = 80, // Donald Knuth's educational 64-bit processor
+ EM_HUANY = 81, // Harvard University machine-independent object files
+ EM_PRISM = 82, // SiTera Prism
+ EM_AVR = 83, // Atmel AVR 8-bit microcontroller
+ EM_FR30 = 84, // Fujitsu FR30
+ EM_D10V = 85, // Mitsubishi D10V
+ EM_D30V = 86, // Mitsubishi D30V
+ EM_V850 = 87, // NEC v850
+ EM_M32R = 88, // Mitsubishi M32R
+ EM_MN10300 = 89, // Matsushita MN10300
+ EM_MN10200 = 90, // Matsushita MN10200
+ EM_PJ = 91, // picoJava
+ EM_OPENRISC = 92, // OpenRISC 32-bit embedded processor
+ EM_ARC_COMPACT = 93, // ARC International ARCompact processor (old
// spelling/synonym: EM_ARC_A5)
- EM_XTENSA = 94, // Tensilica Xtensa Architecture
- EM_VIDEOCORE = 95, // Alphamosaic VideoCore processor
- EM_TMM_GPP = 96, // Thompson Multimedia General Purpose Processor
- EM_NS32K = 97, // National Semiconductor 32000 series
- EM_TPC = 98, // Tenor Network TPC processor
- EM_SNP1K = 99, // Trebia SNP 1000 processor
- EM_ST200 = 100, // STMicroelectronics (www.st.com) ST200
- EM_IP2K = 101, // Ubicom IP2xxx microcontroller family
- EM_MAX = 102, // MAX Processor
- EM_CR = 103, // National Semiconductor CompactRISC microprocessor
- EM_F2MC16 = 104, // Fujitsu F2MC16
- EM_MSP430 = 105, // Texas Instruments embedded microcontroller msp430
- EM_BLACKFIN = 106, // Analog Devices Blackfin (DSP) processor
- EM_SE_C33 = 107, // S1C33 Family of Seiko Epson processors
- EM_SEP = 108, // Sharp embedded microprocessor
- EM_ARCA = 109, // Arca RISC Microprocessor
- EM_UNICORE = 110, // Microprocessor series from PKU-Unity Ltd. and MPRC
- // of Peking University
- EM_EXCESS = 111, // eXcess: 16/32/64-bit configurable embedded CPU
- EM_DXP = 112, // Icera Semiconductor Inc. Deep Execution Processor
- EM_ALTERA_NIOS2 = 113, // Altera Nios II soft-core processor
- EM_CRX = 114, // National Semiconductor CompactRISC CRX
- EM_XGATE = 115, // Motorola XGATE embedded processor
- EM_C166 = 116, // Infineon C16x/XC16x processor
- EM_M16C = 117, // Renesas M16C series microprocessors
- EM_DSPIC30F = 118, // Microchip Technology dsPIC30F Digital Signal
- // Controller
- EM_CE = 119, // Freescale Communication Engine RISC core
- EM_M32C = 120, // Renesas M32C series microprocessors
- EM_TSK3000 = 131, // Altium TSK3000 core
- EM_RS08 = 132, // Freescale RS08 embedded processor
- EM_SHARC = 133, // Analog Devices SHARC family of 32-bit DSP
- // processors
- EM_ECOG2 = 134, // Cyan Technology eCOG2 microprocessor
- EM_SCORE7 = 135, // Sunplus S+core7 RISC processor
- EM_DSP24 = 136, // New Japan Radio (NJR) 24-bit DSP Processor
- EM_VIDEOCORE3 = 137, // Broadcom VideoCore III processor
+ EM_XTENSA = 94, // Tensilica Xtensa Architecture
+ EM_VIDEOCORE = 95, // Alphamosaic VideoCore processor
+ EM_TMM_GPP = 96, // Thompson Multimedia General Purpose Processor
+ EM_NS32K = 97, // National Semiconductor 32000 series
+ EM_TPC = 98, // Tenor Network TPC processor
+ EM_SNP1K = 99, // Trebia SNP 1000 processor
+ EM_ST200 = 100, // STMicroelectronics (www.st.com) ST200
+ EM_IP2K = 101, // Ubicom IP2xxx microcontroller family
+ EM_MAX = 102, // MAX Processor
+ EM_CR = 103, // National Semiconductor CompactRISC microprocessor
+ EM_F2MC16 = 104, // Fujitsu F2MC16
+ EM_MSP430 = 105, // Texas Instruments embedded microcontroller msp430
+ EM_BLACKFIN = 106, // Analog Devices Blackfin (DSP) processor
+ EM_SE_C33 = 107, // S1C33 Family of Seiko Epson processors
+ EM_SEP = 108, // Sharp embedded microprocessor
+ EM_ARCA = 109, // Arca RISC Microprocessor
+ EM_UNICORE = 110, // Microprocessor series from PKU-Unity Ltd. and MPRC
+ // of Peking University
+ EM_EXCESS = 111, // eXcess: 16/32/64-bit configurable embedded CPU
+ EM_DXP = 112, // Icera Semiconductor Inc. Deep Execution Processor
+ EM_ALTERA_NIOS2 = 113, // Altera Nios II soft-core processor
+ EM_CRX = 114, // National Semiconductor CompactRISC CRX
+ EM_XGATE = 115, // Motorola XGATE embedded processor
+ EM_C166 = 116, // Infineon C16x/XC16x processor
+ EM_M16C = 117, // Renesas M16C series microprocessors
+ EM_DSPIC30F = 118, // Microchip Technology dsPIC30F Digital Signal
+ // Controller
+ EM_CE = 119, // Freescale Communication Engine RISC core
+ EM_M32C = 120, // Renesas M32C series microprocessors
+ EM_TSK3000 = 131, // Altium TSK3000 core
+ EM_RS08 = 132, // Freescale RS08 embedded processor
+ EM_SHARC = 133, // Analog Devices SHARC family of 32-bit DSP
+ // processors
+ EM_ECOG2 = 134, // Cyan Technology eCOG2 microprocessor
+ EM_SCORE7 = 135, // Sunplus S+core7 RISC processor
+ EM_DSP24 = 136, // New Japan Radio (NJR) 24-bit DSP Processor
+ EM_VIDEOCORE3 = 137, // Broadcom VideoCore III processor
EM_LATTICEMICO32 = 138, // RISC processor for Lattice FPGA architecture
- EM_SE_C17 = 139, // Seiko Epson C17 family
- EM_TI_C6000 = 140, // The Texas Instruments TMS320C6000 DSP family
- EM_TI_C2000 = 141, // The Texas Instruments TMS320C2000 DSP family
- EM_TI_C5500 = 142, // The Texas Instruments TMS320C55x DSP family
- EM_MMDSP_PLUS = 160, // STMicroelectronics 64bit VLIW Data Signal Processor
- EM_CYPRESS_M8C = 161, // Cypress M8C microprocessor
- EM_R32C = 162, // Renesas R32C series microprocessors
- EM_TRIMEDIA = 163, // NXP Semiconductors TriMedia architecture family
- EM_HEXAGON = 164, // Qualcomm Hexagon processor
- EM_8051 = 165, // Intel 8051 and variants
- EM_STXP7X = 166, // STMicroelectronics STxP7x family of configurable
+ EM_SE_C17 = 139, // Seiko Epson C17 family
+ EM_TI_C6000 = 140, // The Texas Instruments TMS320C6000 DSP family
+ EM_TI_C2000 = 141, // The Texas Instruments TMS320C2000 DSP family
+ EM_TI_C5500 = 142, // The Texas Instruments TMS320C55x DSP family
+ EM_MMDSP_PLUS = 160, // STMicroelectronics 64bit VLIW Data Signal Processor
+ EM_CYPRESS_M8C = 161, // Cypress M8C microprocessor
+ EM_R32C = 162, // Renesas R32C series microprocessors
+ EM_TRIMEDIA = 163, // NXP Semiconductors TriMedia architecture family
+ EM_HEXAGON = 164, // Qualcomm Hexagon processor
+ EM_8051 = 165, // Intel 8051 and variants
+ EM_STXP7X = 166, // STMicroelectronics STxP7x family of configurable
// and extensible RISC processors
- EM_NDS32 = 167, // Andes Technology compact code size embedded RISC
+ EM_NDS32 = 167, // Andes Technology compact code size embedded RISC
// processor family
- EM_ECOG1 = 168, // Cyan Technology eCOG1X family
- EM_ECOG1X = 168, // Cyan Technology eCOG1X family
- EM_MAXQ30 = 169, // Dallas Semiconductor MAXQ30 Core Micro-controllers
- EM_XIMO16 = 170, // New Japan Radio (NJR) 16-bit DSP Processor
- EM_MANIK = 171, // M2000 Reconfigurable RISC Microprocessor
- EM_CRAYNV2 = 172, // Cray Inc. NV2 vector architecture
- EM_RX = 173, // Renesas RX family
- EM_METAG = 174, // Imagination Technologies META processor
+ EM_ECOG1 = 168, // Cyan Technology eCOG1X family
+ EM_ECOG1X = 168, // Cyan Technology eCOG1X family
+ EM_MAXQ30 = 169, // Dallas Semiconductor MAXQ30 Core Micro-controllers
+ EM_XIMO16 = 170, // New Japan Radio (NJR) 16-bit DSP Processor
+ EM_MANIK = 171, // M2000 Reconfigurable RISC Microprocessor
+ EM_CRAYNV2 = 172, // Cray Inc. NV2 vector architecture
+ EM_RX = 173, // Renesas RX family
+ EM_METAG = 174, // Imagination Technologies META processor
// architecture
- EM_MCST_ELBRUS = 175, // MCST Elbrus general purpose hardware architecture
- EM_ECOG16 = 176, // Cyan Technology eCOG16 family
- EM_CR16 = 177, // National Semiconductor CompactRISC CR16 16-bit
+ EM_MCST_ELBRUS = 175, // MCST Elbrus general purpose hardware architecture
+ EM_ECOG16 = 176, // Cyan Technology eCOG16 family
+ EM_CR16 = 177, // National Semiconductor CompactRISC CR16 16-bit
// microprocessor
- EM_ETPU = 178, // Freescale Extended Time Processing Unit
- EM_SLE9X = 179, // Infineon Technologies SLE9X core
- EM_L10M = 180, // Intel L10M
- EM_K10M = 181, // Intel K10M
- EM_AARCH64 = 183, // ARM AArch64
- EM_AVR32 = 185, // Atmel Corporation 32-bit microprocessor family
- EM_STM8 = 186, // STMicroeletronics STM8 8-bit microcontroller
- EM_TILE64 = 187, // Tilera TILE64 multicore architecture family
- EM_TILEPRO = 188, // Tilera TILEPro multicore architecture family
- EM_CUDA = 190, // NVIDIA CUDA architecture
- EM_TILEGX = 191, // Tilera TILE-Gx multicore architecture family
- EM_CLOUDSHIELD = 192, // CloudShield architecture family
- EM_COREA_1ST = 193, // KIPO-KAIST Core-A 1st generation processor family
- EM_COREA_2ND = 194, // KIPO-KAIST Core-A 2nd generation processor family
- EM_ARC_COMPACT2 = 195, // Synopsys ARCompact V2
- EM_OPEN8 = 196, // Open8 8-bit RISC soft processor core
- EM_RL78 = 197, // Renesas RL78 family
- EM_VIDEOCORE5 = 198, // Broadcom VideoCore V processor
- EM_78KOR = 199, // Renesas 78KOR family
- EM_56800EX = 200, // Freescale 56800EX Digital Signal Controller (DSC)
- EM_BA1 = 201, // Beyond BA1 CPU architecture
- EM_BA2 = 202, // Beyond BA2 CPU architecture
- EM_XCORE = 203, // XMOS xCORE processor family
- EM_MCHP_PIC = 204, // Microchip 8-bit PIC(r) family
- EM_INTEL205 = 205, // Reserved by Intel
- EM_INTEL206 = 206, // Reserved by Intel
- EM_INTEL207 = 207, // Reserved by Intel
- EM_INTEL208 = 208, // Reserved by Intel
- EM_INTEL209 = 209, // Reserved by Intel
- EM_KM32 = 210, // KM211 KM32 32-bit processor
- EM_KMX32 = 211, // KM211 KMX32 32-bit processor
- EM_KMX16 = 212, // KM211 KMX16 16-bit processor
- EM_KMX8 = 213, // KM211 KMX8 8-bit processor
- EM_KVARC = 214, // KM211 KVARC processor
- EM_CDP = 215, // Paneve CDP architecture family
- EM_COGE = 216, // Cognitive Smart Memory Processor
- EM_COOL = 217, // iCelero CoolEngine
- EM_NORC = 218, // Nanoradio Optimized RISC
- EM_CSR_KALIMBA = 219, // CSR Kalimba architecture family
- EM_AMDGPU = 224, // AMD GPU architecture
- EM_LANAI = 244, // Lanai 32-bit processor
- EM_BPF = 247, // Linux kernel bpf virtual machine
+ EM_ETPU = 178, // Freescale Extended Time Processing Unit
+ EM_SLE9X = 179, // Infineon Technologies SLE9X core
+ EM_L10M = 180, // Intel L10M
+ EM_K10M = 181, // Intel K10M
+ EM_AARCH64 = 183, // ARM AArch64
+ EM_AVR32 = 185, // Atmel Corporation 32-bit microprocessor family
+ EM_STM8 = 186, // STMicroeletronics STM8 8-bit microcontroller
+ EM_TILE64 = 187, // Tilera TILE64 multicore architecture family
+ EM_TILEPRO = 188, // Tilera TILEPro multicore architecture family
+ EM_CUDA = 190, // NVIDIA CUDA architecture
+ EM_TILEGX = 191, // Tilera TILE-Gx multicore architecture family
+ EM_CLOUDSHIELD = 192, // CloudShield architecture family
+ EM_COREA_1ST = 193, // KIPO-KAIST Core-A 1st generation processor family
+ EM_COREA_2ND = 194, // KIPO-KAIST Core-A 2nd generation processor family
+ EM_ARC_COMPACT2 = 195, // Synopsys ARCompact V2
+ EM_OPEN8 = 196, // Open8 8-bit RISC soft processor core
+ EM_RL78 = 197, // Renesas RL78 family
+ EM_VIDEOCORE5 = 198, // Broadcom VideoCore V processor
+ EM_78KOR = 199, // Renesas 78KOR family
+ EM_56800EX = 200, // Freescale 56800EX Digital Signal Controller (DSC)
+ EM_BA1 = 201, // Beyond BA1 CPU architecture
+ EM_BA2 = 202, // Beyond BA2 CPU architecture
+ EM_XCORE = 203, // XMOS xCORE processor family
+ EM_MCHP_PIC = 204, // Microchip 8-bit PIC(r) family
+ EM_INTEL205 = 205, // Reserved by Intel
+ EM_INTEL206 = 206, // Reserved by Intel
+ EM_INTEL207 = 207, // Reserved by Intel
+ EM_INTEL208 = 208, // Reserved by Intel
+ EM_INTEL209 = 209, // Reserved by Intel
+ EM_KM32 = 210, // KM211 KM32 32-bit processor
+ EM_KMX32 = 211, // KM211 KMX32 32-bit processor
+ EM_KMX16 = 212, // KM211 KMX16 16-bit processor
+ EM_KMX8 = 213, // KM211 KMX8 8-bit processor
+ EM_KVARC = 214, // KM211 KVARC processor
+ EM_CDP = 215, // Paneve CDP architecture family
+ EM_COGE = 216, // Cognitive Smart Memory Processor
+ EM_COOL = 217, // iCelero CoolEngine
+ EM_NORC = 218, // Nanoradio Optimized RISC
+ EM_CSR_KALIMBA = 219, // CSR Kalimba architecture family
+ EM_AMDGPU = 224, // AMD GPU architecture
+ EM_RISCV = 243, // RISC-V
+ EM_LANAI = 244, // Lanai 32-bit processor
+ EM_BPF = 247, // Linux kernel bpf virtual machine
// A request has been made to the maintainer of the official registry for
// such numbers for an official value for WebAssembly. As soon as one is
// allocated, this enum will be updated to use it.
- EM_WEBASSEMBLY = 0x4157, // WebAssembly architecture
+ EM_WEBASSEMBLY = 0x4157, // WebAssembly architecture
};
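The only semantic change in this hunk is the new EM_RISCV = 243 entry (the rest is re-alignment); a small sketch of checking e_machine against it, with the helper name being an assumption:

#include "llvm/Support/ELF.h"

// True when the header describes a RISC-V object file.
bool isRISCVObject(const llvm::ELF::Elf64_Ehdr &Hdr) {
  return Hdr.e_machine == llvm::ELF::EM_RISCV;
}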
// Object file classes.
@@ -391,20 +389,14 @@ enum {
STO_PPC64_LOCAL_BIT = 5,
STO_PPC64_LOCAL_MASK = (7 << STO_PPC64_LOCAL_BIT)
};
-static inline int64_t
-decodePPC64LocalEntryOffset(unsigned Other) {
+static inline int64_t decodePPC64LocalEntryOffset(unsigned Other) {
unsigned Val = (Other & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT;
return ((1 << Val) >> 2) << 2;
}
-static inline unsigned
-encodePPC64LocalEntryOffset(int64_t Offset) {
- unsigned Val = (Offset >= 4 * 4
- ? (Offset >= 8 * 4
- ? (Offset >= 16 * 4 ? 6 : 5)
- : 4)
- : (Offset >= 2 * 4
- ? 3
- : (Offset >= 1 * 4 ? 2 : 0)));
+static inline unsigned encodePPC64LocalEntryOffset(int64_t Offset) {
+ unsigned Val =
+ (Offset >= 4 * 4 ? (Offset >= 8 * 4 ? (Offset >= 16 * 4 ? 6 : 5) : 4)
+ : (Offset >= 2 * 4 ? 3 : (Offset >= 1 * 4 ? 2 : 0)));
return Val << STO_PPC64_LOCAL_BIT;
}
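A worked example of the PPC64 local-entry helpers above, as a sketch relying only on the constants shown (STO_PPC64_LOCAL_BIT == 5): a 16-byte offset encodes as Val = 4, i.e. 4 << 5 = 0x80 in the st_other bits, and decoding recovers 16.

#include <cassert>
#include "llvm/Support/ELF.h"

void checkLocalEntryRoundTrip() {
  // 16-byte offset -> Val = 4 -> 0x80 once shifted into the st_other bits.
  unsigned Other = llvm::ELF::encodePPC64LocalEntryOffset(16);
  assert(Other == (4u << llvm::ELF::STO_PPC64_LOCAL_BIT));
  assert(llvm::ELF::decodePPC64LocalEntryOffset(Other) == 16);
  // Offsets below one instruction (4 bytes) encode as 0 and decode back to 0.
  assert(llvm::ELF::decodePPC64LocalEntryOffset(
             llvm::ELF::encodePPC64LocalEntryOffset(0)) == 0);
}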
@@ -420,15 +412,15 @@ enum {
// ARM Specific e_flags
enum : unsigned {
- EF_ARM_SOFT_FLOAT = 0x00000200U,
- EF_ARM_VFP_FLOAT = 0x00000400U,
- EF_ARM_EABI_UNKNOWN = 0x00000000U,
- EF_ARM_EABI_VER1 = 0x01000000U,
- EF_ARM_EABI_VER2 = 0x02000000U,
- EF_ARM_EABI_VER3 = 0x03000000U,
- EF_ARM_EABI_VER4 = 0x04000000U,
- EF_ARM_EABI_VER5 = 0x05000000U,
- EF_ARM_EABIMASK = 0xFF000000U
+ EF_ARM_SOFT_FLOAT = 0x00000200U,
+ EF_ARM_VFP_FLOAT = 0x00000400U,
+ EF_ARM_EABI_UNKNOWN = 0x00000000U,
+ EF_ARM_EABI_VER1 = 0x01000000U,
+ EF_ARM_EABI_VER2 = 0x02000000U,
+ EF_ARM_EABI_VER3 = 0x03000000U,
+ EF_ARM_EABI_VER4 = 0x04000000U,
+ EF_ARM_EABI_VER5 = 0x05000000U,
+ EF_ARM_EABIMASK = 0xFF000000U
};
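The EABI version occupies the top byte of e_flags, so the mask above is applied before comparing against one of the versioned constants; a hedged sketch (helper name is illustrative):

#include "llvm/Support/ELF.h"

// True when e_flags declares EABI version 5 (the top byte selects the version).
bool isARMEABIVer5(unsigned EFlags) {
  return (EFlags & llvm::ELF::EF_ARM_EABIMASK) == llvm::ELF::EF_ARM_EABI_VER5;
}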
// ELF Relocation types for ARM
@@ -438,24 +430,24 @@ enum {
// AVR specific e_flags
enum : unsigned {
- EF_AVR_ARCH_AVR1 = 1,
- EF_AVR_ARCH_AVR2 = 2,
- EF_AVR_ARCH_AVR25 = 25,
- EF_AVR_ARCH_AVR3 = 3,
- EF_AVR_ARCH_AVR31 = 31,
- EF_AVR_ARCH_AVR35 = 35,
- EF_AVR_ARCH_AVR4 = 4,
- EF_AVR_ARCH_AVR5 = 5,
- EF_AVR_ARCH_AVR51 = 51,
- EF_AVR_ARCH_AVR6 = 6,
+ EF_AVR_ARCH_AVR1 = 1,
+ EF_AVR_ARCH_AVR2 = 2,
+ EF_AVR_ARCH_AVR25 = 25,
+ EF_AVR_ARCH_AVR3 = 3,
+ EF_AVR_ARCH_AVR31 = 31,
+ EF_AVR_ARCH_AVR35 = 35,
+ EF_AVR_ARCH_AVR4 = 4,
+ EF_AVR_ARCH_AVR5 = 5,
+ EF_AVR_ARCH_AVR51 = 51,
+ EF_AVR_ARCH_AVR6 = 6,
EF_AVR_ARCH_AVRTINY = 100,
- EF_AVR_ARCH_XMEGA1 = 101,
- EF_AVR_ARCH_XMEGA2 = 102,
- EF_AVR_ARCH_XMEGA3 = 103,
- EF_AVR_ARCH_XMEGA4 = 104,
- EF_AVR_ARCH_XMEGA5 = 105,
- EF_AVR_ARCH_XMEGA6 = 106,
- EF_AVR_ARCH_XMEGA7 = 107
+ EF_AVR_ARCH_XMEGA1 = 101,
+ EF_AVR_ARCH_XMEGA2 = 102,
+ EF_AVR_ARCH_XMEGA3 = 103,
+ EF_AVR_ARCH_XMEGA4 = 104,
+ EF_AVR_ARCH_XMEGA5 = 105,
+ EF_AVR_ARCH_XMEGA6 = 106,
+ EF_AVR_ARCH_XMEGA7 = 107
};
// ELF Relocation types for AVR
@@ -466,65 +458,63 @@ enum {
// Mips Specific e_flags
enum : unsigned {
EF_MIPS_NOREORDER = 0x00000001, // Don't reorder instructions
- EF_MIPS_PIC = 0x00000002, // Position independent code
- EF_MIPS_CPIC = 0x00000004, // Call object with Position independent code
- EF_MIPS_ABI2 = 0x00000020, // File uses N32 ABI
+ EF_MIPS_PIC = 0x00000002, // Position independent code
+ EF_MIPS_CPIC = 0x00000004, // Call object with Position independent code
+ EF_MIPS_ABI2 = 0x00000020, // File uses N32 ABI
EF_MIPS_32BITMODE = 0x00000100, // Code compiled for a 64-bit machine
// in 32-bit mode
- EF_MIPS_FP64 = 0x00000200, // Code compiled for a 32-bit machine
+ EF_MIPS_FP64 = 0x00000200, // Code compiled for a 32-bit machine
// but uses 64-bit FP registers
- EF_MIPS_NAN2008 = 0x00000400, // Uses IEE 754-2008 NaN encoding
+ EF_MIPS_NAN2008 = 0x00000400, // Uses IEE 754-2008 NaN encoding
// ABI flags
- EF_MIPS_ABI_O32 = 0x00001000, // This file follows the first MIPS 32 bit ABI
- EF_MIPS_ABI_O64 = 0x00002000, // O32 ABI extended for 64-bit architecture.
+ EF_MIPS_ABI_O32 = 0x00001000, // This file follows the first MIPS 32 bit ABI
+ EF_MIPS_ABI_O64 = 0x00002000, // O32 ABI extended for 64-bit architecture.
EF_MIPS_ABI_EABI32 = 0x00003000, // EABI in 32 bit mode.
EF_MIPS_ABI_EABI64 = 0x00004000, // EABI in 64 bit mode.
- EF_MIPS_ABI = 0x0000f000, // Mask for selecting EF_MIPS_ABI_ variant.
+ EF_MIPS_ABI = 0x0000f000, // Mask for selecting EF_MIPS_ABI_ variant.
// MIPS machine variant
- EF_MIPS_MACH_NONE = 0x00000000, // A standard MIPS implementation.
- EF_MIPS_MACH_3900 = 0x00810000, // Toshiba R3900
- EF_MIPS_MACH_4010 = 0x00820000, // LSI R4010
- EF_MIPS_MACH_4100 = 0x00830000, // NEC VR4100
- EF_MIPS_MACH_4650 = 0x00850000, // MIPS R4650
- EF_MIPS_MACH_4120 = 0x00870000, // NEC VR4120
- EF_MIPS_MACH_4111 = 0x00880000, // NEC VR4111/VR4181
- EF_MIPS_MACH_SB1 = 0x008a0000, // Broadcom SB-1
- EF_MIPS_MACH_OCTEON = 0x008b0000, // Cavium Networks Octeon
- EF_MIPS_MACH_XLR = 0x008c0000, // RMI Xlr
+ EF_MIPS_MACH_NONE = 0x00000000, // A standard MIPS implementation.
+ EF_MIPS_MACH_3900 = 0x00810000, // Toshiba R3900
+ EF_MIPS_MACH_4010 = 0x00820000, // LSI R4010
+ EF_MIPS_MACH_4100 = 0x00830000, // NEC VR4100
+ EF_MIPS_MACH_4650 = 0x00850000, // MIPS R4650
+ EF_MIPS_MACH_4120 = 0x00870000, // NEC VR4120
+ EF_MIPS_MACH_4111 = 0x00880000, // NEC VR4111/VR4181
+ EF_MIPS_MACH_SB1 = 0x008a0000, // Broadcom SB-1
+ EF_MIPS_MACH_OCTEON = 0x008b0000, // Cavium Networks Octeon
+ EF_MIPS_MACH_XLR = 0x008c0000, // RMI Xlr
EF_MIPS_MACH_OCTEON2 = 0x008d0000, // Cavium Networks Octeon2
EF_MIPS_MACH_OCTEON3 = 0x008e0000, // Cavium Networks Octeon3
- EF_MIPS_MACH_5400 = 0x00910000, // NEC VR5400
- EF_MIPS_MACH_5900 = 0x00920000, // MIPS R5900
- EF_MIPS_MACH_5500 = 0x00980000, // NEC VR5500
- EF_MIPS_MACH_9000 = 0x00990000, // Unknown
- EF_MIPS_MACH_LS2E = 0x00a00000, // ST Microelectronics Loongson 2E
- EF_MIPS_MACH_LS2F = 0x00a10000, // ST Microelectronics Loongson 2F
- EF_MIPS_MACH_LS3A = 0x00a20000, // Loongson 3A
- EF_MIPS_MACH = 0x00ff0000, // EF_MIPS_MACH_xxx selection mask
+ EF_MIPS_MACH_5400 = 0x00910000, // NEC VR5400
+ EF_MIPS_MACH_5900 = 0x00920000, // MIPS R5900
+ EF_MIPS_MACH_5500 = 0x00980000, // NEC VR5500
+ EF_MIPS_MACH_9000 = 0x00990000, // Unknown
+ EF_MIPS_MACH_LS2E = 0x00a00000, // ST Microelectronics Loongson 2E
+ EF_MIPS_MACH_LS2F = 0x00a10000, // ST Microelectronics Loongson 2F
+ EF_MIPS_MACH_LS3A = 0x00a20000, // Loongson 3A
+ EF_MIPS_MACH = 0x00ff0000, // EF_MIPS_MACH_xxx selection mask
// ARCH_ASE
- EF_MIPS_MICROMIPS = 0x02000000, // microMIPS
- EF_MIPS_ARCH_ASE_M16 =
- 0x04000000, // Has Mips-16 ISA extensions
- EF_MIPS_ARCH_ASE_MDMX =
- 0x08000000, // Has MDMX multimedia extensions
- EF_MIPS_ARCH_ASE = 0x0f000000, // Mask for EF_MIPS_ARCH_ASE_xxx flags
+ EF_MIPS_MICROMIPS = 0x02000000, // microMIPS
+ EF_MIPS_ARCH_ASE_M16 = 0x04000000, // Has Mips-16 ISA extensions
+ EF_MIPS_ARCH_ASE_MDMX = 0x08000000, // Has MDMX multimedia extensions
+ EF_MIPS_ARCH_ASE = 0x0f000000, // Mask for EF_MIPS_ARCH_ASE_xxx flags
// ARCH
- EF_MIPS_ARCH_1 = 0x00000000, // MIPS1 instruction set
- EF_MIPS_ARCH_2 = 0x10000000, // MIPS2 instruction set
- EF_MIPS_ARCH_3 = 0x20000000, // MIPS3 instruction set
- EF_MIPS_ARCH_4 = 0x30000000, // MIPS4 instruction set
- EF_MIPS_ARCH_5 = 0x40000000, // MIPS5 instruction set
- EF_MIPS_ARCH_32 = 0x50000000, // MIPS32 instruction set per linux not elf.h
- EF_MIPS_ARCH_64 = 0x60000000, // MIPS64 instruction set per linux not elf.h
+ EF_MIPS_ARCH_1 = 0x00000000, // MIPS1 instruction set
+ EF_MIPS_ARCH_2 = 0x10000000, // MIPS2 instruction set
+ EF_MIPS_ARCH_3 = 0x20000000, // MIPS3 instruction set
+ EF_MIPS_ARCH_4 = 0x30000000, // MIPS4 instruction set
+ EF_MIPS_ARCH_5 = 0x40000000, // MIPS5 instruction set
+ EF_MIPS_ARCH_32 = 0x50000000, // MIPS32 instruction set per linux not elf.h
+ EF_MIPS_ARCH_64 = 0x60000000, // MIPS64 instruction set per linux not elf.h
EF_MIPS_ARCH_32R2 = 0x70000000, // mips32r2, mips32r3, mips32r5
EF_MIPS_ARCH_64R2 = 0x80000000, // mips64r2, mips64r3, mips64r5
EF_MIPS_ARCH_32R6 = 0x90000000, // mips32r6
EF_MIPS_ARCH_64R6 = 0xa0000000, // mips64r6
- EF_MIPS_ARCH = 0xf0000000 // Mask for applying EF_MIPS_ARCH_ variant
+ EF_MIPS_ARCH = 0xf0000000 // Mask for applying EF_MIPS_ARCH_ variant
};
// ELF Relocation types for Mips
@@ -534,57 +524,57 @@ enum {
// Special values for the st_other field in the symbol table entry for MIPS.
enum {
- STO_MIPS_OPTIONAL = 0x04, // Symbol whose definition is optional
- STO_MIPS_PLT = 0x08, // PLT entry related dynamic table record
- STO_MIPS_PIC = 0x20, // PIC func in an object mixes PIC/non-PIC
- STO_MIPS_MICROMIPS = 0x80, // MIPS Specific ISA for MicroMips
- STO_MIPS_MIPS16 = 0xf0 // MIPS Specific ISA for Mips16
+ STO_MIPS_OPTIONAL = 0x04, // Symbol whose definition is optional
+ STO_MIPS_PLT = 0x08, // PLT entry related dynamic table record
+ STO_MIPS_PIC = 0x20, // PIC func in an object mixes PIC/non-PIC
+ STO_MIPS_MICROMIPS = 0x80, // MIPS Specific ISA for MicroMips
+ STO_MIPS_MIPS16 = 0xf0 // MIPS Specific ISA for Mips16
};
// .MIPS.options section descriptor kinds
enum {
- ODK_NULL = 0, // Undefined
- ODK_REGINFO = 1, // Register usage information
- ODK_EXCEPTIONS = 2, // Exception processing options
- ODK_PAD = 3, // Section padding options
- ODK_HWPATCH = 4, // Hardware patches applied
- ODK_FILL = 5, // Linker fill value
- ODK_TAGS = 6, // Space for tool identification
- ODK_HWAND = 7, // Hardware AND patches applied
- ODK_HWOR = 8, // Hardware OR patches applied
- ODK_GP_GROUP = 9, // GP group to use for text/data sections
- ODK_IDENT = 10, // ID information
- ODK_PAGESIZE = 11 // Page size information
+ ODK_NULL = 0, // Undefined
+ ODK_REGINFO = 1, // Register usage information
+ ODK_EXCEPTIONS = 2, // Exception processing options
+ ODK_PAD = 3, // Section padding options
+ ODK_HWPATCH = 4, // Hardware patches applied
+ ODK_FILL = 5, // Linker fill value
+ ODK_TAGS = 6, // Space for tool identification
+ ODK_HWAND = 7, // Hardware AND patches applied
+ ODK_HWOR = 8, // Hardware OR patches applied
+ ODK_GP_GROUP = 9, // GP group to use for text/data sections
+ ODK_IDENT = 10, // ID information
+ ODK_PAGESIZE = 11 // Page size information
};
// Hexagon-specific e_flags
enum {
// Object processor version flags, bits[11:0]
- EF_HEXAGON_MACH_V2 = 0x00000001, // Hexagon V2
- EF_HEXAGON_MACH_V3 = 0x00000002, // Hexagon V3
- EF_HEXAGON_MACH_V4 = 0x00000003, // Hexagon V4
- EF_HEXAGON_MACH_V5 = 0x00000004, // Hexagon V5
- EF_HEXAGON_MACH_V55 = 0x00000005, // Hexagon V55
- EF_HEXAGON_MACH_V60 = 0x00000060, // Hexagon V60
+ EF_HEXAGON_MACH_V2 = 0x00000001, // Hexagon V2
+ EF_HEXAGON_MACH_V3 = 0x00000002, // Hexagon V3
+ EF_HEXAGON_MACH_V4 = 0x00000003, // Hexagon V4
+ EF_HEXAGON_MACH_V5 = 0x00000004, // Hexagon V5
+ EF_HEXAGON_MACH_V55 = 0x00000005, // Hexagon V55
+ EF_HEXAGON_MACH_V60 = 0x00000060, // Hexagon V60
// Highest ISA version flags
- EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0]
- // of e_flags
- EF_HEXAGON_ISA_V2 = 0x00000010, // Hexagon V2 ISA
- EF_HEXAGON_ISA_V3 = 0x00000020, // Hexagon V3 ISA
- EF_HEXAGON_ISA_V4 = 0x00000030, // Hexagon V4 ISA
- EF_HEXAGON_ISA_V5 = 0x00000040, // Hexagon V5 ISA
- EF_HEXAGON_ISA_V55 = 0x00000050, // Hexagon V55 ISA
- EF_HEXAGON_ISA_V60 = 0x00000060, // Hexagon V60 ISA
+ EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0]
+ // of e_flags
+ EF_HEXAGON_ISA_V2 = 0x00000010, // Hexagon V2 ISA
+ EF_HEXAGON_ISA_V3 = 0x00000020, // Hexagon V3 ISA
+ EF_HEXAGON_ISA_V4 = 0x00000030, // Hexagon V4 ISA
+ EF_HEXAGON_ISA_V5 = 0x00000040, // Hexagon V5 ISA
+ EF_HEXAGON_ISA_V55 = 0x00000050, // Hexagon V55 ISA
+ EF_HEXAGON_ISA_V60 = 0x00000060, // Hexagon V60 ISA
};
// Hexagon-specific section indexes for common small data
enum {
- SHN_HEXAGON_SCOMMON = 0xff00, // Other access sizes
- SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access
- SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access
- SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access
- SHN_HEXAGON_SCOMMON_8 = 0xff04 // Double-word-size access
+ SHN_HEXAGON_SCOMMON = 0xff00, // Other access sizes
+ SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access
+ SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access
+ SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access
+ SHN_HEXAGON_SCOMMON_8 = 0xff04 // Double-word-size access
};
// ELF Relocation types for Hexagon
@@ -597,6 +587,11 @@ enum {
#include "ELFRelocs/Lanai.def"
};
+// ELF Relocation types for RISC-V
+enum {
+#include "ELFRelocs/RISCV.def"
+};
+
// ELF Relocation types for S390/zSeries
enum {
#include "ELFRelocs/SystemZ.def"
@@ -630,7 +625,7 @@ struct Elf32_Shdr {
Elf32_Word sh_type; // Section type (SHT_*)
Elf32_Word sh_flags; // Section flags (SHF_*)
Elf32_Addr sh_addr; // Address where section is to be loaded
- Elf32_Off sh_offset; // File offset of section data, in bytes
+ Elf32_Off sh_offset; // File offset of section data, in bytes
Elf32_Word sh_size; // Size of section, in bytes
Elf32_Word sh_link; // Section type-specific header table index link
Elf32_Word sh_info; // Section type-specific extra information
@@ -640,79 +635,79 @@ struct Elf32_Shdr {
// Section header for ELF64 - same fields as ELF32, different types.
struct Elf64_Shdr {
- Elf64_Word sh_name;
- Elf64_Word sh_type;
+ Elf64_Word sh_name;
+ Elf64_Word sh_type;
Elf64_Xword sh_flags;
- Elf64_Addr sh_addr;
- Elf64_Off sh_offset;
+ Elf64_Addr sh_addr;
+ Elf64_Off sh_offset;
Elf64_Xword sh_size;
- Elf64_Word sh_link;
- Elf64_Word sh_info;
+ Elf64_Word sh_link;
+ Elf64_Word sh_info;
Elf64_Xword sh_addralign;
Elf64_Xword sh_entsize;
};
// Special section indices.
enum {
- SHN_UNDEF = 0, // Undefined, missing, irrelevant, or meaningless
+ SHN_UNDEF = 0, // Undefined, missing, irrelevant, or meaningless
SHN_LORESERVE = 0xff00, // Lowest reserved index
- SHN_LOPROC = 0xff00, // Lowest processor-specific index
- SHN_HIPROC = 0xff1f, // Highest processor-specific index
- SHN_LOOS = 0xff20, // Lowest operating system-specific index
- SHN_HIOS = 0xff3f, // Highest operating system-specific index
- SHN_ABS = 0xfff1, // Symbol has absolute value; does not need relocation
- SHN_COMMON = 0xfff2, // FORTRAN COMMON or C external global variables
- SHN_XINDEX = 0xffff, // Mark that the index is >= SHN_LORESERVE
+ SHN_LOPROC = 0xff00, // Lowest processor-specific index
+ SHN_HIPROC = 0xff1f, // Highest processor-specific index
+ SHN_LOOS = 0xff20, // Lowest operating system-specific index
+ SHN_HIOS = 0xff3f, // Highest operating system-specific index
+ SHN_ABS = 0xfff1, // Symbol has absolute value; does not need relocation
+ SHN_COMMON = 0xfff2, // FORTRAN COMMON or C external global variables
+ SHN_XINDEX = 0xffff, // Mark that the index is >= SHN_LORESERVE
SHN_HIRESERVE = 0xffff // Highest reserved index
};
// Section types.
enum : unsigned {
- SHT_NULL = 0, // No associated section (inactive entry).
- SHT_PROGBITS = 1, // Program-defined contents.
- SHT_SYMTAB = 2, // Symbol table.
- SHT_STRTAB = 3, // String table.
- SHT_RELA = 4, // Relocation entries; explicit addends.
- SHT_HASH = 5, // Symbol hash table.
- SHT_DYNAMIC = 6, // Information for dynamic linking.
- SHT_NOTE = 7, // Information about the file.
- SHT_NOBITS = 8, // Data occupies no space in the file.
- SHT_REL = 9, // Relocation entries; no explicit addends.
- SHT_SHLIB = 10, // Reserved.
- SHT_DYNSYM = 11, // Symbol table.
- SHT_INIT_ARRAY = 14, // Pointers to initialization functions.
- SHT_FINI_ARRAY = 15, // Pointers to termination functions.
- SHT_PREINIT_ARRAY = 16, // Pointers to pre-init functions.
- SHT_GROUP = 17, // Section group.
- SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
- SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
- SHT_GNU_ATTRIBUTES= 0x6ffffff5, // Object attributes.
- SHT_GNU_HASH = 0x6ffffff6, // GNU-style hash table.
- SHT_GNU_verdef = 0x6ffffffd, // GNU version definitions.
- SHT_GNU_verneed = 0x6ffffffe, // GNU version references.
- SHT_GNU_versym = 0x6fffffff, // GNU symbol versions table.
- SHT_HIOS = 0x6fffffff, // Highest operating system-specific type.
- SHT_LOPROC = 0x70000000, // Lowest processor arch-specific type.
+ SHT_NULL = 0, // No associated section (inactive entry).
+ SHT_PROGBITS = 1, // Program-defined contents.
+ SHT_SYMTAB = 2, // Symbol table.
+ SHT_STRTAB = 3, // String table.
+ SHT_RELA = 4, // Relocation entries; explicit addends.
+ SHT_HASH = 5, // Symbol hash table.
+ SHT_DYNAMIC = 6, // Information for dynamic linking.
+ SHT_NOTE = 7, // Information about the file.
+ SHT_NOBITS = 8, // Data occupies no space in the file.
+ SHT_REL = 9, // Relocation entries; no explicit addends.
+ SHT_SHLIB = 10, // Reserved.
+ SHT_DYNSYM = 11, // Symbol table.
+ SHT_INIT_ARRAY = 14, // Pointers to initialization functions.
+ SHT_FINI_ARRAY = 15, // Pointers to termination functions.
+ SHT_PREINIT_ARRAY = 16, // Pointers to pre-init functions.
+ SHT_GROUP = 17, // Section group.
+ SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
+ SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
+ SHT_GNU_ATTRIBUTES = 0x6ffffff5, // Object attributes.
+ SHT_GNU_HASH = 0x6ffffff6, // GNU-style hash table.
+ SHT_GNU_verdef = 0x6ffffffd, // GNU version definitions.
+ SHT_GNU_verneed = 0x6ffffffe, // GNU version references.
+ SHT_GNU_versym = 0x6fffffff, // GNU symbol versions table.
+ SHT_HIOS = 0x6fffffff, // Highest operating system-specific type.
+ SHT_LOPROC = 0x70000000, // Lowest processor arch-specific type.
// Fixme: All this is duplicated in MCSectionELF. Why??
// Exception Index table
- SHT_ARM_EXIDX = 0x70000001U,
+ SHT_ARM_EXIDX = 0x70000001U,
// BPABI DLL dynamic linking pre-emption map
- SHT_ARM_PREEMPTMAP = 0x70000002U,
+ SHT_ARM_PREEMPTMAP = 0x70000002U,
// Object file compatibility attributes
- SHT_ARM_ATTRIBUTES = 0x70000003U,
- SHT_ARM_DEBUGOVERLAY = 0x70000004U,
- SHT_ARM_OVERLAYSECTION = 0x70000005U,
- SHT_HEX_ORDERED = 0x70000000, // Link editor is to sort the entries in
- // this section based on their sizes
- SHT_X86_64_UNWIND = 0x70000001, // Unwind information
+ SHT_ARM_ATTRIBUTES = 0x70000003U,
+ SHT_ARM_DEBUGOVERLAY = 0x70000004U,
+ SHT_ARM_OVERLAYSECTION = 0x70000005U,
+ SHT_HEX_ORDERED = 0x70000000, // Link editor is to sort the entries in
+ // this section based on their sizes
+ SHT_X86_64_UNWIND = 0x70000001, // Unwind information
- SHT_MIPS_REGINFO = 0x70000006, // Register usage information
- SHT_MIPS_OPTIONS = 0x7000000d, // General options
- SHT_MIPS_ABIFLAGS = 0x7000002a, // ABI information.
+ SHT_MIPS_REGINFO = 0x70000006, // Register usage information
+ SHT_MIPS_OPTIONS = 0x7000000d, // General options
+ SHT_MIPS_ABIFLAGS = 0x7000002a, // ABI information.
- SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
- SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
- SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
+ SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
+ SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
+ SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
};
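SHT_LOOS/SHT_HIOS and SHT_LOPROC/SHT_HIPROC above delimit reserved ranges rather than individual types; a small sketch of range-based classification (helper names are illustrative):

#include "llvm/Support/ELF.h"

bool isOSSpecificSection(unsigned ShType) {
  return ShType >= llvm::ELF::SHT_LOOS && ShType <= llvm::ELF::SHT_HIOS;
}

bool isProcessorSpecificSection(unsigned ShType) {
  return ShType >= llvm::ELF::SHT_LOPROC && ShType <= llvm::ELF::SHT_HIPROC;
}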
// Section flags.
@@ -766,7 +761,7 @@ enum : unsigned {
/// set to the start of the section by the boot code.
XCORE_SHF_DP_SECTION = 0x1000U,
- SHF_MASKOS = 0x0ff00000,
+ SHF_MASKOS = 0x0ff00000,
// Bits indicating processor-specific flags.
SHF_MASKPROC = 0xf0000000,
@@ -789,30 +784,33 @@ enum : unsigned {
SHF_MIPS_NODUPES = 0x01000000,
// Linker must generate implicit hidden weak names.
- SHF_MIPS_NAMES = 0x02000000,
+ SHF_MIPS_NAMES = 0x02000000,
// Section data local to process.
- SHF_MIPS_LOCAL = 0x04000000,
+ SHF_MIPS_LOCAL = 0x04000000,
// Do not strip this section.
SHF_MIPS_NOSTRIP = 0x08000000,
// Section must be part of global data area.
- SHF_MIPS_GPREL = 0x10000000,
+ SHF_MIPS_GPREL = 0x10000000,
// This section should be merged.
- SHF_MIPS_MERGE = 0x20000000,
+ SHF_MIPS_MERGE = 0x20000000,
// Address size to be inferred from section entry size.
- SHF_MIPS_ADDR = 0x40000000,
+ SHF_MIPS_ADDR = 0x40000000,
// Section data is string data by default.
- SHF_MIPS_STRING = 0x80000000,
+ SHF_MIPS_STRING = 0x80000000,
+
+ // Make code section unreadable when in execute-only mode
+ SHF_ARM_PURECODE = 0x20000000,
- SHF_AMDGPU_HSA_GLOBAL = 0x00100000,
+ SHF_AMDGPU_HSA_GLOBAL = 0x00100000,
SHF_AMDGPU_HSA_READONLY = 0x00200000,
- SHF_AMDGPU_HSA_CODE = 0x00400000,
- SHF_AMDGPU_HSA_AGENT = 0x00800000
+ SHF_AMDGPU_HSA_CODE = 0x00400000,
+ SHF_AMDGPU_HSA_AGENT = 0x00800000
};
// Section Group Flags
@@ -824,12 +822,12 @@ enum : unsigned {
// Symbol table entries for ELF32.
struct Elf32_Sym {
- Elf32_Word st_name; // Symbol name (index into string table)
- Elf32_Addr st_value; // Value or address associated with the symbol
- Elf32_Word st_size; // Size of the symbol
+ Elf32_Word st_name; // Symbol name (index into string table)
+ Elf32_Addr st_value; // Value or address associated with the symbol
+ Elf32_Word st_size; // Size of the symbol
unsigned char st_info; // Symbol's type and binding attributes
unsigned char st_other; // Must be zero; reserved
- Elf32_Half st_shndx; // Which section (header table index) it's defined in
+ Elf32_Half st_shndx; // Which section (header table index) it's defined in
// These accessors and mutators correspond to the ELF32_ST_BIND,
// ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
@@ -844,12 +842,12 @@ struct Elf32_Sym {
// Symbol table entries for ELF64.
struct Elf64_Sym {
- Elf64_Word st_name; // Symbol name (index into string table)
- unsigned char st_info; // Symbol's type and binding attributes
- unsigned char st_other; // Must be zero; reserved
- Elf64_Half st_shndx; // Which section (header tbl index) it's defined in
- Elf64_Addr st_value; // Value or address associated with the symbol
- Elf64_Xword st_size; // Size of the symbol
+ Elf64_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf64_Half st_shndx; // Which section (header tbl index) it's defined in
+ Elf64_Addr st_value; // Value or address associated with the symbol
+ Elf64_Xword st_size; // Size of the symbol
// These accessors and mutators are identical to those defined for ELF32
// symbol table entries.
@@ -870,48 +868,46 @@ enum {
// Symbol bindings.
enum {
- STB_LOCAL = 0, // Local symbol, not visible outside obj file containing def
- STB_GLOBAL = 1, // Global symbol, visible to all object files being combined
- STB_WEAK = 2, // Weak symbol, like global but lower-precedence
+ STB_LOCAL = 0, // Local symbol, not visible outside obj file containing def
+ STB_GLOBAL = 1, // Global symbol, visible to all object files being combined
+ STB_WEAK = 2, // Weak symbol, like global but lower-precedence
STB_GNU_UNIQUE = 10,
- STB_LOOS = 10, // Lowest operating system-specific binding type
- STB_HIOS = 12, // Highest operating system-specific binding type
+ STB_LOOS = 10, // Lowest operating system-specific binding type
+ STB_HIOS = 12, // Highest operating system-specific binding type
STB_LOPROC = 13, // Lowest processor-specific binding type
STB_HIPROC = 15 // Highest processor-specific binding type
};
// Symbol types.
enum {
- STT_NOTYPE = 0, // Symbol's type is not specified
- STT_OBJECT = 1, // Symbol is a data object (variable, array, etc.)
- STT_FUNC = 2, // Symbol is executable code (function, etc.)
- STT_SECTION = 3, // Symbol refers to a section
- STT_FILE = 4, // Local, absolute symbol that refers to a file
- STT_COMMON = 5, // An uninitialized common block
- STT_TLS = 6, // Thread local data object
+ STT_NOTYPE = 0, // Symbol's type is not specified
+ STT_OBJECT = 1, // Symbol is a data object (variable, array, etc.)
+ STT_FUNC = 2, // Symbol is executable code (function, etc.)
+ STT_SECTION = 3, // Symbol refers to a section
+ STT_FILE = 4, // Local, absolute symbol that refers to a file
+ STT_COMMON = 5, // An uninitialized common block
+ STT_TLS = 6, // Thread local data object
STT_GNU_IFUNC = 10, // GNU indirect function
- STT_LOOS = 10, // Lowest operating system-specific symbol type
- STT_HIOS = 12, // Highest operating system-specific symbol type
- STT_LOPROC = 13, // Lowest processor-specific symbol type
- STT_HIPROC = 15, // Highest processor-specific symbol type
+ STT_LOOS = 10, // Lowest operating system-specific symbol type
+ STT_HIOS = 12, // Highest operating system-specific symbol type
+ STT_LOPROC = 13, // Lowest processor-specific symbol type
+ STT_HIPROC = 15, // Highest processor-specific symbol type
// AMDGPU symbol types
- STT_AMDGPU_HSA_KERNEL = 10,
+ STT_AMDGPU_HSA_KERNEL = 10,
STT_AMDGPU_HSA_INDIRECT_FUNCTION = 11,
- STT_AMDGPU_HSA_METADATA = 12
+ STT_AMDGPU_HSA_METADATA = 12
};
enum {
- STV_DEFAULT = 0, // Visibility is specified by binding type
- STV_INTERNAL = 1, // Defined by processor supplements
- STV_HIDDEN = 2, // Not visible to other components
- STV_PROTECTED = 3 // Visible in other components but not preemptable
+ STV_DEFAULT = 0, // Visibility is specified by binding type
+ STV_INTERNAL = 1, // Defined by processor supplements
+ STV_HIDDEN = 2, // Not visible to other components
+ STV_PROTECTED = 3 // Visible in other components but not preemptable
};
// Symbol number.
-enum {
- STN_UNDEF = 0
-};
+enum { STN_UNDEF = 0 };
// Special relocation symbols used in the MIPS64 ELF relocation entries
enum {
@@ -929,7 +925,7 @@ struct Elf32_Rel {
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
Elf32_Word getSymbol() const { return (r_info >> 8); }
- unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ unsigned char getType() const { return (unsigned char)(r_info & 0x0ff); }
void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
void setSymbolAndType(Elf32_Word s, unsigned char t) {
@@ -939,14 +935,14 @@ struct Elf32_Rel {
// Relocation entry with explicit addend.
struct Elf32_Rela {
- Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr)
- Elf32_Word r_info; // Symbol table index and type of relocation to apply
+ Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf32_Word r_info; // Symbol table index and type of relocation to apply
Elf32_Sword r_addend; // Compute value for relocatable field by adding this
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
Elf32_Word getSymbol() const { return (r_info >> 8); }
- unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ unsigned char getType() const { return (unsigned char)(r_info & 0x0ff); }
void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
void setSymbolAndType(Elf32_Word s, unsigned char t) {
@@ -957,44 +953,40 @@ struct Elf32_Rela {
// Relocation entry, without explicit addend.
struct Elf64_Rel {
Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
- Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
// These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
// and ELF64_R_INFO macros defined in the ELF specification:
Elf64_Word getSymbol() const { return (r_info >> 32); }
- Elf64_Word getType() const {
- return (Elf64_Word) (r_info & 0xffffffffL);
- }
+ Elf64_Word getType() const { return (Elf64_Word)(r_info & 0xffffffffL); }
void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
- r_info = ((Elf64_Xword)s << 32) + (t&0xffffffffL);
+ r_info = ((Elf64_Xword)s << 32) + (t & 0xffffffffL);
}
};
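The Elf64_Rel accessors above pack the symbol index into the high 32 bits of r_info and the relocation type into the low 32 bits; a short round-trip sketch of that packing:

#include <cassert>
#include <cstdint>
#include "llvm/Support/ELF.h"

void checkRelInfoPacking() {
  llvm::ELF::Elf64_Rel Rel;
  Rel.setSymbolAndType(/*symbol index=*/7, /*type=*/2);
  assert(Rel.r_info == ((uint64_t)7 << 32 | 2)); // 0x0000000700000002
  assert(Rel.getSymbol() == 7 && Rel.getType() == 2);
}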
// Relocation entry with explicit addend.
struct Elf64_Rela {
- Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
- Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
Elf64_Sxword r_addend; // Compute value for relocatable field by adding this.
// These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
// and ELF64_R_INFO macros defined in the ELF specification:
Elf64_Word getSymbol() const { return (r_info >> 32); }
- Elf64_Word getType() const {
- return (Elf64_Word) (r_info & 0xffffffffL);
- }
+ Elf64_Word getType() const { return (Elf64_Word)(r_info & 0xffffffffL); }
void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
- r_info = ((Elf64_Xword)s << 32) + (t&0xffffffffL);
+ r_info = ((Elf64_Xword)s << 32) + (t & 0xffffffffL);
}
};
// Program header for ELF32.
struct Elf32_Phdr {
Elf32_Word p_type; // Type of segment
- Elf32_Off p_offset; // File offset where segment is located, in bytes
+ Elf32_Off p_offset; // File offset where segment is located, in bytes
Elf32_Addr p_vaddr; // Virtual address of beginning of segment
Elf32_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
Elf32_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
@@ -1005,57 +997,61 @@ struct Elf32_Phdr {
// Program header for ELF64.
struct Elf64_Phdr {
- Elf64_Word p_type; // Type of segment
- Elf64_Word p_flags; // Segment flags
- Elf64_Off p_offset; // File offset where segment is located, in bytes
- Elf64_Addr p_vaddr; // Virtual address of beginning of segment
- Elf64_Addr p_paddr; // Physical addr of beginning of segment (OS-specific)
- Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
- Elf64_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
- Elf64_Xword p_align; // Segment alignment constraint
+ Elf64_Word p_type; // Type of segment
+ Elf64_Word p_flags; // Segment flags
+ Elf64_Off p_offset; // File offset where segment is located, in bytes
+ Elf64_Addr p_vaddr; // Virtual address of beginning of segment
+ Elf64_Addr p_paddr; // Physical addr of beginning of segment (OS-specific)
+ Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf64_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf64_Xword p_align; // Segment alignment constraint
};
// Segment types.
enum {
- PT_NULL = 0, // Unused segment.
- PT_LOAD = 1, // Loadable segment.
- PT_DYNAMIC = 2, // Dynamic linking information.
- PT_INTERP = 3, // Interpreter pathname.
- PT_NOTE = 4, // Auxiliary information.
- PT_SHLIB = 5, // Reserved.
- PT_PHDR = 6, // The program header table itself.
- PT_TLS = 7, // The thread-local storage template.
- PT_LOOS = 0x60000000, // Lowest operating system-specific pt entry type.
- PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
- PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
- PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type.
+ PT_NULL = 0, // Unused segment.
+ PT_LOAD = 1, // Loadable segment.
+ PT_DYNAMIC = 2, // Dynamic linking information.
+ PT_INTERP = 3, // Interpreter pathname.
+ PT_NOTE = 4, // Auxiliary information.
+ PT_SHLIB = 5, // Reserved.
+ PT_PHDR = 6, // The program header table itself.
+ PT_TLS = 7, // The thread-local storage template.
+ PT_LOOS = 0x60000000, // Lowest operating system-specific pt entry type.
+ PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
+ PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
+ PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type.
// x86-64 program header types.
// These all contain stack unwind tables.
- PT_GNU_EH_FRAME = 0x6474e550,
+ PT_GNU_EH_FRAME = 0x6474e550,
PT_SUNW_EH_FRAME = 0x6474e550,
- PT_SUNW_UNWIND = 0x6464e550,
+ PT_SUNW_UNWIND = 0x6464e550,
+
+ PT_GNU_STACK = 0x6474e551, // Indicates stack executability.
+ PT_GNU_RELRO = 0x6474e552, // Read-only after relocation.
- PT_GNU_STACK = 0x6474e551, // Indicates stack executability.
- PT_GNU_RELRO = 0x6474e552, // Read-only after relocation.
+ PT_OPENBSD_RANDOMIZE = 0x65a3dbe6, // Fill with random data.
+ PT_OPENBSD_WXNEEDED = 0x65a3dbe7, // Program does W^X violations.
+ PT_OPENBSD_BOOTDATA = 0x65a41be6, // Section for boot arguments.
// ARM program header types.
PT_ARM_ARCHEXT = 0x70000000, // Platform architecture compatibility info
// These all contain stack unwind tables.
- PT_ARM_EXIDX = 0x70000001,
- PT_ARM_UNWIND = 0x70000001,
+ PT_ARM_EXIDX = 0x70000001,
+ PT_ARM_UNWIND = 0x70000001,
// MIPS program header types.
- PT_MIPS_REGINFO = 0x70000000, // Register usage information.
- PT_MIPS_RTPROC = 0x70000001, // Runtime procedure table.
- PT_MIPS_OPTIONS = 0x70000002, // Options segment.
- PT_MIPS_ABIFLAGS = 0x70000003, // Abiflags segment.
+ PT_MIPS_REGINFO = 0x70000000, // Register usage information.
+ PT_MIPS_RTPROC = 0x70000001, // Runtime procedure table.
+ PT_MIPS_OPTIONS = 0x70000002, // Options segment.
+ PT_MIPS_ABIFLAGS = 0x70000003, // Abiflags segment.
// AMDGPU program header types.
PT_AMDGPU_HSA_LOAD_GLOBAL_PROGRAM = 0x60000000,
- PT_AMDGPU_HSA_LOAD_GLOBAL_AGENT = 0x60000001,
+ PT_AMDGPU_HSA_LOAD_GLOBAL_AGENT = 0x60000001,
PT_AMDGPU_HSA_LOAD_READONLY_AGENT = 0x60000002,
- PT_AMDGPU_HSA_LOAD_CODE_AGENT = 0x60000003,
+ PT_AMDGPU_HSA_LOAD_CODE_AGENT = 0x60000003,
// WebAssembly program header types.
PT_WEBASSEMBLY_FUNCTIONS = PT_LOPROC + 0, // Function definitions.
@@ -1063,267 +1059,273 @@ enum {
// Segment flag bits.
enum : unsigned {
- PF_X = 1, // Execute
- PF_W = 2, // Write
- PF_R = 4, // Read
- PF_MASKOS = 0x0ff00000,// Bits for operating system-specific semantics.
+ PF_X = 1, // Execute
+ PF_W = 2, // Write
+ PF_R = 4, // Read
+ PF_MASKOS = 0x0ff00000, // Bits for operating system-specific semantics.
PF_MASKPROC = 0xf0000000 // Bits for processor-specific semantics.
};
// Dynamic table entry for ELF32.
-struct Elf32_Dyn
-{
- Elf32_Sword d_tag; // Type of dynamic table entry.
- union
- {
- Elf32_Word d_val; // Integer value of entry.
- Elf32_Addr d_ptr; // Pointer value of entry.
+struct Elf32_Dyn {
+ Elf32_Sword d_tag; // Type of dynamic table entry.
+ union {
+ Elf32_Word d_val; // Integer value of entry.
+ Elf32_Addr d_ptr; // Pointer value of entry.
} d_un;
};
// Dynamic table entry for ELF64.
-struct Elf64_Dyn
-{
- Elf64_Sxword d_tag; // Type of dynamic table entry.
- union
- {
- Elf64_Xword d_val; // Integer value of entry.
- Elf64_Addr d_ptr; // Pointer value of entry.
+struct Elf64_Dyn {
+ Elf64_Sxword d_tag; // Type of dynamic table entry.
+ union {
+ Elf64_Xword d_val; // Integer value of entry.
+ Elf64_Addr d_ptr; // Pointer value of entry.
} d_un;
};
// Dynamic table entry tags.
enum {
- DT_NULL = 0, // Marks end of dynamic array.
- DT_NEEDED = 1, // String table offset of needed library.
- DT_PLTRELSZ = 2, // Size of relocation entries in PLT.
- DT_PLTGOT = 3, // Address associated with linkage table.
- DT_HASH = 4, // Address of symbolic hash table.
- DT_STRTAB = 5, // Address of dynamic string table.
- DT_SYMTAB = 6, // Address of dynamic symbol table.
- DT_RELA = 7, // Address of relocation table (Rela entries).
- DT_RELASZ = 8, // Size of Rela relocation table.
- DT_RELAENT = 9, // Size of a Rela relocation entry.
- DT_STRSZ = 10, // Total size of the string table.
- DT_SYMENT = 11, // Size of a symbol table entry.
- DT_INIT = 12, // Address of initialization function.
- DT_FINI = 13, // Address of termination function.
- DT_SONAME = 14, // String table offset of a shared objects name.
- DT_RPATH = 15, // String table offset of library search path.
- DT_SYMBOLIC = 16, // Changes symbol resolution algorithm.
- DT_REL = 17, // Address of relocation table (Rel entries).
- DT_RELSZ = 18, // Size of Rel relocation table.
- DT_RELENT = 19, // Size of a Rel relocation entry.
- DT_PLTREL = 20, // Type of relocation entry used for linking.
- DT_DEBUG = 21, // Reserved for debugger.
- DT_TEXTREL = 22, // Relocations exist for non-writable segments.
- DT_JMPREL = 23, // Address of relocations associated with PLT.
- DT_BIND_NOW = 24, // Process all relocations before execution.
- DT_INIT_ARRAY = 25, // Pointer to array of initialization functions.
- DT_FINI_ARRAY = 26, // Pointer to array of termination functions.
- DT_INIT_ARRAYSZ = 27, // Size of DT_INIT_ARRAY.
- DT_FINI_ARRAYSZ = 28, // Size of DT_FINI_ARRAY.
- DT_RUNPATH = 29, // String table offset of lib search path.
- DT_FLAGS = 30, // Flags.
- DT_ENCODING = 32, // Values from here to DT_LOOS follow the rules
- // for the interpretation of the d_un union.
-
- DT_PREINIT_ARRAY = 32, // Pointer to array of preinit functions.
- DT_PREINIT_ARRAYSZ = 33, // Size of the DT_PREINIT_ARRAY array.
-
- DT_LOOS = 0x60000000, // Start of environment specific tags.
- DT_HIOS = 0x6FFFFFFF, // End of environment specific tags.
- DT_LOPROC = 0x70000000, // Start of processor specific tags.
- DT_HIPROC = 0x7FFFFFFF, // End of processor specific tags.
-
- DT_GNU_HASH = 0x6FFFFEF5, // Reference to the GNU hash table.
- DT_TLSDESC_PLT = 0x6FFFFEF6, // Location of PLT entry for TLS descriptor resolver calls.
- DT_TLSDESC_GOT = 0x6FFFFEF7, // Location of GOT entry used by TLS descriptor resolver PLT entry.
- DT_RELACOUNT = 0x6FFFFFF9, // ELF32_Rela count.
- DT_RELCOUNT = 0x6FFFFFFA, // ELF32_Rel count.
-
- DT_FLAGS_1 = 0X6FFFFFFB, // Flags_1.
- DT_VERSYM = 0x6FFFFFF0, // The address of .gnu.version section.
- DT_VERDEF = 0X6FFFFFFC, // The address of the version definition table.
- DT_VERDEFNUM = 0X6FFFFFFD, // The number of entries in DT_VERDEF.
- DT_VERNEED = 0X6FFFFFFE, // The address of the version Dependency table.
- DT_VERNEEDNUM = 0X6FFFFFFF, // The number of entries in DT_VERNEED.
+ DT_NULL = 0, // Marks end of dynamic array.
+ DT_NEEDED = 1, // String table offset of needed library.
+ DT_PLTRELSZ = 2, // Size of relocation entries in PLT.
+ DT_PLTGOT = 3, // Address associated with linkage table.
+ DT_HASH = 4, // Address of symbolic hash table.
+ DT_STRTAB = 5, // Address of dynamic string table.
+ DT_SYMTAB = 6, // Address of dynamic symbol table.
+ DT_RELA = 7, // Address of relocation table (Rela entries).
+ DT_RELASZ = 8, // Size of Rela relocation table.
+ DT_RELAENT = 9, // Size of a Rela relocation entry.
+ DT_STRSZ = 10, // Total size of the string table.
+ DT_SYMENT = 11, // Size of a symbol table entry.
+ DT_INIT = 12, // Address of initialization function.
+ DT_FINI = 13, // Address of termination function.
+ DT_SONAME = 14, // String table offset of a shared objects name.
+ DT_RPATH = 15, // String table offset of library search path.
+ DT_SYMBOLIC = 16, // Changes symbol resolution algorithm.
+ DT_REL = 17, // Address of relocation table (Rel entries).
+ DT_RELSZ = 18, // Size of Rel relocation table.
+ DT_RELENT = 19, // Size of a Rel relocation entry.
+ DT_PLTREL = 20, // Type of relocation entry used for linking.
+ DT_DEBUG = 21, // Reserved for debugger.
+ DT_TEXTREL = 22, // Relocations exist for non-writable segments.
+ DT_JMPREL = 23, // Address of relocations associated with PLT.
+ DT_BIND_NOW = 24, // Process all relocations before execution.
+ DT_INIT_ARRAY = 25, // Pointer to array of initialization functions.
+ DT_FINI_ARRAY = 26, // Pointer to array of termination functions.
+ DT_INIT_ARRAYSZ = 27, // Size of DT_INIT_ARRAY.
+ DT_FINI_ARRAYSZ = 28, // Size of DT_FINI_ARRAY.
+ DT_RUNPATH = 29, // String table offset of lib search path.
+ DT_FLAGS = 30, // Flags.
+ DT_ENCODING = 32, // Values from here to DT_LOOS follow the rules
+ // for the interpretation of the d_un union.
+
+ DT_PREINIT_ARRAY = 32, // Pointer to array of preinit functions.
+ DT_PREINIT_ARRAYSZ = 33, // Size of the DT_PREINIT_ARRAY array.
+
+ DT_LOOS = 0x60000000, // Start of environment specific tags.
+ DT_HIOS = 0x6FFFFFFF, // End of environment specific tags.
+ DT_LOPROC = 0x70000000, // Start of processor specific tags.
+ DT_HIPROC = 0x7FFFFFFF, // End of processor specific tags.
+
+ DT_GNU_HASH = 0x6FFFFEF5, // Reference to the GNU hash table.
+ DT_TLSDESC_PLT =
+ 0x6FFFFEF6, // Location of PLT entry for TLS descriptor resolver calls.
+ DT_TLSDESC_GOT = 0x6FFFFEF7, // Location of GOT entry used by TLS descriptor
+ // resolver PLT entry.
+ DT_RELACOUNT = 0x6FFFFFF9, // ELF32_Rela count.
+ DT_RELCOUNT = 0x6FFFFFFA, // ELF32_Rel count.
+
+ DT_FLAGS_1 = 0X6FFFFFFB, // Flags_1.
+ DT_VERSYM = 0x6FFFFFF0, // The address of .gnu.version section.
+ DT_VERDEF = 0X6FFFFFFC, // The address of the version definition table.
+ DT_VERDEFNUM = 0X6FFFFFFD, // The number of entries in DT_VERDEF.
+ DT_VERNEED = 0X6FFFFFFE, // The address of the version Dependency table.
+ DT_VERNEEDNUM = 0X6FFFFFFF, // The number of entries in DT_VERNEED.
+
+ // Hexagon specific dynamic table entries
+ DT_HEXAGON_SYMSZ = 0x70000000,
+ DT_HEXAGON_VER = 0x70000001,
+ DT_HEXAGON_PLT = 0x70000002,
// Mips specific dynamic table entry tags.
- DT_MIPS_RLD_VERSION = 0x70000001, // 32 bit version number for runtime
- // linker interface.
- DT_MIPS_TIME_STAMP = 0x70000002, // Time stamp.
- DT_MIPS_ICHECKSUM = 0x70000003, // Checksum of external strings
- // and common sizes.
- DT_MIPS_IVERSION = 0x70000004, // Index of version string
- // in string table.
- DT_MIPS_FLAGS = 0x70000005, // 32 bits of flags.
- DT_MIPS_BASE_ADDRESS = 0x70000006, // Base address of the segment.
- DT_MIPS_MSYM = 0x70000007, // Address of .msym section.
- DT_MIPS_CONFLICT = 0x70000008, // Address of .conflict section.
- DT_MIPS_LIBLIST = 0x70000009, // Address of .liblist section.
- DT_MIPS_LOCAL_GOTNO = 0x7000000a, // Number of local global offset
- // table entries.
- DT_MIPS_CONFLICTNO = 0x7000000b, // Number of entries
- // in the .conflict section.
- DT_MIPS_LIBLISTNO = 0x70000010, // Number of entries
- // in the .liblist section.
- DT_MIPS_SYMTABNO = 0x70000011, // Number of entries
- // in the .dynsym section.
- DT_MIPS_UNREFEXTNO = 0x70000012, // Index of first external dynamic symbol
- // not referenced locally.
- DT_MIPS_GOTSYM = 0x70000013, // Index of first dynamic symbol
- // in global offset table.
- DT_MIPS_HIPAGENO = 0x70000014, // Number of page table entries
- // in global offset table.
- DT_MIPS_RLD_MAP = 0x70000016, // Address of run time loader map,
- // used for debugging.
- DT_MIPS_DELTA_CLASS = 0x70000017, // Delta C++ class definition.
- DT_MIPS_DELTA_CLASS_NO = 0x70000018, // Number of entries
- // in DT_MIPS_DELTA_CLASS.
- DT_MIPS_DELTA_INSTANCE = 0x70000019, // Delta C++ class instances.
- DT_MIPS_DELTA_INSTANCE_NO = 0x7000001A, // Number of entries
- // in DT_MIPS_DELTA_INSTANCE.
- DT_MIPS_DELTA_RELOC = 0x7000001B, // Delta relocations.
- DT_MIPS_DELTA_RELOC_NO = 0x7000001C, // Number of entries
- // in DT_MIPS_DELTA_RELOC.
- DT_MIPS_DELTA_SYM = 0x7000001D, // Delta symbols that Delta
- // relocations refer to.
- DT_MIPS_DELTA_SYM_NO = 0x7000001E, // Number of entries
- // in DT_MIPS_DELTA_SYM.
- DT_MIPS_DELTA_CLASSSYM = 0x70000020, // Delta symbols that hold
- // class declarations.
- DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021, // Number of entries
- // in DT_MIPS_DELTA_CLASSSYM.
- DT_MIPS_CXX_FLAGS = 0x70000022, // Flags indicating information
- // about C++ flavor.
- DT_MIPS_PIXIE_INIT = 0x70000023, // Pixie information.
- DT_MIPS_SYMBOL_LIB = 0x70000024, // Address of .MIPS.symlib
- DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025, // The GOT index of the first PTE
- // for a segment
- DT_MIPS_LOCAL_GOTIDX = 0x70000026, // The GOT index of the first PTE
- // for a local symbol
- DT_MIPS_HIDDEN_GOTIDX = 0x70000027, // The GOT index of the first PTE
- // for a hidden symbol
- DT_MIPS_PROTECTED_GOTIDX = 0x70000028, // The GOT index of the first PTE
- // for a protected symbol
- DT_MIPS_OPTIONS = 0x70000029, // Address of `.MIPS.options'.
- DT_MIPS_INTERFACE = 0x7000002A, // Address of `.interface'.
- DT_MIPS_DYNSTR_ALIGN = 0x7000002B, // Unknown.
- DT_MIPS_INTERFACE_SIZE = 0x7000002C, // Size of the .interface section.
+ DT_MIPS_RLD_VERSION = 0x70000001, // 32 bit version number for runtime
+ // linker interface.
+ DT_MIPS_TIME_STAMP = 0x70000002, // Time stamp.
+ DT_MIPS_ICHECKSUM = 0x70000003, // Checksum of external strings
+ // and common sizes.
+ DT_MIPS_IVERSION = 0x70000004, // Index of version string
+ // in string table.
+ DT_MIPS_FLAGS = 0x70000005, // 32 bits of flags.
+ DT_MIPS_BASE_ADDRESS = 0x70000006, // Base address of the segment.
+ DT_MIPS_MSYM = 0x70000007, // Address of .msym section.
+ DT_MIPS_CONFLICT = 0x70000008, // Address of .conflict section.
+ DT_MIPS_LIBLIST = 0x70000009, // Address of .liblist section.
+ DT_MIPS_LOCAL_GOTNO = 0x7000000a, // Number of local global offset
+ // table entries.
+ DT_MIPS_CONFLICTNO = 0x7000000b, // Number of entries
+ // in the .conflict section.
+ DT_MIPS_LIBLISTNO = 0x70000010, // Number of entries
+ // in the .liblist section.
+ DT_MIPS_SYMTABNO = 0x70000011, // Number of entries
+ // in the .dynsym section.
+ DT_MIPS_UNREFEXTNO = 0x70000012, // Index of first external dynamic symbol
+ // not referenced locally.
+ DT_MIPS_GOTSYM = 0x70000013, // Index of first dynamic symbol
+ // in global offset table.
+ DT_MIPS_HIPAGENO = 0x70000014, // Number of page table entries
+ // in global offset table.
+ DT_MIPS_RLD_MAP = 0x70000016, // Address of run time loader map,
+ // used for debugging.
+ DT_MIPS_DELTA_CLASS = 0x70000017, // Delta C++ class definition.
+ DT_MIPS_DELTA_CLASS_NO = 0x70000018, // Number of entries
+ // in DT_MIPS_DELTA_CLASS.
+ DT_MIPS_DELTA_INSTANCE = 0x70000019, // Delta C++ class instances.
+ DT_MIPS_DELTA_INSTANCE_NO = 0x7000001A, // Number of entries
+ // in DT_MIPS_DELTA_INSTANCE.
+ DT_MIPS_DELTA_RELOC = 0x7000001B, // Delta relocations.
+ DT_MIPS_DELTA_RELOC_NO = 0x7000001C, // Number of entries
+ // in DT_MIPS_DELTA_RELOC.
+ DT_MIPS_DELTA_SYM = 0x7000001D, // Delta symbols that Delta
+ // relocations refer to.
+ DT_MIPS_DELTA_SYM_NO = 0x7000001E, // Number of entries
+ // in DT_MIPS_DELTA_SYM.
+ DT_MIPS_DELTA_CLASSSYM = 0x70000020, // Delta symbols that hold
+ // class declarations.
+ DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021, // Number of entries
+ // in DT_MIPS_DELTA_CLASSSYM.
+ DT_MIPS_CXX_FLAGS = 0x70000022, // Flags indicating information
+ // about C++ flavor.
+ DT_MIPS_PIXIE_INIT = 0x70000023, // Pixie information.
+ DT_MIPS_SYMBOL_LIB = 0x70000024, // Address of .MIPS.symlib
+ DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025, // The GOT index of the first PTE
+ // for a segment
+ DT_MIPS_LOCAL_GOTIDX = 0x70000026, // The GOT index of the first PTE
+ // for a local symbol
+ DT_MIPS_HIDDEN_GOTIDX = 0x70000027, // The GOT index of the first PTE
+ // for a hidden symbol
+ DT_MIPS_PROTECTED_GOTIDX = 0x70000028, // The GOT index of the first PTE
+ // for a protected symbol
+ DT_MIPS_OPTIONS = 0x70000029, // Address of `.MIPS.options'.
+ DT_MIPS_INTERFACE = 0x7000002A, // Address of `.interface'.
+ DT_MIPS_DYNSTR_ALIGN = 0x7000002B, // Unknown.
+ DT_MIPS_INTERFACE_SIZE = 0x7000002C, // Size of the .interface section.
DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002D, // Size of rld_text_resolve
// function stored in the GOT.
- DT_MIPS_PERF_SUFFIX = 0x7000002E, // Default suffix of DSO to be added
- // by rld on dlopen() calls.
- DT_MIPS_COMPACT_SIZE = 0x7000002F, // Size of compact relocation
- // section (O32).
- DT_MIPS_GP_VALUE = 0x70000030, // GP value for auxiliary GOTs.
- DT_MIPS_AUX_DYNAMIC = 0x70000031, // Address of auxiliary .dynamic.
- DT_MIPS_PLTGOT = 0x70000032, // Address of the base of the PLTGOT.
- DT_MIPS_RWPLT = 0x70000034, // Points to the base
- // of a writable PLT.
- DT_MIPS_RLD_MAP_REL = 0x70000035, // Relative offset of run time loader
- // map, used for debugging.
+ DT_MIPS_PERF_SUFFIX = 0x7000002E, // Default suffix of DSO to be added
+ // by rld on dlopen() calls.
+ DT_MIPS_COMPACT_SIZE = 0x7000002F, // Size of compact relocation
+ // section (O32).
+ DT_MIPS_GP_VALUE = 0x70000030, // GP value for auxiliary GOTs.
+ DT_MIPS_AUX_DYNAMIC = 0x70000031, // Address of auxiliary .dynamic.
+ DT_MIPS_PLTGOT = 0x70000032, // Address of the base of the PLTGOT.
+ DT_MIPS_RWPLT = 0x70000034, // Points to the base
+ // of a writable PLT.
+ DT_MIPS_RLD_MAP_REL = 0x70000035, // Relative offset of run time loader
+ // map, used for debugging.
// Sun machine-independent extensions.
- DT_AUXILIARY = 0x7FFFFFFD, // Shared object to load before self
- DT_FILTER = 0x7FFFFFFF // Shared object to get values from
+ DT_AUXILIARY = 0x7FFFFFFD, // Shared object to load before self
+ DT_FILTER = 0x7FFFFFFF // Shared object to get values from
};
// DT_FLAGS values.
enum {
- DF_ORIGIN = 0x01, // The object may reference $ORIGIN.
- DF_SYMBOLIC = 0x02, // Search the shared lib before searching the exe.
- DF_TEXTREL = 0x04, // Relocations may modify a non-writable segment.
- DF_BIND_NOW = 0x08, // Process all relocations on load.
- DF_STATIC_TLS = 0x10 // Reject attempts to load dynamically.
+ DF_ORIGIN = 0x01, // The object may reference $ORIGIN.
+ DF_SYMBOLIC = 0x02, // Search the shared lib before searching the exe.
+ DF_TEXTREL = 0x04, // Relocations may modify a non-writable segment.
+ DF_BIND_NOW = 0x08, // Process all relocations on load.
+ DF_STATIC_TLS = 0x10 // Reject attempts to load dynamically.
};
// State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 entry.
enum {
- DF_1_NOW = 0x00000001, // Set RTLD_NOW for this object.
- DF_1_GLOBAL = 0x00000002, // Set RTLD_GLOBAL for this object.
- DF_1_GROUP = 0x00000004, // Set RTLD_GROUP for this object.
- DF_1_NODELETE = 0x00000008, // Set RTLD_NODELETE for this object.
- DF_1_LOADFLTR = 0x00000010, // Trigger filtee loading at runtime.
- DF_1_INITFIRST = 0x00000020, // Set RTLD_INITFIRST for this object.
- DF_1_NOOPEN = 0x00000040, // Set RTLD_NOOPEN for this object.
- DF_1_ORIGIN = 0x00000080, // $ORIGIN must be handled.
- DF_1_DIRECT = 0x00000100, // Direct binding enabled.
- DF_1_TRANS = 0x00000200,
- DF_1_INTERPOSE = 0x00000400, // Object is used to interpose.
- DF_1_NODEFLIB = 0x00000800, // Ignore default lib search path.
- DF_1_NODUMP = 0x00001000, // Object can't be dldump'ed.
- DF_1_CONFALT = 0x00002000, // Configuration alternative created.
- DF_1_ENDFILTEE = 0x00004000, // Filtee terminates filters search.
+ DF_1_NOW = 0x00000001, // Set RTLD_NOW for this object.
+ DF_1_GLOBAL = 0x00000002, // Set RTLD_GLOBAL for this object.
+ DF_1_GROUP = 0x00000004, // Set RTLD_GROUP for this object.
+ DF_1_NODELETE = 0x00000008, // Set RTLD_NODELETE for this object.
+ DF_1_LOADFLTR = 0x00000010, // Trigger filtee loading at runtime.
+ DF_1_INITFIRST = 0x00000020, // Set RTLD_INITFIRST for this object.
+ DF_1_NOOPEN = 0x00000040, // Set RTLD_NOOPEN for this object.
+ DF_1_ORIGIN = 0x00000080, // $ORIGIN must be handled.
+ DF_1_DIRECT = 0x00000100, // Direct binding enabled.
+ DF_1_TRANS = 0x00000200,
+ DF_1_INTERPOSE = 0x00000400, // Object is used to interpose.
+ DF_1_NODEFLIB = 0x00000800, // Ignore default lib search path.
+ DF_1_NODUMP = 0x00001000, // Object can't be dldump'ed.
+ DF_1_CONFALT = 0x00002000, // Configuration alternative created.
+ DF_1_ENDFILTEE = 0x00004000, // Filtee terminates filters search.
DF_1_DISPRELDNE = 0x00008000, // Disp reloc applied at build time.
DF_1_DISPRELPND = 0x00010000, // Disp reloc applied at run-time.
- DF_1_NODIRECT = 0x00020000, // Object has no-direct binding.
- DF_1_IGNMULDEF = 0x00040000,
- DF_1_NOKSYMS = 0x00080000,
- DF_1_NOHDR = 0x00100000,
- DF_1_EDITED = 0x00200000, // Object is modified after built.
- DF_1_NORELOC = 0x00400000,
+ DF_1_NODIRECT = 0x00020000, // Object has no-direct binding.
+ DF_1_IGNMULDEF = 0x00040000,
+ DF_1_NOKSYMS = 0x00080000,
+ DF_1_NOHDR = 0x00100000,
+ DF_1_EDITED = 0x00200000, // Object is modified after built.
+ DF_1_NORELOC = 0x00400000,
DF_1_SYMINTPOSE = 0x00800000, // Object has individual interposers.
- DF_1_GLOBAUDIT = 0x01000000, // Global auditing required.
- DF_1_SINGLETON = 0x02000000 // Singleton symbols are used.
+ DF_1_GLOBAUDIT = 0x01000000, // Global auditing required.
+ DF_1_SINGLETON = 0x02000000 // Singleton symbols are used.
};
// DT_MIPS_FLAGS values.
enum {
- RHF_NONE = 0x00000000, // No flags.
- RHF_QUICKSTART = 0x00000001, // Uses shortcut pointers.
- RHF_NOTPOT = 0x00000002, // Hash size is not a power of two.
- RHS_NO_LIBRARY_REPLACEMENT = 0x00000004, // Ignore LD_LIBRARY_PATH.
- RHF_NO_MOVE = 0x00000008, // DSO address may not be relocated.
- RHF_SGI_ONLY = 0x00000010, // SGI specific features.
- RHF_GUARANTEE_INIT = 0x00000020, // Guarantee that .init will finish
- // executing before any non-init
- // code in DSO is called.
- RHF_DELTA_C_PLUS_PLUS = 0x00000040, // Contains Delta C++ code.
- RHF_GUARANTEE_START_INIT = 0x00000080, // Guarantee that .init will start
- // executing before any non-init
- // code in DSO is called.
- RHF_PIXIE = 0x00000100, // Generated by pixie.
- RHF_DEFAULT_DELAY_LOAD = 0x00000200, // Delay-load DSO by default.
- RHF_REQUICKSTART = 0x00000400, // Object may be requickstarted
- RHF_REQUICKSTARTED = 0x00000800, // Object has been requickstarted
- RHF_CORD = 0x00001000, // Generated by cord.
- RHF_NO_UNRES_UNDEF = 0x00002000, // Object contains no unresolved
- // undef symbols.
- RHF_RLD_ORDER_SAFE = 0x00004000 // Symbol table is in a safe order.
+ RHF_NONE = 0x00000000, // No flags.
+ RHF_QUICKSTART = 0x00000001, // Uses shortcut pointers.
+ RHF_NOTPOT = 0x00000002, // Hash size is not a power of two.
+ RHS_NO_LIBRARY_REPLACEMENT = 0x00000004, // Ignore LD_LIBRARY_PATH.
+ RHF_NO_MOVE = 0x00000008, // DSO address may not be relocated.
+ RHF_SGI_ONLY = 0x00000010, // SGI specific features.
+ RHF_GUARANTEE_INIT = 0x00000020, // Guarantee that .init will finish
+ // executing before any non-init
+ // code in DSO is called.
+ RHF_DELTA_C_PLUS_PLUS = 0x00000040, // Contains Delta C++ code.
+ RHF_GUARANTEE_START_INIT = 0x00000080, // Guarantee that .init will start
+ // executing before any non-init
+ // code in DSO is called.
+ RHF_PIXIE = 0x00000100, // Generated by pixie.
+ RHF_DEFAULT_DELAY_LOAD = 0x00000200, // Delay-load DSO by default.
+ RHF_REQUICKSTART = 0x00000400, // Object may be requickstarted
+ RHF_REQUICKSTARTED = 0x00000800, // Object has been requickstarted
+ RHF_CORD = 0x00001000, // Generated by cord.
+ RHF_NO_UNRES_UNDEF = 0x00002000, // Object contains no unresolved
+ // undef symbols.
+ RHF_RLD_ORDER_SAFE = 0x00004000 // Symbol table is in a safe order.
};
// ElfXX_VerDef structure version (GNU versioning)
-enum {
- VER_DEF_NONE = 0,
- VER_DEF_CURRENT = 1
-};
+enum { VER_DEF_NONE = 0, VER_DEF_CURRENT = 1 };
// VerDef Flags (ElfXX_VerDef::vd_flags)
-enum {
- VER_FLG_BASE = 0x1,
- VER_FLG_WEAK = 0x2,
- VER_FLG_INFO = 0x4
-};
+enum { VER_FLG_BASE = 0x1, VER_FLG_WEAK = 0x2, VER_FLG_INFO = 0x4 };
// Special constants for the version table. (SHT_GNU_versym/.gnu.version)
enum {
- VER_NDX_LOCAL = 0, // Unversioned local symbol
+ VER_NDX_LOCAL = 0, // Unversioned local symbol
VER_NDX_GLOBAL = 1, // Unversioned global symbol
VERSYM_VERSION = 0x7fff, // Version Index mask
- VERSYM_HIDDEN = 0x8000 // Hidden bit (non-default version)
+ VERSYM_HIDDEN = 0x8000 // Hidden bit (non-default version)
};
// ElfXX_VerNeed structure version (GNU versioning)
+enum { VER_NEED_NONE = 0, VER_NEED_CURRENT = 1 };
+
+// SHT_NOTE section types
enum {
- VER_NEED_NONE = 0,
- VER_NEED_CURRENT = 1
+ NT_GNU_ABI_TAG = 1,
+ NT_GNU_HWCAP = 2,
+ NT_GNU_BUILD_ID = 3,
+ NT_GNU_GOLD_VERSION = 4,
};
-// SHT_NOTE section types
enum {
- NT_GNU_BUILD_ID = 3
+ GNU_ABI_TAG_LINUX = 0,
+ GNU_ABI_TAG_HURD = 1,
+ GNU_ABI_TAG_SOLARIS = 2,
+ GNU_ABI_TAG_FREEBSD = 3,
+ GNU_ABI_TAG_NETBSD = 4,
+ GNU_ABI_TAG_SYLLABLE = 5,
+ GNU_ABI_TAG_NACL = 6,
};
// Compressed section header for ELF32.
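[Annotation, not part of the patch: the new NT_GNU_* values above name GNU-style SHT_NOTE entries, and the GNU_ABI_TAG_* codes are the operating-system field carried in an NT_GNU_ABI_TAG note. A minimal sketch of the descriptor layout such a note conventionally uses; the struct name is illustrative only and is not an LLVM type.]

#include <cstdint>

// Sketch of the descriptor of a GNU .note.ABI-tag entry (note name "GNU",
// note type NT_GNU_ABI_TAG): an OS code such as GNU_ABI_TAG_FREEBSD, then
// the earliest compatible ABI/kernel version as three 32-bit words.
struct GnuAbiTagDesc {
  uint32_t OS;     // One of the GNU_ABI_TAG_* values above.
  uint32_t Major;  // Earliest compatible version, major.
  uint32_t Minor;
  uint32_t Patch;
};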
diff --git a/include/llvm/Support/ELFRelocs/AArch64.def b/include/llvm/Support/ELFRelocs/AArch64.def
index aa0c560f3e50..c21df07d2dbc 100644
--- a/include/llvm/Support/ELFRelocs/AArch64.def
+++ b/include/llvm/Support/ELFRelocs/AArch64.def
@@ -3,145 +3,199 @@
#error "ELF_RELOC must be defined"
#endif
-// ABI release 1.0
-ELF_RELOC(R_AARCH64_NONE, 0)
-
-ELF_RELOC(R_AARCH64_ABS64, 0x101)
-ELF_RELOC(R_AARCH64_ABS32, 0x102)
-ELF_RELOC(R_AARCH64_ABS16, 0x103)
-ELF_RELOC(R_AARCH64_PREL64, 0x104)
-ELF_RELOC(R_AARCH64_PREL32, 0x105)
-ELF_RELOC(R_AARCH64_PREL16, 0x106)
-
-ELF_RELOC(R_AARCH64_MOVW_UABS_G0, 0x107)
-ELF_RELOC(R_AARCH64_MOVW_UABS_G0_NC, 0x108)
-ELF_RELOC(R_AARCH64_MOVW_UABS_G1, 0x109)
-ELF_RELOC(R_AARCH64_MOVW_UABS_G1_NC, 0x10a)
-ELF_RELOC(R_AARCH64_MOVW_UABS_G2, 0x10b)
-ELF_RELOC(R_AARCH64_MOVW_UABS_G2_NC, 0x10c)
-ELF_RELOC(R_AARCH64_MOVW_UABS_G3, 0x10d)
-ELF_RELOC(R_AARCH64_MOVW_SABS_G0, 0x10e)
-ELF_RELOC(R_AARCH64_MOVW_SABS_G1, 0x10f)
-ELF_RELOC(R_AARCH64_MOVW_SABS_G2, 0x110)
-
-ELF_RELOC(R_AARCH64_LD_PREL_LO19, 0x111)
-ELF_RELOC(R_AARCH64_ADR_PREL_LO21, 0x112)
-ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21, 0x113)
-ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21_NC, 0x114)
-ELF_RELOC(R_AARCH64_ADD_ABS_LO12_NC, 0x115)
-ELF_RELOC(R_AARCH64_LDST8_ABS_LO12_NC, 0x116)
-
-ELF_RELOC(R_AARCH64_TSTBR14, 0x117)
-ELF_RELOC(R_AARCH64_CONDBR19, 0x118)
-ELF_RELOC(R_AARCH64_JUMP26, 0x11a)
-ELF_RELOC(R_AARCH64_CALL26, 0x11b)
-
-ELF_RELOC(R_AARCH64_LDST16_ABS_LO12_NC, 0x11c)
-ELF_RELOC(R_AARCH64_LDST32_ABS_LO12_NC, 0x11d)
-ELF_RELOC(R_AARCH64_LDST64_ABS_LO12_NC, 0x11e)
-
-ELF_RELOC(R_AARCH64_MOVW_PREL_G0, 0x11f)
-ELF_RELOC(R_AARCH64_MOVW_PREL_G0_NC, 0x120)
-ELF_RELOC(R_AARCH64_MOVW_PREL_G1, 0x121)
-ELF_RELOC(R_AARCH64_MOVW_PREL_G1_NC, 0x122)
-ELF_RELOC(R_AARCH64_MOVW_PREL_G2, 0x123)
-ELF_RELOC(R_AARCH64_MOVW_PREL_G2_NC, 0x124)
-ELF_RELOC(R_AARCH64_MOVW_PREL_G3, 0x125)
-
-ELF_RELOC(R_AARCH64_LDST128_ABS_LO12_NC, 0x12b)
-
-ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0, 0x12c)
-ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0_NC, 0x12d)
-ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1, 0x12e)
-ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1_NC, 0x12f)
-ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2, 0x130)
-ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2_NC, 0x131)
-ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G3, 0x132)
-
-ELF_RELOC(R_AARCH64_GOTREL64, 0x133)
-ELF_RELOC(R_AARCH64_GOTREL32, 0x134)
-
-ELF_RELOC(R_AARCH64_GOT_LD_PREL19, 0x135)
-ELF_RELOC(R_AARCH64_LD64_GOTOFF_LO15, 0x136)
-ELF_RELOC(R_AARCH64_ADR_GOT_PAGE, 0x137)
-ELF_RELOC(R_AARCH64_LD64_GOT_LO12_NC, 0x138)
-ELF_RELOC(R_AARCH64_LD64_GOTPAGE_LO15, 0x139)
-
-ELF_RELOC(R_AARCH64_TLSGD_ADR_PREL21, 0x200)
-ELF_RELOC(R_AARCH64_TLSGD_ADR_PAGE21, 0x201)
-ELF_RELOC(R_AARCH64_TLSGD_ADD_LO12_NC, 0x202)
-ELF_RELOC(R_AARCH64_TLSGD_MOVW_G1, 0x203)
-ELF_RELOC(R_AARCH64_TLSGD_MOVW_G0_NC, 0x204)
-
-ELF_RELOC(R_AARCH64_TLSLD_ADR_PREL21, 0x205)
-ELF_RELOC(R_AARCH64_TLSLD_ADR_PAGE21, 0x206)
-ELF_RELOC(R_AARCH64_TLSLD_ADD_LO12_NC, 0x207)
-ELF_RELOC(R_AARCH64_TLSLD_MOVW_G1, 0x208)
-ELF_RELOC(R_AARCH64_TLSLD_MOVW_G0_NC, 0x209)
-ELF_RELOC(R_AARCH64_TLSLD_LD_PREL19, 0x20a)
-ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G2, 0x20b)
-ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1, 0x20c)
-ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC, 0x20d)
-ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0, 0x20e)
-ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC, 0x20f)
-ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_HI12, 0x210)
-ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12, 0x211)
-ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC, 0x212)
-ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12, 0x213)
-ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC, 0x214)
-ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12, 0x215)
-ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC, 0x216)
-ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12, 0x217)
-ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC, 0x218)
-ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12, 0x219)
-ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC, 0x21a)
-
-ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, 0x21b)
-ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, 0x21c)
-ELF_RELOC(R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, 0x21d)
-ELF_RELOC(R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, 0x21e)
-ELF_RELOC(R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, 0x21f)
-
-ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G2, 0x220)
-ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1, 0x221)
-ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, 0x222)
-ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0, 0x223)
-ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, 0x224)
-ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_HI12, 0x225)
-ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12, 0x226)
-ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, 0x227)
-ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12, 0x228)
-ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC, 0x229)
-ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12, 0x22a)
-ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC, 0x22b)
-ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12, 0x22c)
-ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC, 0x22d)
-ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12, 0x22e)
-ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC, 0x22f)
-
-ELF_RELOC(R_AARCH64_TLSDESC_LD_PREL19, 0x230)
-ELF_RELOC(R_AARCH64_TLSDESC_ADR_PREL21, 0x231)
-ELF_RELOC(R_AARCH64_TLSDESC_ADR_PAGE21, 0x232)
-ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12_NC, 0x233)
-ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12_NC, 0x234)
-ELF_RELOC(R_AARCH64_TLSDESC_OFF_G1, 0x235)
-ELF_RELOC(R_AARCH64_TLSDESC_OFF_G0_NC, 0x236)
-ELF_RELOC(R_AARCH64_TLSDESC_LDR, 0x237)
-ELF_RELOC(R_AARCH64_TLSDESC_ADD, 0x238)
-ELF_RELOC(R_AARCH64_TLSDESC_CALL, 0x239)
-
-ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12, 0x23a)
-ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC, 0x23b)
-
-ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12, 0x23c)
-ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC, 0x23d)
-
-ELF_RELOC(R_AARCH64_COPY, 0x400)
-ELF_RELOC(R_AARCH64_GLOB_DAT, 0x401)
-ELF_RELOC(R_AARCH64_JUMP_SLOT, 0x402)
-ELF_RELOC(R_AARCH64_RELATIVE, 0x403)
-ELF_RELOC(R_AARCH64_TLS_DTPREL64, 0x404)
-ELF_RELOC(R_AARCH64_TLS_DTPMOD64, 0x405)
-ELF_RELOC(R_AARCH64_TLS_TPREL64, 0x406)
-ELF_RELOC(R_AARCH64_TLSDESC, 0x407)
-ELF_RELOC(R_AARCH64_IRELATIVE, 0x408)
+// Based on ABI release 1.1-beta, dated 6 November 2013. NB: The cover page of
+// this document, IHI0056C_beta_aaelf64.pdf, on infocenter.arm.com, still
+// labels this as release 1.0.
+ELF_RELOC(R_AARCH64_NONE, 0)
+ELF_RELOC(R_AARCH64_ABS64, 0x101)
+ELF_RELOC(R_AARCH64_ABS32, 0x102)
+ELF_RELOC(R_AARCH64_ABS16, 0x103)
+ELF_RELOC(R_AARCH64_PREL64, 0x104)
+ELF_RELOC(R_AARCH64_PREL32, 0x105)
+ELF_RELOC(R_AARCH64_PREL16, 0x106)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G0, 0x107)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G0_NC, 0x108)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G1, 0x109)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G1_NC, 0x10a)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G2, 0x10b)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G2_NC, 0x10c)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G3, 0x10d)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G0, 0x10e)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G1, 0x10f)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G2, 0x110)
+ELF_RELOC(R_AARCH64_LD_PREL_LO19, 0x111)
+ELF_RELOC(R_AARCH64_ADR_PREL_LO21, 0x112)
+ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21, 0x113)
+ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21_NC, 0x114)
+ELF_RELOC(R_AARCH64_ADD_ABS_LO12_NC, 0x115)
+ELF_RELOC(R_AARCH64_LDST8_ABS_LO12_NC, 0x116)
+ELF_RELOC(R_AARCH64_TSTBR14, 0x117)
+ELF_RELOC(R_AARCH64_CONDBR19, 0x118)
+ELF_RELOC(R_AARCH64_JUMP26, 0x11a)
+ELF_RELOC(R_AARCH64_CALL26, 0x11b)
+ELF_RELOC(R_AARCH64_LDST16_ABS_LO12_NC, 0x11c)
+ELF_RELOC(R_AARCH64_LDST32_ABS_LO12_NC, 0x11d)
+ELF_RELOC(R_AARCH64_LDST64_ABS_LO12_NC, 0x11e)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G0, 0x11f)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G0_NC, 0x120)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G1, 0x121)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G1_NC, 0x122)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G2, 0x123)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G2_NC, 0x124)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G3, 0x125)
+ELF_RELOC(R_AARCH64_LDST128_ABS_LO12_NC, 0x12b)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0, 0x12c)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0_NC, 0x12d)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1, 0x12e)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1_NC, 0x12f)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2, 0x130)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2_NC, 0x131)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G3, 0x132)
+ELF_RELOC(R_AARCH64_GOTREL64, 0x133)
+ELF_RELOC(R_AARCH64_GOTREL32, 0x134)
+ELF_RELOC(R_AARCH64_GOT_LD_PREL19, 0x135)
+ELF_RELOC(R_AARCH64_LD64_GOTOFF_LO15, 0x136)
+ELF_RELOC(R_AARCH64_ADR_GOT_PAGE, 0x137)
+ELF_RELOC(R_AARCH64_LD64_GOT_LO12_NC, 0x138)
+ELF_RELOC(R_AARCH64_LD64_GOTPAGE_LO15, 0x139)
+ELF_RELOC(R_AARCH64_TLSGD_ADR_PREL21, 0x200)
+ELF_RELOC(R_AARCH64_TLSGD_ADR_PAGE21, 0x201)
+ELF_RELOC(R_AARCH64_TLSGD_ADD_LO12_NC, 0x202)
+ELF_RELOC(R_AARCH64_TLSGD_MOVW_G1, 0x203)
+ELF_RELOC(R_AARCH64_TLSGD_MOVW_G0_NC, 0x204)
+ELF_RELOC(R_AARCH64_TLSLD_ADR_PREL21, 0x205)
+ELF_RELOC(R_AARCH64_TLSLD_ADR_PAGE21, 0x206)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_LO12_NC, 0x207)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_G1, 0x208)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_G0_NC, 0x209)
+ELF_RELOC(R_AARCH64_TLSLD_LD_PREL19, 0x20a)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G2, 0x20b)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1, 0x20c)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC, 0x20d)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0, 0x20e)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC, 0x20f)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_HI12, 0x210)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12, 0x211)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC, 0x212)
+ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12, 0x213)
+ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC, 0x214)
+ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12, 0x215)
+ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC, 0x216)
+ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12, 0x217)
+ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC, 0x218)
+ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12, 0x219)
+ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC, 0x21a)
+ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, 0x21b)
+ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, 0x21c)
+ELF_RELOC(R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, 0x21d)
+ELF_RELOC(R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, 0x21e)
+ELF_RELOC(R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, 0x21f)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G2, 0x220)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1, 0x221)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, 0x222)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0, 0x223)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, 0x224)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_HI12, 0x225)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12, 0x226)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, 0x227)
+ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12, 0x228)
+ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC, 0x229)
+ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12, 0x22a)
+ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC, 0x22b)
+ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12, 0x22c)
+ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC, 0x22d)
+ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12, 0x22e)
+ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC, 0x22f)
+ELF_RELOC(R_AARCH64_TLSDESC_LD_PREL19, 0x230)
+ELF_RELOC(R_AARCH64_TLSDESC_ADR_PREL21, 0x231)
+ELF_RELOC(R_AARCH64_TLSDESC_ADR_PAGE21, 0x232)
+ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12_NC, 0x233)
+ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12_NC, 0x234)
+ELF_RELOC(R_AARCH64_TLSDESC_OFF_G1, 0x235)
+ELF_RELOC(R_AARCH64_TLSDESC_OFF_G0_NC, 0x236)
+ELF_RELOC(R_AARCH64_TLSDESC_LDR, 0x237)
+ELF_RELOC(R_AARCH64_TLSDESC_ADD, 0x238)
+ELF_RELOC(R_AARCH64_TLSDESC_CALL, 0x239)
+ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12, 0x23a)
+ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC, 0x23b)
+ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12, 0x23c)
+ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC, 0x23d)
+ELF_RELOC(R_AARCH64_COPY, 0x400)
+ELF_RELOC(R_AARCH64_GLOB_DAT, 0x401)
+ELF_RELOC(R_AARCH64_JUMP_SLOT, 0x402)
+ELF_RELOC(R_AARCH64_RELATIVE, 0x403)
+ELF_RELOC(R_AARCH64_TLS_DTPREL64, 0x404)
+ELF_RELOC(R_AARCH64_TLS_DTPMOD64, 0x405)
+ELF_RELOC(R_AARCH64_TLS_TPREL64, 0x406)
+ELF_RELOC(R_AARCH64_TLSDESC, 0x407)
+ELF_RELOC(R_AARCH64_IRELATIVE, 0x408)
+
+// ELF_RELOC(R_AARCH64_P32_NONE, 0)
+ELF_RELOC(R_AARCH64_P32_ABS32, 0x001)
+ELF_RELOC(R_AARCH64_P32_ABS16, 0x002)
+ELF_RELOC(R_AARCH64_P32_PREL32, 0x003)
+ELF_RELOC(R_AARCH64_P32_PREL16, 0x004)
+ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G0, 0x005)
+ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G0_NC, 0x006)
+ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G1, 0x007)
+ELF_RELOC(R_AARCH64_P32_MOVW_SABS_G0, 0x008)
+ELF_RELOC(R_AARCH64_P32_LD_PREL_LO19, 0x009)
+ELF_RELOC(R_AARCH64_P32_ADR_PREL_LO21, 0x00a)
+ELF_RELOC(R_AARCH64_P32_ADR_PREL_PG_HI21, 0x00b)
+ELF_RELOC(R_AARCH64_P32_ADD_ABS_LO12_NC, 0x00c)
+ELF_RELOC(R_AARCH64_P32_LDST8_ABS_LO12_NC, 0x00d)
+ELF_RELOC(R_AARCH64_P32_TSTBR14, 0x012)
+ELF_RELOC(R_AARCH64_P32_CONDBR19, 0x013)
+ELF_RELOC(R_AARCH64_P32_JUMP26, 0x014)
+ELF_RELOC(R_AARCH64_P32_CALL26, 0x015)
+ELF_RELOC(R_AARCH64_P32_LDST16_ABS_LO12_NC, 0x00e)
+ELF_RELOC(R_AARCH64_P32_LDST32_ABS_LO12_NC, 0x00f)
+ELF_RELOC(R_AARCH64_P32_LDST64_ABS_LO12_NC, 0x010)
+ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0, 0x016)
+ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0_NC, 0x017)
+ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G1, 0x018)
+ELF_RELOC(R_AARCH64_P32_LDST128_ABS_LO12_NC, 0x011)
+ELF_RELOC(R_AARCH64_P32_GOT_LD_PREL19, 0x019)
+ELF_RELOC(R_AARCH64_P32_ADR_GOT_PAGE, 0x01a)
+ELF_RELOC(R_AARCH64_P32_LD64_GOT_LO12_NC, 0x01b)
+ELF_RELOC(R_AARCH64_P32_LD32_GOTPAGE_LO14, 0x01c)
+ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G1, 0x057)
+ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0, 0x058)
+ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0_NC, 0x059)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_HI12, 0x05a)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12, 0x05b)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12_NC, 0x05c)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12, 0x05d)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12_NC, 0x05e)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12, 0x05f)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12_NC, 0x060)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12, 0x061)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12_NC, 0x062)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12, 0x063)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12_NC, 0x064)
+ELF_RELOC(R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21, 0x067)
+ELF_RELOC(R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC, 0x068)
+ELF_RELOC(R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19, 0x069)
+ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G1, 0x06a)
+ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G0, 0x06b)
+ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC, 0x06c)
+ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_HI12, 0x06d)
+ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_LO12, 0x06e)
+ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC, 0x06f)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12, 0x070)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12_NC, 0x071)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12, 0x072)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12_NC, 0x073)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12, 0x074)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12_NC, 0x075)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12, 0x076)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12_NC, 0x077)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PAGE21, 0x051)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_LD32_LO12_NC, 0x07d)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADD_LO12_NC, 0x034)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_CALL, 0x07f)
+ELF_RELOC(R_AARCH64_P32_COPY, 0x0b4)
+ELF_RELOC(R_AARCH64_P32_GLOB_DAT, 0x0b5)
+ELF_RELOC(R_AARCH64_P32_JUMP_SLOT, 0x0b6)
+ELF_RELOC(R_AARCH64_P32_RELATIVE, 0x0b7)
+ELF_RELOC(R_AARCH64_P32_IRELATIVE, 0x0bc)
diff --git a/include/llvm/Support/ELFRelocs/AMDGPU.def b/include/llvm/Support/ELFRelocs/AMDGPU.def
index c1e6797fdb0d..c66f88d14ec7 100644
--- a/include/llvm/Support/ELFRelocs/AMDGPU.def
+++ b/include/llvm/Support/ELFRelocs/AMDGPU.def
@@ -2,11 +2,15 @@
#error "ELF_RELOC must be defined"
#endif
-ELF_RELOC(R_AMDGPU_NONE, 0)
-ELF_RELOC(R_AMDGPU_ABS32_LO, 1)
-ELF_RELOC(R_AMDGPU_ABS32_HI, 2)
-ELF_RELOC(R_AMDGPU_ABS64, 3)
-ELF_RELOC(R_AMDGPU_REL32, 4)
-ELF_RELOC(R_AMDGPU_REL64, 5)
-ELF_RELOC(R_AMDGPU_ABS32, 6)
-ELF_RELOC(R_AMDGPU_GOTPCREL, 7)
+ELF_RELOC(R_AMDGPU_NONE, 0)
+ELF_RELOC(R_AMDGPU_ABS32_LO, 1)
+ELF_RELOC(R_AMDGPU_ABS32_HI, 2)
+ELF_RELOC(R_AMDGPU_ABS64, 3)
+ELF_RELOC(R_AMDGPU_REL32, 4)
+ELF_RELOC(R_AMDGPU_REL64, 5)
+ELF_RELOC(R_AMDGPU_ABS32, 6)
+ELF_RELOC(R_AMDGPU_GOTPCREL, 7)
+ELF_RELOC(R_AMDGPU_GOTPCREL32_LO, 8)
+ELF_RELOC(R_AMDGPU_GOTPCREL32_HI, 9)
+ELF_RELOC(R_AMDGPU_REL32_LO, 10)
+ELF_RELOC(R_AMDGPU_REL32_HI, 11)
diff --git a/include/llvm/Support/ELFRelocs/BPF.def b/include/llvm/Support/ELFRelocs/BPF.def
index 868974d683c7..5dd7f70b6963 100644
--- a/include/llvm/Support/ELFRelocs/BPF.def
+++ b/include/llvm/Support/ELFRelocs/BPF.def
@@ -4,6 +4,5 @@
// No relocation
ELF_RELOC(R_BPF_NONE, 0)
-// Map index in "maps" section to file descriptor
-// within ld_64 instruction.
-ELF_RELOC(R_BPF_MAP_FD, 1)
+ELF_RELOC(R_BPF_64_64, 1)
+ELF_RELOC(R_BPF_64_32, 10)
diff --git a/include/llvm/Support/ELFRelocs/RISCV.def b/include/llvm/Support/ELFRelocs/RISCV.def
new file mode 100644
index 000000000000..9ec4955d26db
--- /dev/null
+++ b/include/llvm/Support/ELFRelocs/RISCV.def
@@ -0,0 +1,50 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_RISCV_NONE, 0)
+ELF_RELOC(R_RISCV_32, 1)
+ELF_RELOC(R_RISCV_64, 2)
+ELF_RELOC(R_RISCV_RELATIVE, 3)
+ELF_RELOC(R_RISCV_COPY, 4)
+ELF_RELOC(R_RISCV_JUMP_SLOT, 5)
+ELF_RELOC(R_RISCV_TLS_DTPMOD32, 6)
+ELF_RELOC(R_RISCV_TLS_DTPMOD64, 7)
+ELF_RELOC(R_RISCV_TLS_DTPREL32, 8)
+ELF_RELOC(R_RISCV_TLS_DTPREL64, 9)
+ELF_RELOC(R_RISCV_TLS_TPREL32, 10)
+ELF_RELOC(R_RISCV_TLS_TPREL64, 11)
+ELF_RELOC(R_RISCV_BRANCH, 16)
+ELF_RELOC(R_RISCV_JAL, 17)
+ELF_RELOC(R_RISCV_CALL, 18)
+ELF_RELOC(R_RISCV_CALL_PLT, 19)
+ELF_RELOC(R_RISCV_GOT_HI20, 20)
+ELF_RELOC(R_RISCV_TLS_GOT_HI20, 21)
+ELF_RELOC(R_RISCV_TLS_GD_HI20, 22)
+ELF_RELOC(R_RISCV_PCREL_HI20, 23)
+ELF_RELOC(R_RISCV_PCREL_LO12_I, 24)
+ELF_RELOC(R_RISCV_PCREL_LO12_S, 25)
+ELF_RELOC(R_RISCV_HI20, 26)
+ELF_RELOC(R_RISCV_LO12_I, 27)
+ELF_RELOC(R_RISCV_LO12_S, 28)
+ELF_RELOC(R_RISCV_TPREL_HI20, 29)
+ELF_RELOC(R_RISCV_TPREL_LO12_I, 30)
+ELF_RELOC(R_RISCV_TPREL_LO12_S, 31)
+ELF_RELOC(R_RISCV_TPREL_ADD, 32)
+ELF_RELOC(R_RISCV_ADD8, 33)
+ELF_RELOC(R_RISCV_ADD16, 34)
+ELF_RELOC(R_RISCV_ADD32, 35)
+ELF_RELOC(R_RISCV_ADD64, 36)
+ELF_RELOC(R_RISCV_SUB8, 37)
+ELF_RELOC(R_RISCV_SUB16, 38)
+ELF_RELOC(R_RISCV_SUB32, 39)
+ELF_RELOC(R_RISCV_SUB64, 40)
+ELF_RELOC(R_RISCV_GNU_VTINHERIT, 41)
+ELF_RELOC(R_RISCV_GNU_VTENTRY, 42)
+ELF_RELOC(R_RISCV_ALIGN, 43)
+ELF_RELOC(R_RISCV_RVC_BRANCH, 44)
+ELF_RELOC(R_RISCV_RVC_JUMP, 45)
+ELF_RELOC(R_RISCV_RVC_LUI, 46)
+ELF_RELOC(R_RISCV_GPREL_I, 47)
+ELF_RELOC(R_RISCV_GPREL_S, 48)
diff --git a/include/llvm/Support/ELFRelocs/SystemZ.def b/include/llvm/Support/ELFRelocs/SystemZ.def
index 711f94011f2c..d6c0b79d40ab 100644
--- a/include/llvm/Support/ELFRelocs/SystemZ.def
+++ b/include/llvm/Support/ELFRelocs/SystemZ.def
@@ -65,3 +65,7 @@ ELF_RELOC(R_390_GOT20, 58)
ELF_RELOC(R_390_GOTPLT20, 59)
ELF_RELOC(R_390_TLS_GOTIE20, 60)
ELF_RELOC(R_390_IRELATIVE, 61)
+ELF_RELOC(R_390_PC12DBL, 62)
+ELF_RELOC(R_390_PLT12DBL, 63)
+ELF_RELOC(R_390_PC24DBL, 64)
+ELF_RELOC(R_390_PLT24DBL, 65)
diff --git a/include/llvm/Support/Endian.h b/include/llvm/Support/Endian.h
index cb5cd8e511b1..cbe3d67b1f9e 100644
--- a/include/llvm/Support/Endian.h
+++ b/include/llvm/Support/Endian.h
@@ -14,7 +14,6 @@
#ifndef LLVM_SUPPORT_ENDIAN_H
#define LLVM_SUPPORT_ENDIAN_H
-#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/SwapByteOrder.h"
@@ -29,7 +28,7 @@ namespace detail {
/// \brief ::value is either alignment, or alignof(T) if alignment is 0.
template<class T, int alignment>
struct PickAlignment {
- enum {value = alignment == 0 ? AlignOf<T>::Alignment : alignment};
+ enum { value = alignment == 0 ? alignof(T) : alignment };
};
} // end namespace detail
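[Annotation, not part of the patch: a minimal sketch of what the PickAlignment change above preserves, assuming the trait lives in llvm::support::detail as declared in Endian.h.]

#include <cstdint>
#include "llvm/Support/Endian.h"

// With the new definition, an explicit alignment is used as-is and
// alignment == 0 falls back to alignof(T) -- the same behaviour as the
// old AlignOf<T>::Alignment spelling.
static_assert(llvm::support::detail::PickAlignment<uint32_t, 0>::value ==
                  alignof(uint32_t),
              "alignment 0 picks alignof(T)");
static_assert(llvm::support::detail::PickAlignment<uint32_t, 2>::value == 2,
              "explicit alignment wins");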
diff --git a/include/llvm/Support/Error.h b/include/llvm/Support/Error.h
index 5f515a88a021..f13c9484b5fd 100644
--- a/include/llvm/Support/Error.h
+++ b/include/llvm/Support/Error.h
@@ -14,25 +14,38 @@
#ifndef LLVM_SUPPORT_ERROR_H
#define LLVM_SUPPORT_ERROR_H
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <memory>
+#include <new>
+#include <string>
+#include <system_error>
+#include <type_traits>
+#include <utility>
#include <vector>
namespace llvm {
-class Error;
-class ErrorList;
+class ErrorSuccess;
/// Base class for error info classes. Do not extend this directly: Extend
/// the ErrorInfo template subclass instead.
class ErrorInfoBase {
public:
- virtual ~ErrorInfoBase() {}
+ virtual ~ErrorInfoBase() = default;
/// Print an error message to an output stream.
virtual void log(raw_ostream &OS) const = 0;
@@ -67,6 +80,7 @@ public:
private:
virtual void anchor();
+
static char ID;
};
@@ -86,12 +100,14 @@ private:
/// Error instance is in. For Error instances indicating success, it
/// is sufficient to invoke the boolean conversion operator. E.g.:
///
+/// @code{.cpp}
/// Error foo(<...>);
///
/// if (auto E = foo(<...>))
/// return E; // <- Return E if it is in the error state.
/// // We have verified that E was in the success state. It can now be safely
/// // destroyed.
+/// @endcode
///
/// A success value *can not* be dropped. For example, just calling 'foo(<...>)'
/// without testing the return value will raise a runtime error, even if foo
@@ -100,6 +116,7 @@ private:
/// For Error instances representing failure, you must use either the
/// handleErrors or handleAllErrors function with a typed handler. E.g.:
///
+/// @code{.cpp}
/// class MyErrorInfo : public ErrorInfo<MyErrorInfo> {
/// // Custom error info.
/// };
@@ -122,6 +139,7 @@ private:
/// );
/// // Note - we must check or return NewE in case any of the handlers
/// // returned a new error.
+/// @endcode
///
/// The handleAllErrors function is identical to handleErrors, except
/// that it has a void return type, and requires all errors to be handled and
@@ -131,8 +149,7 @@ private:
/// *All* Error instances must be checked before destruction, even if
/// they're moved-assigned or constructed from Success values that have already
/// been checked. This enforces checking through all levels of the call stack.
-class Error {
-
+class LLVM_NODISCARD Error {
// ErrorList needs to be able to yank ErrorInfoBase pointers out of this
// class to add to the error list.
friend class ErrorList;
@@ -143,19 +160,18 @@ class Error {
// Expected<T> needs to be able to steal the payload when constructed from an
// error.
- template <typename T> class Expected;
+ template <typename T> friend class Expected;
-public:
+protected:
/// Create a success value. Prefer using 'Error::success()' for readability
- /// where possible.
- Error() {
+ Error() : Payload(nullptr) {
setPtr(nullptr);
setChecked(false);
}
- /// Create a success value. This is equivalent to calling the default
- /// constructor, but should be preferred for readability where possible.
- static Error success() { return Error(); }
+public:
+ /// Create a success value.
+ static ErrorSuccess success();
// Errors are not copy-constructable.
Error(const Error &Other) = delete;
@@ -163,7 +179,7 @@ public:
/// Move-construct an error value. The newly constructed error is considered
/// unchecked, even if the source error had been checked. The original error
/// becomes a checked Success value, regardless of its original state.
- Error(Error &&Other) {
+ Error(Error &&Other) : Payload(nullptr) {
setChecked(true);
*this = std::move(Other);
}
@@ -219,7 +235,7 @@ public:
private:
void assertIsChecked() {
-#ifndef NDEBUG
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
if (!getChecked() || getPtr()) {
dbgs() << "Program aborted due to an unhandled Error:\n";
if (getPtr())
@@ -234,33 +250,35 @@ private:
}
ErrorInfoBase *getPtr() const {
-#ifndef NDEBUG
- return PayloadAndCheckedBit.getPointer();
-#else
- return Payload;
-#endif
+ return reinterpret_cast<ErrorInfoBase*>(
+ reinterpret_cast<uintptr_t>(Payload) &
+ ~static_cast<uintptr_t>(0x1));
}
void setPtr(ErrorInfoBase *EI) {
-#ifndef NDEBUG
- PayloadAndCheckedBit.setPointer(EI);
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ Payload = reinterpret_cast<ErrorInfoBase*>(
+ (reinterpret_cast<uintptr_t>(EI) &
+ ~static_cast<uintptr_t>(0x1)) |
+ (reinterpret_cast<uintptr_t>(Payload) & 0x1));
#else
Payload = EI;
#endif
}
bool getChecked() const {
-#ifndef NDEBUG
- return PayloadAndCheckedBit.getInt();
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ return (reinterpret_cast<uintptr_t>(Payload) & 0x1) == 0;
#else
return true;
#endif
}
void setChecked(bool V) {
-#ifndef NDEBUG
- PayloadAndCheckedBit.setInt(V);
-#endif
+ Payload = reinterpret_cast<ErrorInfoBase*>(
+ (reinterpret_cast<uintptr_t>(Payload) &
+ ~static_cast<uintptr_t>(0x1)) |
+ (V ? 0 : 1));
}
std::unique_ptr<ErrorInfoBase> takePayload() {
@@ -270,13 +288,16 @@ private:
return Tmp;
}
-#ifndef NDEBUG
- PointerIntPair<ErrorInfoBase *, 1> PayloadAndCheckedBit;
-#else
ErrorInfoBase *Payload;
-#endif
};
+/// Subclass of Error for the sole purpose of identifying the success path in
+/// the type system. This allows to catch invalid conversion to Expected<T> at
+/// compile time.
+class ErrorSuccess : public Error {};
+
+inline ErrorSuccess Error::success() { return ErrorSuccess(); }
+
/// Make a Error instance representing failure using the given error info
/// type.
template <typename ErrT, typename... ArgTs> Error make_error(ArgTs &&... Args) {
@@ -305,7 +326,6 @@ public:
/// Special ErrorInfo subclass representing a list of ErrorInfos.
/// Instances of this class are constructed by joinError.
class ErrorList final : public ErrorInfo<ErrorList> {
-
// handleErrors needs to be able to iterate the payload list of an
// ErrorList.
template <typename... HandlerTs>
@@ -570,25 +590,36 @@ inline void consumeError(Error Err) {
/// to check the result. This helper performs these actions automatically using
/// RAII:
///
-/// Result foo(Error &Err) {
-/// ErrorAsOutParameter ErrAsOutParam(Err); // 'Checked' flag set
-/// // <body of foo>
-/// // <- 'Checked' flag auto-cleared when ErrAsOutParam is destructed.
-/// }
+/// @code{.cpp}
+/// Result foo(Error &Err) {
+/// ErrorAsOutParameter ErrAsOutParam(&Err); // 'Checked' flag set
+/// // <body of foo>
+/// // <- 'Checked' flag auto-cleared when ErrAsOutParam is destructed.
+/// }
+/// @endcode
+///
+/// ErrorAsOutParameter takes an Error* rather than Error& so that it can be
+/// used with optional Errors (Error pointers that are allowed to be null). If
+/// ErrorAsOutParameter took an Error reference, an instance would have to be
+/// created inside every condition that verified that Error was non-null. By
+/// taking an Error pointer we can just create one instance at the top of the
+/// function.
class ErrorAsOutParameter {
public:
- ErrorAsOutParameter(Error &Err) : Err(Err) {
+ ErrorAsOutParameter(Error *Err) : Err(Err) {
// Raise the checked bit if Err is success.
- (void)!!Err;
+ if (Err)
+ (void)!!*Err;
}
+
~ErrorAsOutParameter() {
// Clear the checked bit.
- if (!Err)
- Err = Error::success();
+ if (Err && !*Err)
+ *Err = Error::success();
}
private:
- Error &Err;
+ Error *Err;
};
/// Tagged union holding either a T or a Error.
@@ -597,7 +628,7 @@ private:
/// Error cannot be copied, this class replaces getError() with
/// takeError(). It also adds an bool errorIsA<ErrT>() method for testing the
/// error class type.
-template <class T> class Expected {
+template <class T> class LLVM_NODISCARD Expected {
template <class OtherT> friend class Expected;
static const bool isRef = std::is_reference<T>::value;
typedef ReferenceStorage<typename std::remove_reference<T>::type> wrap;
@@ -618,15 +649,20 @@ public:
/// Create an Expected<T> error value from the given Error.
Expected(Error Err)
: HasError(true)
-#ifndef NDEBUG
- ,
- Checked(false)
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ // Expected is unchecked upon construction in Debug builds.
+ , Unchecked(true)
#endif
{
assert(Err && "Cannot create Expected<T> from Error success value.");
- new (getErrorStorage()) Error(std::move(Err));
+ new (getErrorStorage()) error_type(Err.takePayload());
}
+ /// Forbid to convert from Error::success() implicitly, this avoids having
+ /// Expected<T> foo() { return Error::success(); } which compiles otherwise
+ /// but triggers the assertion above.
+ Expected(ErrorSuccess) = delete;
+
/// Create an Expected<T> success value from the given OtherT value, which
/// must be convertible to T.
template <typename OtherT>
@@ -634,9 +670,9 @@ public:
typename std::enable_if<std::is_convertible<OtherT, T>::value>::type
* = nullptr)
: HasError(false)
-#ifndef NDEBUG
- ,
- Checked(false)
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ // Expected is unchecked upon construction in Debug builds.
+ , Unchecked(true)
#endif
{
new (getStorage()) storage_type(std::forward<OtherT>(Val));
@@ -681,8 +717,8 @@ public:
/// \brief Return false if there is an error.
explicit operator bool() {
-#ifndef NDEBUG
- Checked = !HasError;
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ Unchecked = HasError;
#endif
return !HasError;
}
@@ -709,8 +745,8 @@ public:
/// only be safely destructed. No further calls (beside the destructor) should
/// be made on the Expected<T> vaule.
Error takeError() {
-#ifndef NDEBUG
- Checked = true;
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ Unchecked = false;
#endif
return HasError ? Error(std::move(*getErrorStorage())) : Error::success();
}
@@ -752,10 +788,9 @@ private:
template <class OtherT> void moveConstruct(Expected<OtherT> &&Other) {
HasError = Other.HasError;
-
-#ifndef NDEBUG
- Checked = false;
- Other.Checked = true;
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ Unchecked = true;
+ Other.Unchecked = false;
#endif
if (!HasError)
@@ -798,8 +833,8 @@ private:
}
void assertIsChecked() {
-#ifndef NDEBUG
- if (!Checked) {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ if (Unchecked) {
dbgs() << "Expected<T> must be checked before access or destruction.\n";
if (HasError) {
dbgs() << "Unchecked Expected<T> contained error:\n";
@@ -818,8 +853,8 @@ private:
AlignedCharArrayUnion<error_type> ErrorStorage;
};
bool HasError : 1;
-#ifndef NDEBUG
- bool Checked : 1;
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ bool Unchecked : 1;
#endif
};
@@ -830,6 +865,7 @@ private:
/// std::error_codes.
class ECError : public ErrorInfo<ECError> {
friend Error errorCodeToError(std::error_code);
+
public:
void setErrorCode(std::error_code EC) { this->EC = EC; }
std::error_code convertToErrorCode() const override { return EC; }
@@ -841,6 +877,7 @@ public:
protected:
ECError() = default;
ECError(std::error_code EC) : EC(EC) {}
+
std::error_code EC;
};
@@ -883,9 +920,12 @@ template <typename T> ErrorOr<T> expectedToErrorOr(Expected<T> &&E) {
class StringError : public ErrorInfo<StringError> {
public:
static char ID;
+
StringError(const Twine &S, std::error_code EC);
+
void log(raw_ostream &OS) const override;
std::error_code convertToErrorCode() const override;
+
private:
std::string Msg;
std::error_code EC;
@@ -945,6 +985,6 @@ private:
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err,
bool gen_crash_diag = true);
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_SUPPORT_ERROR_H
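[Annotation, not part of the patch: a minimal usage sketch of the Error/Expected API being reworked above; parseInt and useValue are hypothetical helpers, everything else is the public API shown in the diff.]

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Hypothetical helper: produce a value or a typed error.
static Expected<int> parseInt(StringRef S) {
  int Val;
  if (S.getAsInteger(10, Val))
    return make_error<StringError>("not an integer: " + S,
                                   inconvertibleErrorCode());
  return Val;
}

static Error useValue(StringRef S) {
  Expected<int> V = parseInt(S);
  if (!V)                  // Expected<T> must be checked before use...
    return V.takeError();  // ...or its error taken and propagated.
  outs() << *V << "\n";
  return Error::success(); // Returns ErrorSuccess; note that the
                           // Expected<T>(ErrorSuccess) constructor is deleted.
}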
diff --git a/include/llvm/Support/FileSystem.h b/include/llvm/Support/FileSystem.h
index 42a6180e0eb3..9d8d8c3ffb5c 100644
--- a/include/llvm/Support/FileSystem.h
+++ b/include/llvm/Support/FileSystem.h
@@ -31,10 +31,9 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Chrono.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/TimeValue.h"
#include <cassert>
#include <cstdint>
#include <ctime>
@@ -125,6 +124,7 @@ class UniqueID {
public:
UniqueID() = default;
UniqueID(uint64_t Device, uint64_t File) : Device(Device), File(File) {}
+
bool operator==(const UniqueID &Other) const {
return Device == Other.Device && File == Other.File;
}
@@ -132,6 +132,7 @@ public:
bool operator<(const UniqueID &Other) const {
return std::tie(Device, File) < std::tie(Other.Device, Other.File);
}
+
uint64_t getDevice() const { return Device; }
uint64_t getFile() const { return File; }
};
@@ -209,8 +210,8 @@ public:
// getters
file_type type() const { return Type; }
perms permissions() const { return Perms; }
- TimeValue getLastAccessedTime() const;
- TimeValue getLastModificationTime() const;
+ TimePoint<> getLastAccessedTime() const;
+ TimePoint<> getLastModificationTime() const;
UniqueID getUniqueID() const;
#if defined(LLVM_ON_UNIX)
@@ -258,10 +259,12 @@ struct file_magic {
macho_dsym_companion, ///< Mach-O dSYM companion file
macho_kext_bundle, ///< Mach-O kext bundle file
macho_universal_binary, ///< Mach-O universal binary
+ coff_cl_gl_object, ///< Microsoft cl.exe's intermediate code file
coff_object, ///< COFF object file
coff_import_library, ///< COFF import library
pecoff_executable, ///< PECOFF executable file
- windows_resource ///< Windows compiled resource file (.rc)
+ windows_resource, ///< Windows compiled resource file (.rc)
+ wasm_object ///< WebAssembly Object file
};
bool is_object() const {
@@ -339,6 +342,14 @@ std::error_code create_directory(const Twine &path, bool IgnoreExisting = true,
/// specific error_code.
std::error_code create_link(const Twine &to, const Twine &from);
+/// Create a hard link from \a from to \a to, or return an error.
+///
+/// @param to The path to hard link to.
+/// @param from The path to hard link from. This is created.
+/// @returns errc::success if the link was created, otherwise a platform
+/// specific error_code.
+std::error_code create_hard_link(const Twine &to, const Twine &from);
+
/// @brief Get the current path.
///
/// @param result Holds the current path on return.
@@ -540,7 +551,7 @@ inline std::error_code file_size(const Twine &Path, uint64_t &Result) {
/// @returns errc::success if the file times were successfully set, otherwise a
/// platform-specific error_code or errc::function_not_supported on
/// platforms where the functionality isn't available.
-std::error_code setLastModificationAndAccessTime(int FD, TimeValue Time);
+std::error_code setLastModificationAndAccessTime(int FD, TimePoint<> Time);
/// @brief Is status available?
///
@@ -672,10 +683,6 @@ ErrorOr<space_info> disk_space(const Twine &Path);
/// This class represents a memory mapped file. It is based on
/// boost::iostreams::mapped_file.
class mapped_file_region {
- mapped_file_region() = delete;
- mapped_file_region(mapped_file_region&) = delete;
- mapped_file_region &operator =(mapped_file_region&) = delete;
-
public:
enum mapmode {
readonly, ///< May only access map via const_data as read only.
@@ -691,6 +698,10 @@ private:
std::error_code init(int FD, uint64_t Offset, mapmode Mode);
public:
+ mapped_file_region() = delete;
+ mapped_file_region(mapped_file_region&) = delete;
+ mapped_file_region &operator =(mapped_file_region&) = delete;
+
/// \param fd An open file descriptor to map. mapped_file_region takes
/// ownership if closefd is true. It must have been opended in the correct
/// mode.
@@ -731,7 +742,7 @@ public:
: Path(path.str())
, Status(st) {}
- directory_entry() {}
+ directory_entry() = default;
void assign(const Twine &path, file_status st = file_status()) {
Path = path.str();
@@ -829,7 +840,7 @@ namespace detail {
: Level(0)
, HasNoPushRequest(false) {}
- std::stack<directory_iterator, std::vector<directory_iterator> > Stack;
+ std::stack<directory_iterator, std::vector<directory_iterator>> Stack;
uint16_t Level;
bool HasNoPushRequest;
};
@@ -841,13 +852,14 @@ class recursive_directory_iterator {
IntrusiveRefCntPtr<detail::RecDirIterState> State;
public:
- recursive_directory_iterator() {}
+ recursive_directory_iterator() = default;
explicit recursive_directory_iterator(const Twine &path, std::error_code &ec)
: State(new detail::RecDirIterState) {
State->Stack.push(directory_iterator(path, ec));
if (State->Stack.top() == directory_iterator())
State.reset();
}
+
// No operator++ because we need error_code.
recursive_directory_iterator &increment(std::error_code &ec) {
const directory_iterator end_itr;
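
The two filesystem additions above (create_hard_link and the TimePoint<>-based
setLastModificationAndAccessTime) compose naturally. A minimal sketch, assuming
llvm/Support/Chrono.h defines TimePoint<> over std::chrono::system_clock so
that system_clock::now() converts implicitly; snapshotFile is a hypothetical
helper, not part of the change:

    #include "llvm/ADT/Twine.h"
    #include "llvm/Support/Chrono.h"
    #include "llvm/Support/FileSystem.h"
    #include <chrono>
    #include <system_error>

    // Hypothetical helper: hard-link Snapshot to the existing Source, then
    // stamp the already-open descriptor FD with the current time using the
    // new TimePoint<> overload.
    static std::error_code snapshotFile(const llvm::Twine &Source,
                                        const llvm::Twine &Snapshot, int FD) {
      namespace fs = llvm::sys::fs;
      if (std::error_code EC = fs::create_hard_link(Source, Snapshot))
        return EC; // e.g. a platform-specific "file exists" error
      return fs::setLastModificationAndAccessTime(
          FD, std::chrono::system_clock::now());
    }
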
diff --git a/include/llvm/Support/Format.h b/include/llvm/Support/Format.h
index d5c301cd7e2b..017b4973f1ff 100644
--- a/include/llvm/Support/Format.h
+++ b/include/llvm/Support/Format.h
@@ -23,6 +23,7 @@
#ifndef LLVM_SUPPORT_FORMAT_H
#define LLVM_SUPPORT_FORMAT_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
@@ -71,10 +72,20 @@ public:
};
/// These are templated helper classes used by the format function that
-/// capture the object to be formated and the format string. When actually
+/// capture the object to be formatted and the format string. When actually
/// printed, this synthesizes the string into a temporary buffer provided and
/// returns whether or not it is big enough.
+// Helper to validate that format() parameters are scalars or pointers.
+template <typename... Args> struct validate_format_parameters;
+template <typename Arg, typename... Args>
+struct validate_format_parameters<Arg, Args...> {
+ static_assert(std::is_scalar<Arg>::value,
+ "format can't be used with non fundamental / non pointer type");
+ validate_format_parameters() { validate_format_parameters<Args...>(); }
+};
+template <> struct validate_format_parameters<> {};
+
template <typename... Ts>
class format_object final : public format_object_base {
std::tuple<Ts...> Vals;
@@ -91,7 +102,9 @@ class format_object final : public format_object_base {
public:
format_object(const char *fmt, const Ts &... vals)
- : format_object_base(fmt), Vals(vals...) {}
+ : format_object_base(fmt), Vals(vals...) {
+ validate_format_parameters<Ts...>();
+ }
int snprint(char *Buffer, unsigned BufferSize) const override {
return snprint_tuple(Buffer, BufferSize, index_sequence_for<Ts...>());
@@ -190,6 +203,46 @@ inline FormattedNumber format_decimal(int64_t N, unsigned Width) {
return FormattedNumber(0, N, Width, false, false, false);
}
+class FormattedBytes {
+ ArrayRef<uint8_t> Bytes;
+
+ // If not None, display offsets for each line relative to starting value.
+ Optional<uint64_t> FirstByteOffset;
+ uint32_t IndentLevel; // Number of characters to indent each line.
+ uint32_t NumPerLine; // Number of bytes to show per line.
+ uint8_t ByteGroupSize; // How many hex bytes are grouped without spaces
+ bool Upper; // Show offset and hex bytes as upper case.
+ bool ASCII; // Show the ASCII bytes for the hex bytes to the right.
+ friend class raw_ostream;
+
+public:
+ FormattedBytes(ArrayRef<uint8_t> B, uint32_t IL, Optional<uint64_t> O,
+ uint32_t NPL, uint8_t BGS, bool U, bool A)
+ : Bytes(B), FirstByteOffset(O), IndentLevel(IL), NumPerLine(NPL),
+ ByteGroupSize(BGS), Upper(U), ASCII(A) {
+
+ if (ByteGroupSize > NumPerLine)
+ ByteGroupSize = NumPerLine;
+ }
+};
+
+inline FormattedBytes
+format_bytes(ArrayRef<uint8_t> Bytes, Optional<uint64_t> FirstByteOffset = None,
+ uint32_t NumPerLine = 16, uint8_t ByteGroupSize = 4,
+ uint32_t IndentLevel = 0, bool Upper = false) {
+ return FormattedBytes(Bytes, IndentLevel, FirstByteOffset, NumPerLine,
+ ByteGroupSize, Upper, false);
+}
+
+inline FormattedBytes
+format_bytes_with_ascii(ArrayRef<uint8_t> Bytes,
+ Optional<uint64_t> FirstByteOffset = None,
+ uint32_t NumPerLine = 16, uint8_t ByteGroupSize = 4,
+ uint32_t IndentLevel = 0, bool Upper = false) {
+ return FormattedBytes(Bytes, IndentLevel, FirstByteOffset, NumPerLine,
+ ByteGroupSize, Upper, true);
+}
+
} // end namespace llvm
#endif
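
The static_assert added to format_object and the new FormattedBytes helpers are
easiest to read side by side. A small sketch, assuming raw_ostream grows a
matching operator<< for FormattedBytes elsewhere in this change set; dumpMagic
and its values are hypothetical:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/Format.h"
    #include "llvm/Support/raw_ostream.h"

    // Hypothetical example combining printf-style format() with the new
    // format_bytes() hex-dump helper.
    static void dumpMagic(llvm::ArrayRef<uint8_t> Buffer) {
      // Pointers and scalars pass validate_format_parameters; aggregates
      // would trip the static_assert at compile time.
      const char *Label = "magic";
      llvm::outs() << llvm::format("%-8s 0x%08x\n", Label, 0xfeedfacfu);
      // 16 bytes per line, grouped by 4, with offsets starting at 0.
      llvm::outs() << llvm::format_bytes(Buffer, /*FirstByteOffset=*/0,
                                         /*NumPerLine=*/16, /*ByteGroupSize=*/4);
    }
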
diff --git a/include/llvm/Support/FormatAdapters.h b/include/llvm/Support/FormatAdapters.h
new file mode 100644
index 000000000000..7bacd2e17135
--- /dev/null
+++ b/include/llvm/Support/FormatAdapters.h
@@ -0,0 +1,93 @@
+//===- FormatAdapters.h - Formatters for common LLVM types -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATADAPTERS_H
+#define LLVM_SUPPORT_FORMATADAPTERS_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/FormatCommon.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+template <typename T> class FormatAdapter : public detail::format_adapter {
+protected:
+ explicit FormatAdapter(T &&Item) : Item(Item) {}
+
+ T Item;
+
+ static_assert(!detail::uses_missing_provider<T>::value,
+ "Item does not have a format provider!");
+};
+
+namespace detail {
+template <typename T> class AlignAdapter final : public FormatAdapter<T> {
+ AlignStyle Where;
+ size_t Amount;
+
+public:
+ AlignAdapter(T &&Item, AlignStyle Where, size_t Amount)
+ : FormatAdapter<T>(std::forward<T>(Item)), Where(Where), Amount(Amount) {}
+
+ void format(llvm::raw_ostream &Stream, StringRef Style) {
+ auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
+ FmtAlign(Adapter, Where, Amount).format(Stream, Style);
+ }
+};
+
+template <typename T> class PadAdapter final : public FormatAdapter<T> {
+ size_t Left;
+ size_t Right;
+
+public:
+ PadAdapter(T &&Item, size_t Left, size_t Right)
+ : FormatAdapter<T>(std::forward<T>(Item)), Left(Left), Right(Right) {}
+
+ void format(llvm::raw_ostream &Stream, StringRef Style) {
+ auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
+ Stream.indent(Left);
+ Adapter.format(Stream, Style);
+ Stream.indent(Right);
+ }
+};
+
+template <typename T> class RepeatAdapter final : public FormatAdapter<T> {
+ size_t Count;
+
+public:
+ RepeatAdapter(T &&Item, size_t Count)
+ : FormatAdapter<T>(std::forward<T>(Item)), Count(Count) {}
+
+ void format(llvm::raw_ostream &Stream, StringRef Style) {
+ auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
+ for (size_t I = 0; I < Count; ++I) {
+ Adapter.format(Stream, Style);
+ }
+ }
+};
+}
+
+template <typename T>
+detail::AlignAdapter<T> fmt_align(T &&Item, AlignStyle Where, size_t Amount) {
+ return detail::AlignAdapter<T>(std::forward<T>(Item), Where, Amount);
+}
+
+template <typename T>
+detail::PadAdapter<T> fmt_pad(T &&Item, size_t Left, size_t Right) {
+ return detail::PadAdapter<T>(std::forward<T>(Item), Left, Right);
+}
+
+template <typename T>
+detail::RepeatAdapter<T> fmt_repeat(T &&Item, size_t Count) {
+ return detail::RepeatAdapter<T>(std::forward<T>(Item), Count);
+}
+}
+
+#endif
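
The three adapters above are meant to wrap arguments to formatv(), which is
introduced in FormatVariadic.h later in this diff. A hedged sketch; printRow is
a hypothetical function and the column widths are arbitrary:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/FormatAdapters.h"
    #include "llvm/Support/FormatVariadic.h"
    #include "llvm/Support/raw_ostream.h"

    // Hypothetical example: center a name in a 16-column field, pad a value
    // with two spaces on each side, and draw a 24-character separator.
    static void printRow(llvm::StringRef Name, int Value) {
      llvm::outs() << llvm::formatv(
          "{0} |{1}|\n{2}\n",
          llvm::fmt_align(Name, llvm::AlignStyle::Center, 16),
          llvm::fmt_pad(Value, 2, 2), llvm::fmt_repeat('-', 24));
    }
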
diff --git a/include/llvm/Support/FormatCommon.h b/include/llvm/Support/FormatCommon.h
new file mode 100644
index 000000000000..a8c5fdeb6bff
--- /dev/null
+++ b/include/llvm/Support/FormatCommon.h
@@ -0,0 +1,69 @@
+//===- FormatCommon.h - Shared formatting helpers ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATCOMMON_H
+#define LLVM_SUPPORT_FORMATCOMMON_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+enum class AlignStyle { Left, Center, Right };
+
+struct FmtAlign {
+ detail::format_adapter &Adapter;
+ AlignStyle Where;
+ size_t Amount;
+
+ FmtAlign(detail::format_adapter &Adapter, AlignStyle Where, size_t Amount)
+ : Adapter(Adapter), Where(Where), Amount(Amount) {}
+
+ void format(raw_ostream &S, StringRef Options) {
+ // If we don't need to align, we can format straight into the underlying
+ // stream. Otherwise we have to go through an intermediate stream first
+ // in order to calculate how long the output is so we can align it.
+ // TODO: Make the format method return the number of bytes written, that
+ // way we can also skip the intermediate stream for left-aligned output.
+ if (Amount == 0) {
+ Adapter.format(S, Options);
+ return;
+ }
+ SmallString<64> Item;
+ raw_svector_ostream Stream(Item);
+
+ Adapter.format(Stream, Options);
+ if (Amount <= Item.size()) {
+ S << Item;
+ return;
+ }
+
+ size_t PadAmount = Amount - Item.size();
+ switch (Where) {
+ case AlignStyle::Left:
+ S << Item;
+ S.indent(PadAmount);
+ break;
+ case AlignStyle::Center: {
+ size_t X = PadAmount / 2;
+ S.indent(X);
+ S << Item;
+ S.indent(PadAmount - X);
+ break;
+ }
+ default:
+ S.indent(PadAmount);
+ S << Item;
+ break;
+ }
+ }
+};
+}
+
+#endif
diff --git a/include/llvm/Support/FormatProviders.h b/include/llvm/Support/FormatProviders.h
new file mode 100644
index 000000000000..1f0768c3ab08
--- /dev/null
+++ b/include/llvm/Support/FormatProviders.h
@@ -0,0 +1,413 @@
+//===- FormatProviders.h - Formatters for common LLVM types -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements format providers for many common LLVM types, for example
+// allowing precision and width specifiers for scalar and string types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATPROVIDERS_H
+#define LLVM_SUPPORT_FORMATPROVIDERS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/NativeFormatting.h"
+
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+namespace detail {
+template <typename T>
+struct use_integral_formatter
+ : public std::integral_constant<
+ bool, is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+ int64_t, uint64_t, int, unsigned, long, unsigned long,
+ long long, unsigned long long>::value> {};
+
+template <typename T>
+struct use_char_formatter
+ : public std::integral_constant<bool, std::is_same<T, char>::value> {};
+
+template <typename T>
+struct is_cstring
+ : public std::integral_constant<bool,
+ is_one_of<T, char *, const char *>::value> {
+};
+
+template <typename T>
+struct use_string_formatter
+ : public std::integral_constant<
+ bool, is_one_of<T, llvm::StringRef, std::string>::value ||
+ is_cstring<T>::value> {};
+
+template <typename T>
+struct use_pointer_formatter
+ : public std::integral_constant<bool, std::is_pointer<T>::value &&
+ !is_cstring<T>::value> {};
+
+template <typename T>
+struct use_double_formatter
+ : public std::integral_constant<bool, std::is_floating_point<T>::value> {};
+
+class HelperFunctions {
+protected:
+ static Optional<size_t> parseNumericPrecision(StringRef Str) {
+ size_t Prec;
+ Optional<size_t> Result;
+ if (Str.empty())
+ Result = None;
+ else if (Str.getAsInteger(10, Prec)) {
+ assert(false && "Invalid precision specifier");
+ Result = None;
+ } else {
+ assert(Prec < 100 && "Precision out of range");
+ Result = std::min<size_t>(99u, Prec);
+ }
+ return Result;
+ }
+
+ static bool consumeHexStyle(StringRef &Str, HexPrintStyle &Style) {
+ if (!Str.startswith_lower("x"))
+ return false;
+
+ if (Str.consume_front("x-"))
+ Style = HexPrintStyle::Lower;
+ else if (Str.consume_front("X-"))
+ Style = HexPrintStyle::Upper;
+ else if (Str.consume_front("x+") || Str.consume_front("x"))
+ Style = HexPrintStyle::PrefixLower;
+ else if (Str.consume_front("X+") || Str.consume_front("X"))
+ Style = HexPrintStyle::PrefixUpper;
+ return true;
+ }
+
+ static size_t consumeNumHexDigits(StringRef &Str, HexPrintStyle Style,
+ size_t Default) {
+ Str.consumeInteger(10, Default);
+ if (isPrefixedHexStyle(Style))
+ Default += 2;
+ return Default;
+ }
+};
+}
+
+/// Implementation of format_provider<T> for integral arithmetic types.
+///
+/// The options string of an integral type has the grammar:
+///
+/// integer_options :: [style][digits]
+/// style :: <see table below>
+/// digits :: <non-negative integer> 0-99
+///
+/// ==========================================================================
+/// | style | Meaning | Example | Digits Meaning |
+/// --------------------------------------------------------------------------
+/// | | | Input | Output | |
+/// ==========================================================================
+/// | x- | Hex no prefix, lower | 42 | 2a | Minimum # digits |
+/// | X- | Hex no prefix, upper | 42 | 2A | Minimum # digits |
+/// | x+ / x | Hex + prefix, lower | 42 | 0x2a | Minimum # digits |
+/// | X+ / X | Hex + prefix, upper | 42 | 0x2A | Minimum # digits |
+/// | N / n | Digit grouped number | 123456 | 123,456 | Ignored |
+/// | D / d | Integer | 100000 | 100000 | Ignored |
+/// | (empty) | Same as D / d | | | |
+/// ==========================================================================
+///
+
+template <typename T>
+struct format_provider<
+ T, typename std::enable_if<detail::use_integral_formatter<T>::value>::type>
+ : public detail::HelperFunctions {
+private:
+public:
+ static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+ HexPrintStyle HS;
+ size_t Digits = 0;
+ if (consumeHexStyle(Style, HS)) {
+ Digits = consumeNumHexDigits(Style, HS, 0);
+ write_hex(Stream, V, HS, Digits);
+ return;
+ }
+
+ IntegerStyle IS = IntegerStyle::Integer;
+ if (Style.consume_front("N") || Style.consume_front("n"))
+ IS = IntegerStyle::Number;
+ else if (Style.consume_front("D") || Style.consume_front("d"))
+ IS = IntegerStyle::Integer;
+
+ Style.consumeInteger(10, Digits);
+ assert(Style.empty() && "Invalid integral format style!");
+ write_integer(Stream, V, Digits, IS);
+ }
+};
+
+/// Implementation of format_provider<T> for integral pointer types.
+///
+/// The options string of a pointer type has the grammar:
+///
+/// pointer_options :: [style][precision]
+/// style :: <see table below>
+/// precision :: <non-negative integer> 0-sizeof(void*)
+///
+/// ==========================================================================
+/// | S | Meaning | Example |
+/// --------------------------------------------------------------------------
+/// | | | Input | Output |
+/// ==========================================================================
+/// | x- | Hex no prefix, lower | 0xDEADBEEF | deadbeef |
+/// | X- | Hex no prefix, upper | 0xDEADBEEF | DEADBEEF |
+/// | x+ / x | Hex + prefix, lower | 0xDEADBEEF | 0xdeadbeef |
+/// | X+ / X | Hex + prefix, upper | 0xDEADBEEF | 0xDEADBEEF |
+/// | (empty) | Same as X+ / X | | |
+/// ==========================================================================
+///
+/// The default precision is the number of nibbles in a machine word, and in all
+/// cases indicates the minimum number of nibbles to print.
+template <typename T>
+struct format_provider<
+ T, typename std::enable_if<detail::use_pointer_formatter<T>::value>::type>
+ : public detail::HelperFunctions {
+private:
+public:
+ static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+ HexPrintStyle HS = HexPrintStyle::PrefixUpper;
+ consumeHexStyle(Style, HS);
+ size_t Digits = consumeNumHexDigits(Style, HS, sizeof(void *) * 2);
+ write_hex(Stream, reinterpret_cast<std::uintptr_t>(V), HS, Digits);
+ }
+};
+
+/// Implementation of format_provider<T> for c-style strings and string
+/// objects such as std::string and llvm::StringRef.
+///
+/// The options string of a string type has the grammar:
+///
+/// string_options :: [length]
+///
+/// where `length` is an optional integer specifying the maximum number of
+/// characters in the string to print. If `length` is omitted, the entire
+/// string is printed.
+
+template <typename T>
+struct format_provider<
+ T, typename std::enable_if<detail::use_string_formatter<T>::value>::type> {
+ static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+ size_t N = StringRef::npos;
+ if (!Style.empty() && Style.getAsInteger(10, N)) {
+ assert(false && "Style is not a valid integer");
+ }
+ llvm::StringRef S(V);
+ Stream << S.substr(0, N);
+ }
+};
+
+/// Implementation of format_provider<T> for characters.
+///
+/// The options string of a character type has the grammar:
+///
+/// char_options :: (empty) | [integer_options]
+///
+/// If `char_options` is empty, the character is displayed as an ASCII
+/// character. Otherwise, it is treated as an integer options string.
+///
+template <typename T>
+struct format_provider<
+ T, typename std::enable_if<detail::use_char_formatter<T>::value>::type> {
+ static void format(const char &V, llvm::raw_ostream &Stream,
+ StringRef Style) {
+ if (Style.empty())
+ Stream << V;
+ else {
+ int X = static_cast<int>(V);
+ format_provider<int>::format(X, Stream, Style);
+ }
+ }
+};
+
+/// Implementation of format_provider<T> for type `bool`
+///
+/// The options string of a boolean type has the grammar:
+///
+/// bool_options :: "" | "Y" | "y" | "D" | "d" | "T" | "t"
+///
+/// ==================================
+/// | C | Meaning |
+/// ==================================
+/// | Y | YES / NO |
+/// | y | yes / no |
+/// | D / d | Integer 0 or 1 |
+/// | T | TRUE / FALSE |
+/// | t | true / false |
+/// | (empty) | Equivalent to 't' |
+/// ==================================
+template <> struct format_provider<bool> {
+ static void format(const bool &B, llvm::raw_ostream &Stream,
+ StringRef Style) {
+ Stream << StringSwitch<const char *>(Style)
+ .Case("Y", B ? "YES" : "NO")
+ .Case("y", B ? "yes" : "no")
+ .CaseLower("D", B ? "1" : "0")
+ .Case("T", B ? "TRUE" : "FALSE")
+ .Cases("t", "", B ? "true" : "false")
+ .Default(B ? "1" : "0");
+ }
+};
+
+/// Implementation of format_provider<T> for floating point types.
+///
+/// The options string of a floating point type has the format:
+///
+/// float_options :: [style][precision]
+/// style :: <see table below>
+/// precision :: <non-negative integer> 0-99
+///
+/// =====================================================
+/// | style | Meaning | Example |
+/// -----------------------------------------------------
+/// | | | Input | Output |
+/// =====================================================
+/// | P / p | Percentage | 0.05 | 5.00% |
+/// | F / f | Fixed point | 1.0 | 1.00 |
+/// | E | Exponential with E | 100000 | 1.0E+05 |
+/// | e | Exponential with e | 100000 | 1.0e+05 |
+/// | (empty) | Same as F / f | | |
+/// =====================================================
+///
+/// The default precision is 6 for exponential (E / e) and 2 for everything
+/// else.
+
+template <typename T>
+struct format_provider<
+ T, typename std::enable_if<detail::use_double_formatter<T>::value>::type>
+ : public detail::HelperFunctions {
+ static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
+ FloatStyle S;
+ if (Style.consume_front("P") || Style.consume_front("p"))
+ S = FloatStyle::Percent;
+ else if (Style.consume_front("F") || Style.consume_front("f"))
+ S = FloatStyle::Fixed;
+ else if (Style.consume_front("E"))
+ S = FloatStyle::ExponentUpper;
+ else if (Style.consume_front("e"))
+ S = FloatStyle::Exponent;
+ else
+ S = FloatStyle::Fixed;
+
+ Optional<size_t> Precision = parseNumericPrecision(Style);
+ if (!Precision.hasValue())
+ Precision = getDefaultPrecision(S);
+
+ write_double(Stream, static_cast<double>(V), S, Precision);
+ }
+};
+
+namespace detail {
+template <typename IterT>
+using IterValue = typename std::iterator_traits<IterT>::value_type;
+
+template <typename IterT>
+struct range_item_has_provider
+ : public std::integral_constant<
+ bool, !uses_missing_provider<IterValue<IterT>>::value> {};
+}
+
+/// Implementation of format_provider<T> for ranges.
+///
+/// This will print an arbitrary range as a delimited sequence of items.
+///
+/// The options string of a range type has the grammar:
+///
+/// range_style ::= [separator] [element_style]
+/// separator ::= "$" delimited_expr
+/// element_style ::= "@" delimited_expr
+/// delimited_expr ::= "[" expr "]" | "(" expr ")" | "<" expr ">"
+/// expr ::= <any string not containing delimiter>
+///
+/// where the separator expression is the string to insert between consecutive
+/// items in the range and the argument expression is the Style specification to
+/// be used when formatting the underlying type. The default separator if
+/// unspecified is ' ' (space). The syntax of the argument expression follows
+/// whatever grammar is dictated by the format provider or format adapter used
+/// to format the value type.
+///
+/// Note that attempting to format an `iterator_range<T>` where no format
+/// provider can be found for T will result in a compile error.
+///
+
+template <typename IterT> class format_provider<llvm::iterator_range<IterT>> {
+ using value = typename std::iterator_traits<IterT>::value_type;
+ using reference = typename std::iterator_traits<IterT>::reference;
+
+ static StringRef consumeOneOption(StringRef &Style, char Indicator,
+ StringRef Default) {
+ if (Style.empty())
+ return Default;
+ if (Style.front() != Indicator)
+ return Default;
+ Style = Style.drop_front();
+ if (Style.empty()) {
+ assert(false && "Invalid range style");
+ return Default;
+ }
+
+ std::vector<const char *> Delims = {"[]", "<>", "()"};
+ for (const char *D : Delims) {
+ if (Style.front() != D[0])
+ continue;
+ size_t End = Style.find_first_of(D[1]);
+ if (End == StringRef::npos) {
+ assert(false && "Missing range option end delimiter!");
+ return Default;
+ }
+ StringRef Result = Style.slice(1, End);
+ Style = Style.drop_front(End + 1);
+ return Result;
+ }
+ assert(false && "Invalid range style!");
+ return Default;
+ }
+
+ static std::pair<StringRef, StringRef> parseOptions(StringRef Style) {
+ StringRef Sep = consumeOneOption(Style, '$', ", ");
+ StringRef Args = consumeOneOption(Style, '@', "");
+ assert(Style.empty() && "Unexpected text in range option string!");
+ return std::make_pair(Sep, Args);
+ }
+
+public:
+ static_assert(detail::range_item_has_provider<IterT>::value,
+ "Range value_type does not have a format provider!");
+ static void format(const llvm::iterator_range<IterT> &V,
+ llvm::raw_ostream &Stream, StringRef Style) {
+ StringRef Sep;
+ StringRef ArgStyle;
+ std::tie(Sep, ArgStyle) = parseOptions(Style);
+ auto Begin = V.begin();
+ auto End = V.end();
+ if (Begin != End) {
+ auto Adapter =
+ detail::build_format_adapter(std::forward<reference>(*Begin));
+ Adapter.format(Stream, ArgStyle);
+ ++Begin;
+ }
+ while (Begin != End) {
+ Stream << Sep;
+ auto Adapter =
+ detail::build_format_adapter(std::forward<reference>(*Begin));
+ Adapter.format(Stream, ArgStyle);
+ ++Begin;
+ }
+ }
+};
+}
+
+#endif
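
The style strings documented in this header are consumed through formatv(). A
short hedged sketch of several of them together; demoStyles and the literal
values are hypothetical, and the expected output follows the rules documented
above:

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/Support/FormatVariadic.h"
    #include "llvm/Support/raw_ostream.h"
    #include <vector>

    // Hypothetical example exercising the integral, float, bool, and range
    // providers: prints "0xff  7.5%  yes  a, b, c".
    static void demoStyles() {
      std::vector<int> V = {10, 11, 12};
      llvm::outs() << llvm::formatv("{0:x}  {1:P1}  {2:y}  {3:$[, ]@[x-]}\n",
                                    255, 0.075, true,
                                    llvm::make_range(V.begin(), V.end()));
    }
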
diff --git a/include/llvm/Support/FormatVariadic.h b/include/llvm/Support/FormatVariadic.h
new file mode 100644
index 000000000000..e5f5c9615cb6
--- /dev/null
+++ b/include/llvm/Support/FormatVariadic.h
@@ -0,0 +1,247 @@
+//===- FormatVariadic.h - Efficient type-safe string formatting --*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the formatv() function which can be used with other LLVM
+// subsystems to provide printf-like formatting, but with improved safety and
+// flexibility. The result of `formatv` is an object which can be streamed to
+// a raw_ostream or converted to a std::string or llvm::SmallString.
+//
+// // Convert to std::string.
+// std::string S = formatv("{0} {1}", 1234.412, "test").str();
+//
+// // Convert to llvm::SmallString
+// SmallString<8> S = formatv("{0} {1}", 1234.412, "test").sstr<8>();
+//
+// // Stream to an existing raw_ostream.
+// OS << formatv("{0} {1}", 1234.412, "test");
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATVARIADIC_H
+#define LLVM_SUPPORT_FORMATVARIADIC_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/FormatCommon.h"
+#include "llvm/Support/FormatProviders.h"
+#include "llvm/Support/FormatVariadicDetails.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+enum class ReplacementType { Empty, Format, Literal };
+
+struct ReplacementItem {
+ ReplacementItem() = default;
+ explicit ReplacementItem(StringRef Literal)
+ : Type(ReplacementType::Literal), Spec(Literal) {}
+ ReplacementItem(StringRef Spec, size_t Index, size_t Align, AlignStyle Where,
+ char Pad, StringRef Options)
+ : Type(ReplacementType::Format), Spec(Spec), Index(Index), Align(Align),
+ Where(Where), Pad(Pad), Options(Options) {}
+
+ ReplacementType Type = ReplacementType::Empty;
+ StringRef Spec;
+ size_t Index = 0;
+ size_t Align = 0;
+ AlignStyle Where = AlignStyle::Right;
+ char Pad;
+ StringRef Options;
+};
+
+class formatv_object_base {
+protected:
+ // The parameters are stored in a std::tuple, which does not provide runtime
+ // indexing capabilities. In order to enable runtime indexing, we use this
+ // structure to put the parameters into a std::vector. Since the parameters
+ // are not all the same type, we use some type-erasure by wrapping the
+ // parameters in a template class that derives from a non-template superclass.
+ // Essentially, we are converting a std::tuple<Derived<Ts...>> to a
+ // std::vector<Base*>.
+ struct create_adapters {
+ template <typename... Ts>
+ std::vector<detail::format_adapter *> operator()(Ts &... Items) {
+ return std::vector<detail::format_adapter *>{&Items...};
+ }
+ };
+
+ StringRef Fmt;
+ std::vector<detail::format_adapter *> Adapters;
+ std::vector<ReplacementItem> Replacements;
+
+ static bool consumeFieldLayout(StringRef &Spec, AlignStyle &Where,
+ size_t &Align, char &Pad);
+
+ static std::pair<ReplacementItem, StringRef>
+ splitLiteralAndReplacement(StringRef Fmt);
+
+public:
+ formatv_object_base(StringRef Fmt, std::size_t ParamCount)
+ : Fmt(Fmt), Replacements(parseFormatString(Fmt)) {
+ Adapters.reserve(ParamCount);
+ }
+
+ void format(raw_ostream &S) const {
+ for (auto &R : Replacements) {
+ if (R.Type == ReplacementType::Empty)
+ continue;
+ if (R.Type == ReplacementType::Literal) {
+ S << R.Spec;
+ continue;
+ }
+ if (R.Index >= Adapters.size()) {
+ S << R.Spec;
+ continue;
+ }
+
+ auto W = Adapters[R.Index];
+
+ FmtAlign Align(*W, R.Where, R.Align);
+ Align.format(S, R.Options);
+ }
+ }
+ static std::vector<ReplacementItem> parseFormatString(StringRef Fmt);
+
+ static Optional<ReplacementItem> parseReplacementItem(StringRef Spec);
+
+ std::string str() const {
+ std::string Result;
+ raw_string_ostream Stream(Result);
+ Stream << *this;
+ Stream.flush();
+ return Result;
+ }
+
+ template <unsigned N> SmallString<N> sstr() const {
+ SmallString<N> Result;
+ raw_svector_ostream Stream(Result);
+ Stream << *this;
+ return Result;
+ }
+
+ template <unsigned N> operator SmallString<N>() const { return sstr<N>(); }
+
+ operator std::string() const { return str(); }
+};
+
+template <typename Tuple> class formatv_object : public formatv_object_base {
+ // Storage for the parameter adapters. Since the base class erases the type
+ // of the parameters, we have to own the storage for the parameters here, and
+ // have the base class store type-erased pointers into this tuple.
+ Tuple Parameters;
+
+public:
+ formatv_object(StringRef Fmt, Tuple &&Params)
+ : formatv_object_base(Fmt, std::tuple_size<Tuple>::value),
+ Parameters(std::move(Params)) {
+ Adapters = apply_tuple(create_adapters(), Parameters);
+ }
+};
+
+// \brief Format text given a format string and replacement parameters.
+//
+// ===General Description===
+//
+// Formats textual output. `Fmt` is a string consisting of one or more
+// replacement sequences with the following grammar:
+//
+// rep_field ::= "{" [index] ["," layout] [":" format] "}"
+// index ::= <non-negative integer>
+// layout ::= [[[char]loc]width]
+// format ::= <any string not containing "{" or "}">
+// char ::= <any character except "{" or "}">
+// loc ::= "-" | "=" | "+"
+// width ::= <positive integer>
+//
+// index - A non-negative integer specifying the index of the item in the
+// parameter pack to print. Any other value is invalid.
+// layout - A string controlling how the field is laid out within the available
+// space.
+// format - A type-dependent string used to provide additional options to
+// the formatting operation. Refer to the documentation of the
+// various individual format providers for per-type options.
+// char - The padding character. Defaults to ' ' (space). Only valid if
+// `loc` is also specified.
+// loc - Where to print the formatted text within the field. Only valid if
+// `width` is also specified.
+// '-' : The field is left aligned within the available space.
+// '=' : The field is centered within the available space.
+// '+' : The field is right aligned within the available space (this
+// is the default).
+// width - The width of the field within which to print the formatted text.
+// If this is less than the required length then the `char` and `loc`
+// fields are ignored, and the field is printed with no leading or
+// trailing padding. If this is greater than the required length,
+// then the text is output according to the value of `loc`, and padded
+// as appropriate on the left and/or right by `char`.
+//
+// ===Special Characters===
+//
+// The characters '{' and '}' are reserved and cannot appear anywhere within a
+// replacement sequence. Outside of a replacement sequence, in order to print
+// a literal '{' or '}' it must be doubled -- "{{" to print a literal '{' and
+// "}}" to print a literal '}'.
+//
+// ===Parameter Indexing===
+// `index` specifies the index of the parameter in the parameter pack to format
+// into the output. Note that it is possible to refer to the same parameter
+// index multiple times in a given format string. This makes it possible to
+// output the same value multiple times without passing it multiple times to the
+// function. For example:
+//
+// formatv("{0} {1} {0}", "a", "bb")
+//
+// would yield the string "abba". This can be convenient when it is expensive
+// to compute the value of the parameter, and you would otherwise have had to
+// save it to a temporary.
+//
+// ===Formatter Search===
+//
+// For a given parameter of type T, the following steps are executed in order
+// until a match is found:
+//
+// 1. If the parameter is of class type, and contains a method
+// void format(raw_ostream &Stream, StringRef Options)
+// Then this method is invoked to produce the formatted output. The
+// implementation should write the formatted text into `Stream`.
+// 2. If there is a suitable template specialization of format_provider<>
+// for type T containing a method whose signature is:
+// void format(const T &Obj, raw_ostream &Stream, StringRef Options)
+// Then this method is invoked as described in Step 1.
+//
+// If a match cannot be found through either of the above methods, a compiler
+// error is generated.
+//
+// ===Invalid Format String Handling===
+//
+// In the case of a format string which does not match the grammar described
+// above, the output is undefined. With asserts enabled, LLVM will trigger an
+// assertion. Otherwise, it will try to do something reasonable, but in general
+// the details of what that is are undefined.
+//
+template <typename... Ts>
+inline auto formatv(const char *Fmt, Ts &&... Vals) -> formatv_object<decltype(
+ std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...))> {
+ using ParamTuple = decltype(
+ std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...));
+ return formatv_object<ParamTuple>(
+ Fmt,
+ std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_FORMATVARIADIC_H
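
To make the "Formatter Search" rules above concrete, here is a sketch of opting
a user-defined type into formatv() through step 2, a format_provider
specialization. Point is a hypothetical type, not part of this change:

    #include "llvm/Support/FormatVariadic.h"
    #include "llvm/Support/raw_ostream.h"

    struct Point { int X, Y; }; // hypothetical user type

    namespace llvm {
    template <> struct format_provider<Point> {
      // The exact signature the formatter search looks for.
      static void format(const Point &P, raw_ostream &Stream, StringRef Style) {
        // Reuse the integral provider so styles such as "x" apply to both
        // coordinates.
        Stream << '(';
        format_provider<int>::format(P.X, Stream, Style);
        Stream << ", ";
        format_provider<int>::format(P.Y, Stream, Style);
        Stream << ')';
      }
    };
    } // end namespace llvm

    // llvm::outs() << llvm::formatv("{0}", Point{3, 4}); // prints "(3, 4)"
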
diff --git a/include/llvm/Support/FormatVariadicDetails.h b/include/llvm/Support/FormatVariadicDetails.h
new file mode 100644
index 000000000000..b4a564ffc26c
--- /dev/null
+++ b/include/llvm/Support/FormatVariadicDetails.h
@@ -0,0 +1,112 @@
+//===- FormatVariadicDetails.h - Helpers for FormatVariadic.h ----*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATVARIADIC_DETAILS_H
+#define LLVM_SUPPORT_FORMATVARIADIC_DETAILS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <type_traits>
+
+namespace llvm {
+template <typename T, typename Enable = void> struct format_provider {};
+
+namespace detail {
+class format_adapter {
+protected:
+ virtual ~format_adapter() {}
+
+public:
+ virtual void format(raw_ostream &S, StringRef Options) = 0;
+};
+
+template <typename T> class provider_format_adapter : public format_adapter {
+ T Item;
+
+public:
+ explicit provider_format_adapter(T &&Item) : Item(Item) {}
+
+ void format(llvm::raw_ostream &S, StringRef Options) override {
+ format_provider<typename std::decay<T>::type>::format(Item, S, Options);
+ }
+};
+
+template <typename T> class missing_format_adapter;
+
+// Test if format_provider<T> is defined on T and contains a member function
+// with the signature:
+// static void format(const T&, raw_stream &, StringRef);
+//
+template <class T> class has_FormatProvider {
+public:
+ using Decayed = typename std::decay<T>::type;
+ typedef void (*Signature_format)(const Decayed &, llvm::raw_ostream &,
+ StringRef);
+
+ template <typename U>
+ static char test(SameType<Signature_format, &U::format> *);
+
+ template <typename U> static double test(...);
+
+ static bool const value =
+ (sizeof(test<llvm::format_provider<Decayed>>(nullptr)) == 1);
+};
+
+// Simple template that decides whether a type T should use the member-function
+// based format() invocation.
+template <typename T>
+struct uses_format_member
+ : public std::integral_constant<
+ bool,
+ std::is_base_of<format_adapter,
+ typename std::remove_reference<T>::type>::value> {};
+
+// Simple template that decides whether a type T should use the format_provider
+// based format() invocation. The member function takes priority, so this test
+// will only be true if there is not ALSO a format member.
+template <typename T>
+struct uses_format_provider
+ : public std::integral_constant<
+ bool, !uses_format_member<T>::value && has_FormatProvider<T>::value> {
+};
+
+// Simple template that decides whether a type T has neither a member-function
+// nor format_provider based implementation that it can use. Mostly used so
+// that the compiler spits out a nice diagnostic when a type with no format
+// implementation can be located.
+template <typename T>
+struct uses_missing_provider
+ : public std::integral_constant<bool,
+ !uses_format_member<T>::value &&
+ !uses_format_provider<T>::value> {};
+
+template <typename T>
+typename std::enable_if<uses_format_member<T>::value, T>::type
+build_format_adapter(T &&Item) {
+ return std::forward<T>(Item);
+}
+
+template <typename T>
+typename std::enable_if<uses_format_provider<T>::value,
+ provider_format_adapter<T>>::type
+build_format_adapter(T &&Item) {
+ return provider_format_adapter<T>(std::forward<T>(Item));
+}
+
+template <typename T>
+typename std::enable_if<uses_missing_provider<T>::value,
+ missing_format_adapter<T>>::type
+build_format_adapter(T &&Item) {
+ return missing_format_adapter<T>();
+}
+}
+}
+
+#endif
diff --git a/include/llvm/Support/GCOV.h b/include/llvm/Support/GCOV.h
index 544434f036a4..f297fe609d2a 100644
--- a/include/llvm/Support/GCOV.h
+++ b/include/llvm/Support/GCOV.h
@@ -1,4 +1,4 @@
-//===- GCOV.h - LLVM coverage tool ----------------------------------------===//
+//===- GCOV.h - LLVM coverage tool ------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,12 +16,20 @@
#define LLVM_SUPPORT_GCOV_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
namespace llvm {
@@ -30,6 +38,7 @@ class GCOVBlock;
class FileInfo;
namespace GCOV {
+
enum GCOVVersion { V402, V404, V704 };
/// \brief A struct for passing gcov options between functions.
@@ -47,7 +56,8 @@ struct Options {
bool LongFileNames;
bool NoOutput;
};
-} // end GCOV namespace
+
+} // end namespace GCOV
/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific
/// read operations.
@@ -232,8 +242,9 @@ private:
class GCOVFile {
public:
GCOVFile()
- : GCNOInitialized(false), Checksum(0), Functions(), RunCount(0),
+ : GCNOInitialized(false), Checksum(0), RunCount(0),
ProgramCount(0) {}
+
bool readGCNO(GCOVBuffer &Buffer);
bool readGCDA(GCOVBuffer &Buffer);
uint32_t getChecksum() const { return Checksum; }
@@ -312,9 +323,9 @@ public:
typedef SmallVectorImpl<GCOVEdge *>::const_iterator EdgeIterator;
GCOVBlock(GCOVFunction &P, uint32_t N)
- : Parent(P), Number(N), Counter(0), DstEdgesAreSorted(true), SrcEdges(),
- DstEdges(), Lines() {}
+ : Parent(P), Number(N), Counter(0), DstEdgesAreSorted(true) {}
~GCOVBlock();
+
const GCOVFunction &getParent() const { return Parent; }
void addLine(uint32_t N) { Lines.push_back(N); }
uint32_t getLastLine() const { return Lines.back(); }
@@ -325,6 +336,7 @@ public:
assert(&Edge->Dst == this); // up to caller to ensure edge is valid
SrcEdges.push_back(Edge);
}
+
void addDstEdge(GCOVEdge *Edge) {
assert(&Edge->Src == this); // up to caller to ensure edge is valid
// Check if adding this edge causes list to become unsorted.
@@ -332,6 +344,7 @@ public:
DstEdgesAreSorted = false;
DstEdges.push_back(Edge);
}
+
size_t getNumSrcEdges() const { return SrcEdges.size(); }
size_t getNumDstEdges() const { return DstEdges.size(); }
void sortDstEdges();
@@ -396,19 +409,21 @@ class FileInfo {
public:
FileInfo(const GCOV::Options &Options)
- : Options(Options), LineInfo(), RunCount(0), ProgramCount(0) {}
+ : Options(Options), RunCount(0), ProgramCount(0) {}
void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) {
if (Line > LineInfo[Filename].LastLine)
LineInfo[Filename].LastLine = Line;
LineInfo[Filename].Blocks[Line - 1].push_back(Block);
}
+
void addFunctionLine(StringRef Filename, uint32_t Line,
const GCOVFunction *Function) {
if (Line > LineInfo[Filename].LastLine)
LineInfo[Filename].LastLine = Line;
LineInfo[Filename].Functions[Line - 1].push_back(Function);
}
+
void setRunCount(uint32_t Runs) { RunCount = Runs; }
void setProgramCount(uint32_t Programs) { ProgramCount = Programs; }
void print(raw_ostream &OS, StringRef MainFilename, StringRef GCNOFile,
@@ -440,6 +455,7 @@ private:
FileCoverageList FileCoverages;
FuncCoverageMap FuncCoverages;
};
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_GCOV_H
diff --git a/include/llvm/Support/GenericDomTree.h b/include/llvm/Support/GenericDomTree.h
index 77b5ba7e3e97..07a53438085a 100644
--- a/include/llvm/Support/GenericDomTree.h
+++ b/include/llvm/Support/GenericDomTree.h
@@ -13,6 +13,12 @@
/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
/// graph types.
///
+/// Unlike the ADT/* graph algorithms, the generic dominator tree places more
+/// requirements on the graph's NodeRef: it must be a pointer and, depending on
+/// the implementation, NodeRef->getParent() must return the parent node.
+///
+/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
@@ -30,6 +36,23 @@
namespace llvm {
+template <class NodeT> class DominatorTreeBase;
+
+namespace detail {
+
+template <typename GT> struct DominatorTreeBaseTraits {
+ static_assert(std::is_pointer<typename GT::NodeRef>::value,
+ "Currently NodeRef must be a pointer type.");
+ using type = DominatorTreeBase<
+ typename std::remove_pointer<typename GT::NodeRef>::type>;
+};
+
+} // End namespace detail
+
+template <typename GT>
+using DominatorTreeBaseByGraphTraits =
+ typename detail::DominatorTreeBaseTraits<GT>::type;
+
/// \brief Base class that other, more interesting dominator analyses
/// inherit from.
template <class NodeT> class DominatorBase {
@@ -62,7 +85,6 @@ public:
bool isPostDominator() const { return IsPostDominators; }
};
-template <class NodeT> class DominatorTreeBase;
struct PostDominatorTree;
/// \brief Base class for the actual dominator tree node.
@@ -126,7 +148,7 @@ public:
assert(IDom && "No immediate dominator?");
if (IDom != NewIDom) {
typename std::vector<DomTreeNodeBase<NodeT> *>::iterator I =
- std::find(IDom->Children.begin(), IDom->Children.end(), this);
+ find(IDom->Children, this);
assert(I != IDom->Children.end() &&
"Not in immediate dominator children set!");
// I am no longer your child...
@@ -177,8 +199,7 @@ void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &o,
// The calculate routine is provided in a separate header but referenced here.
template <class FuncT, class N>
-void Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType> &DT,
- FuncT &F);
+void Calculate(DominatorTreeBaseByGraphTraits<GraphTraits<N>> &DT, FuncT &F);
/// \brief Core dominator tree base class.
///
@@ -251,14 +272,14 @@ protected:
// NewBB is split and now it has one successor. Update dominator tree to
// reflect this change.
template <class N, class GraphT>
- void Split(DominatorTreeBase<typename GraphT::NodeType> &DT,
- typename GraphT::NodeType *NewBB) {
+ void Split(DominatorTreeBaseByGraphTraits<GraphT> &DT,
+ typename GraphT::NodeRef NewBB) {
assert(std::distance(GraphT::child_begin(NewBB),
GraphT::child_end(NewBB)) == 1 &&
"NewBB should have a single successor!");
- typename GraphT::NodeType *NewBBSucc = *GraphT::child_begin(NewBB);
+ typename GraphT::NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
- std::vector<typename GraphT::NodeType *> PredBlocks;
+ std::vector<typename GraphT::NodeRef> PredBlocks;
typedef GraphTraits<Inverse<N>> InvTraits;
for (typename InvTraits::ChildIteratorType
PI = InvTraits::child_begin(NewBB),
@@ -273,7 +294,7 @@ protected:
PI = InvTraits::child_begin(NewBBSucc),
E = InvTraits::child_end(NewBBSucc);
PI != E; ++PI) {
- typename InvTraits::NodeType *ND = *PI;
+ typename InvTraits::NodeRef ND = *PI;
if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
DT.isReachableFromEntry(ND)) {
NewBBDominatesNewBBSucc = false;
@@ -588,7 +609,7 @@ public:
DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
if (IDom) {
typename std::vector<DomTreeNodeBase<NodeT> *>::iterator I =
- std::find(IDom->Children.begin(), IDom->Children.end(), Node);
+ find(IDom->Children, Node);
assert(I != IDom->Children.end() &&
"Not in immediate dominator children set!");
// I am no longer your child...
@@ -627,18 +648,17 @@ public:
protected:
template <class GraphT>
- friend typename GraphT::NodeType *
- Eval(DominatorTreeBase<typename GraphT::NodeType> &DT,
- typename GraphT::NodeType *V, unsigned LastLinked);
+ friend typename GraphT::NodeRef
+ Eval(DominatorTreeBaseByGraphTraits<GraphT> &DT, typename GraphT::NodeRef V,
+ unsigned LastLinked);
template <class GraphT>
- friend unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType> &DT,
- typename GraphT::NodeType *V, unsigned N);
+ friend unsigned DFSPass(DominatorTreeBaseByGraphTraits<GraphT> &DT,
+ typename GraphT::NodeRef V, unsigned N);
template <class FuncT, class N>
- friend void
- Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType> &DT, FuncT &F);
-
+ friend void Calculate(DominatorTreeBaseByGraphTraits<GraphTraits<N>> &DT,
+ FuncT &F);
DomTreeNodeBase<NodeT> *getNodeForBlock(NodeT *BB) {
if (DomTreeNodeBase<NodeT> *Node = getNode(BB))
@@ -730,8 +750,8 @@ public:
for (typename TraitsTy::nodes_iterator I = TraitsTy::nodes_begin(&F),
E = TraitsTy::nodes_end(&F);
I != E; ++I)
- if (TraitsTy::child_begin(&*I) == TraitsTy::child_end(&*I))
- addRoot(&*I);
+ if (TraitsTy::child_begin(*I) == TraitsTy::child_end(*I))
+ addRoot(*I);
Calculate<FT, Inverse<NodeT *>>(*this, F);
}
diff --git a/include/llvm/Support/GenericDomTreeConstruction.h b/include/llvm/Support/GenericDomTreeConstruction.h
index 3e867dc6cbf1..54e55cc1a32e 100644
--- a/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/include/llvm/Support/GenericDomTreeConstruction.h
@@ -29,9 +29,9 @@
namespace llvm {
-template<class GraphT>
-unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType* V, unsigned N) {
+template <class GraphT>
+unsigned DFSPass(DominatorTreeBaseByGraphTraits<GraphT> &DT,
+ typename GraphT::NodeRef V, unsigned N) {
// This is more understandable as a recursive algorithm, but we can't use the
// recursive algorithm due to stack depth issues. Keep it here for
// documentation purposes.
@@ -52,15 +52,16 @@ unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
#else
bool IsChildOfArtificialExit = (N != 0);
- SmallVector<std::pair<typename GraphT::NodeType*,
- typename GraphT::ChildIteratorType>, 32> Worklist;
+ SmallVector<
+ std::pair<typename GraphT::NodeRef, typename GraphT::ChildIteratorType>,
+ 32>
+ Worklist;
Worklist.push_back(std::make_pair(V, GraphT::child_begin(V)));
while (!Worklist.empty()) {
- typename GraphT::NodeType* BB = Worklist.back().first;
+ typename GraphT::NodeRef BB = Worklist.back().first;
typename GraphT::ChildIteratorType NextSucc = Worklist.back().second;
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo =
- DT.Info[BB];
+ auto &BBInfo = DT.Info[BB];
// First time we visited this BB?
if (NextSucc == GraphT::child_begin(BB)) {
@@ -89,10 +90,9 @@ unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
++Worklist.back().second;
// Visit the successor next, if it isn't already visited.
- typename GraphT::NodeType* Succ = *NextSucc;
+ typename GraphT::NodeRef Succ = *NextSucc;
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &SuccVInfo =
- DT.Info[Succ];
+ auto &SuccVInfo = DT.Info[Succ];
if (SuccVInfo.Semi == 0) {
SuccVInfo.Parent = BBDFSNum;
Worklist.push_back(std::make_pair(Succ, GraphT::child_begin(Succ)));
@@ -103,25 +103,23 @@ unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
}
template <class GraphT>
-typename GraphT::NodeType *
-Eval(DominatorTreeBase<typename GraphT::NodeType> &DT,
- typename GraphT::NodeType *VIn, unsigned LastLinked) {
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInInfo =
- DT.Info[VIn];
+typename GraphT::NodeRef Eval(DominatorTreeBaseByGraphTraits<GraphT> &DT,
+ typename GraphT::NodeRef VIn,
+ unsigned LastLinked) {
+ auto &VInInfo = DT.Info[VIn];
if (VInInfo.DFSNum < LastLinked)
return VIn;
- SmallVector<typename GraphT::NodeType*, 32> Work;
- SmallPtrSet<typename GraphT::NodeType*, 32> Visited;
+ SmallVector<typename GraphT::NodeRef, 32> Work;
+ SmallPtrSet<typename GraphT::NodeRef, 32> Visited;
if (VInInfo.Parent >= LastLinked)
Work.push_back(VIn);
while (!Work.empty()) {
- typename GraphT::NodeType* V = Work.back();
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
- DT.Info[V];
- typename GraphT::NodeType* VAncestor = DT.Vertex[VInfo.Parent];
+ typename GraphT::NodeRef V = Work.back();
+ auto &VInfo = DT.Info[V];
+ typename GraphT::NodeRef VAncestor = DT.Vertex[VInfo.Parent];
// Process Ancestor first
if (Visited.insert(VAncestor).second && VInfo.Parent >= LastLinked) {
@@ -134,10 +132,9 @@ Eval(DominatorTreeBase<typename GraphT::NodeType> &DT,
if (VInfo.Parent < LastLinked)
continue;
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VAInfo =
- DT.Info[VAncestor];
- typename GraphT::NodeType* VAncestorLabel = VAInfo.Label;
- typename GraphT::NodeType* VLabel = VInfo.Label;
+ auto &VAInfo = DT.Info[VAncestor];
+ typename GraphT::NodeRef VAncestorLabel = VAInfo.Label;
+ typename GraphT::NodeRef VLabel = VInfo.Label;
if (DT.Info[VAncestorLabel].Semi < DT.Info[VLabel].Semi)
VInfo.Label = VAncestorLabel;
VInfo.Parent = VAInfo.Parent;
@@ -146,16 +143,18 @@ Eval(DominatorTreeBase<typename GraphT::NodeType> &DT,
return VInInfo.Label;
}
-template<class FuncT, class NodeT>
-void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
- FuncT& F) {
+template <class FuncT, class NodeT>
+void Calculate(DominatorTreeBaseByGraphTraits<GraphTraits<NodeT>> &DT,
+ FuncT &F) {
typedef GraphTraits<NodeT> GraphT;
+ static_assert(std::is_pointer<typename GraphT::NodeRef>::value,
+ "NodeRef should be pointer type");
+ typedef typename std::remove_pointer<typename GraphT::NodeRef>::type NodeType;
unsigned N = 0;
bool MultipleRoots = (DT.Roots.size() > 1);
if (MultipleRoots) {
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo =
- DT.Info[nullptr];
+ auto &BBInfo = DT.Info[nullptr];
BBInfo.DFSNum = BBInfo.Semi = ++N;
BBInfo.Label = nullptr;
@@ -188,14 +187,13 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
Buckets[i] = i;
for (unsigned i = N; i >= 2; --i) {
- typename GraphT::NodeType* W = DT.Vertex[i];
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo =
- DT.Info[W];
+ typename GraphT::NodeRef W = DT.Vertex[i];
+ auto &WInfo = DT.Info[W];
// Step #2: Implicitly define the immediate dominator of vertices
for (unsigned j = i; Buckets[j] != i; j = Buckets[j]) {
- typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
- typename GraphT::NodeType* U = Eval<GraphT>(DT, V, i + 1);
+ typename GraphT::NodeRef V = DT.Vertex[Buckets[j]];
+ typename GraphT::NodeRef U = Eval<GraphT>(DT, V, i + 1);
DT.IDoms[V] = DT.Info[U].Semi < i ? U : W;
}
@@ -207,7 +205,7 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
for (typename InvTraits::ChildIteratorType CI =
InvTraits::child_begin(W),
E = InvTraits::child_end(W); CI != E; ++CI) {
- typename InvTraits::NodeType *N = *CI;
+ typename InvTraits::NodeRef N = *CI;
if (DT.Info.count(N)) { // Only if this predecessor is reachable!
unsigned SemiU = DT.Info[Eval<GraphT>(DT, N, i + 1)].Semi;
if (SemiU < WInfo.Semi)
@@ -227,17 +225,17 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
}
if (N >= 1) {
- typename GraphT::NodeType* Root = DT.Vertex[1];
+ typename GraphT::NodeRef Root = DT.Vertex[1];
for (unsigned j = 1; Buckets[j] != 1; j = Buckets[j]) {
- typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
+ typename GraphT::NodeRef V = DT.Vertex[Buckets[j]];
DT.IDoms[V] = Root;
}
}
// Step #4: Explicitly define the immediate dominator of each vertex
for (unsigned i = 2; i <= N; ++i) {
- typename GraphT::NodeType* W = DT.Vertex[i];
- typename GraphT::NodeType*& WIDom = DT.IDoms[W];
+ typename GraphT::NodeRef W = DT.Vertex[i];
+ typename GraphT::NodeRef &WIDom = DT.IDoms[W];
if (WIDom != DT.Vertex[DT.Info[W].Semi])
WIDom = DT.IDoms[WIDom];
}
@@ -248,34 +246,32 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
// one exit block, or it may be the virtual exit (denoted by (BasicBlock *)0)
// which postdominates all real exits if there are multiple exit blocks, or
// an infinite loop.
- typename GraphT::NodeType* Root = !MultipleRoots ? DT.Roots[0] : nullptr;
+ typename GraphT::NodeRef Root = !MultipleRoots ? DT.Roots[0] : nullptr;
DT.RootNode =
(DT.DomTreeNodes[Root] =
- llvm::make_unique<DomTreeNodeBase<typename GraphT::NodeType>>(
- Root, nullptr)).get();
+ llvm::make_unique<DomTreeNodeBase<NodeType>>(Root, nullptr))
+ .get();
// Loop over all of the reachable blocks in the function...
for (unsigned i = 2; i <= N; ++i) {
- typename GraphT::NodeType* W = DT.Vertex[i];
+ typename GraphT::NodeRef W = DT.Vertex[i];
// Don't replace this with 'count', the insertion side effect is important
if (DT.DomTreeNodes[W])
continue; // Haven't calculated this node yet?
- typename GraphT::NodeType* ImmDom = DT.getIDom(W);
+ typename GraphT::NodeRef ImmDom = DT.getIDom(W);
assert(ImmDom || DT.DomTreeNodes[nullptr]);
// Get or calculate the node for the immediate dominator
- DomTreeNodeBase<typename GraphT::NodeType> *IDomNode =
- DT.getNodeForBlock(ImmDom);
+ DomTreeNodeBase<NodeType> *IDomNode = DT.getNodeForBlock(ImmDom);
// Add a new tree node for this BasicBlock, and link it as a child of
// IDomNode
DT.DomTreeNodes[W] = IDomNode->addChild(
- llvm::make_unique<DomTreeNodeBase<typename GraphT::NodeType>>(
- W, IDomNode));
+ llvm::make_unique<DomTreeNodeBase<NodeType>>(W, IDomNode));
}
// Free temporary memory used to construct idom's
diff --git a/include/llvm/Support/GlobPattern.h b/include/llvm/Support/GlobPattern.h
new file mode 100644
index 000000000000..c9436a13c1a3
--- /dev/null
+++ b/include/llvm/Support/GlobPattern.h
@@ -0,0 +1,48 @@
+//===-- GlobPattern.h - glob pattern matcher implementation -*- C++ -*-----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a glob pattern matcher. Glob patterns follow the
+// matching rules used by the shell.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GLOB_PATTERN_H
+#define LLVM_SUPPORT_GLOB_PATTERN_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include <vector>
+
+// This class represents a glob pattern. Supported metacharacters
+// are "*", "?", "[<chars>]" and "[^<chars>]".
+namespace llvm {
+class BitVector;
+template <typename T> class ArrayRef;
+
+class GlobPattern {
+public:
+ static Expected<GlobPattern> create(StringRef Pat);
+ bool match(StringRef S) const;
+
+private:
+ bool matchOne(ArrayRef<BitVector> Pat, StringRef S) const;
+
+ // Parsed glob pattern.
+ std::vector<BitVector> Tokens;
+
+ // The following members are for optimization.
+ Optional<StringRef> Exact;
+ Optional<StringRef> Prefix;
+ Optional<StringRef> Suffix;
+};
+}
+
+#endif // LLVM_SUPPORT_GLOB_PATTERN_H
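
A minimal sketch of the intended use of the new GlobPattern interface;
isCppSource is a hypothetical helper:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/GlobPattern.h"

    // Hypothetical helper: true if Path matches a C++ source-file pattern.
    static bool isCppSource(llvm::StringRef Path) {
      llvm::Expected<llvm::GlobPattern> Pat =
          llvm::GlobPattern::create("*.[ch]pp");
      if (!Pat) {
        llvm::consumeError(Pat.takeError()); // the pattern was malformed
        return false;
      }
      return Pat->match(Path); // e.g. true for "GlobPattern.cpp"
    }
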
diff --git a/include/llvm/Support/GraphWriter.h b/include/llvm/Support/GraphWriter.h
index 86985c569464..7555d5b31a8d 100644
--- a/include/llvm/Support/GraphWriter.h
+++ b/include/llvm/Support/GraphWriter.h
@@ -59,14 +59,19 @@ class GraphWriter {
typedef DOTGraphTraits<GraphType> DOTTraits;
typedef GraphTraits<GraphType> GTraits;
- typedef typename GTraits::NodeType NodeType;
+ typedef typename GTraits::NodeRef NodeRef;
typedef typename GTraits::nodes_iterator node_iterator;
typedef typename GTraits::ChildIteratorType child_iterator;
DOTTraits DTraits;
+ static_assert(std::is_pointer<NodeRef>::value,
+ "FIXME: Currently GraphWriter requires the NodeRef type to be "
+ "a pointer.\nThe pointer usage should be moved to "
+ "DOTGraphTraits, and removed from GraphWriter itself.");
+
// Writes the edge labels of the node to O and returns true if there are any
// edge labels not equal to the empty string "".
- bool getEdgeSourceLabels(raw_ostream &O, NodeType *Node) {
+ bool getEdgeSourceLabels(raw_ostream &O, NodeRef Node) {
child_iterator EI = GTraits::child_begin(Node);
child_iterator EE = GTraits::child_end(Node);
bool hasEdgeSourceLabels = false;
@@ -144,27 +149,11 @@ public:
writeNode(*I);
}
- bool isNodeHidden(NodeType &Node) {
- return isNodeHidden(&Node);
- }
-
- bool isNodeHidden(NodeType *const *Node) {
- return isNodeHidden(*Node);
- }
-
- bool isNodeHidden(NodeType *Node) {
+ bool isNodeHidden(NodeRef Node) {
return DTraits.isNodeHidden(Node);
}
- void writeNode(NodeType& Node) {
- writeNode(&Node);
- }
-
- void writeNode(NodeType *const *Node) {
- writeNode(*Node);
- }
-
- void writeNode(NodeType *Node) {
+ void writeNode(NodeRef Node) {
std::string NodeAttributes = DTraits.getNodeAttributes(Node, G);
O << "\tNode" << static_cast<const void*>(Node) << " [shape=record,";
@@ -237,8 +226,8 @@ public:
writeEdge(Node, 64, EI);
}
- void writeEdge(NodeType *Node, unsigned edgeidx, child_iterator EI) {
- if (NodeType *TargetNode = *EI) {
+ void writeEdge(NodeRef Node, unsigned edgeidx, child_iterator EI) {
+ if (NodeRef TargetNode = *EI) {
int DestPort = -1;
if (DTraits.edgeTargetsEdgeSource(Node, EI)) {
child_iterator TargetIt = DTraits.getEdgeTarget(Node, EI);
diff --git a/include/llvm/Support/Host.h b/include/llvm/Support/Host.h
index 8114f9bf846b..9df584c68c0d 100644
--- a/include/llvm/Support/Host.h
+++ b/include/llvm/Support/Host.h
@@ -18,6 +18,8 @@
#if defined(__linux__) || defined(__GNU__) || defined(__HAIKU__)
#include <endian.h>
+#elif defined(_AIX)
+#include <sys/machine.h>
#else
#if !defined(BYTE_ORDER) && !defined(LLVM_ON_WIN32)
#include <machine/endian.h>
@@ -68,6 +70,11 @@ namespace sys {
///
/// \return - True on success.
bool getHostCPUFeatures(StringMap<bool> &Features);
+
+ /// Get the number of physical cores (as opposed to logical cores returned
+ /// from thread::hardware_concurrency(), which includes hyperthreads).
+ /// Returns -1 if unknown for the current host system.
+ int getHostNumPhysicalCores();
}
}
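A minimal sketch of how the new physical-core query might be used to size a worker pool; the fallback policy is illustrative:

#include "llvm/Support/Host.h"

// Pick a worker count from physical cores, falling back to 1 when the
// value is unknown (getHostNumPhysicalCores() returns -1 in that case).
static unsigned chooseWorkerCount() {
  int Cores = llvm::sys::getHostNumPhysicalCores();
  return Cores > 0 ? static_cast<unsigned>(Cores) : 1u;
}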
diff --git a/include/llvm/Support/MD5.h b/include/llvm/Support/MD5.h
index 42d8ca8a1ebb..eb181bfe8a5c 100644
--- a/include/llvm/Support/MD5.h
+++ b/include/llvm/Support/MD5.h
@@ -31,6 +31,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Endian.h"
+#include <array>
namespace llvm {
template <typename T> class ArrayRef;
@@ -62,6 +63,9 @@ public:
/// deposited into \p Str. The result will be of length 32.
static void stringifyResult(MD5Result &Result, SmallString<32> &Str);
+ /// \brief Computes the hash for the given bytes.
+ static std::array<uint8_t, 16> hash(ArrayRef<uint8_t> Data);
+
private:
const uint8_t *body(ArrayRef<uint8_t> Data);
};
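The new one-shot MD5::hash helper avoids the update()/final()/stringify dance when only the raw digest is needed; a small sketch:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MD5.h"
#include <array>
#include <cstdint>

// Hash an arbitrary string and return the raw 128-bit digest.
static std::array<uint8_t, 16> md5Of(llvm::StringRef S) {
  llvm::ArrayRef<uint8_t> Bytes(reinterpret_cast<const uint8_t *>(S.data()),
                                S.size());
  return llvm::MD5::hash(Bytes);
}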
diff --git a/include/llvm/Support/MachO.def b/include/llvm/Support/MachO.def
index 9ca6440dd82b..57522897d0fc 100644
--- a/include/llvm/Support/MachO.def
+++ b/include/llvm/Support/MachO.def
@@ -15,27 +15,37 @@
HANDLE_LOAD_COMMAND(LC_SEGMENT, 0x00000001u, segment_command)
HANDLE_LOAD_COMMAND(LC_SYMTAB, 0x00000002u, symtab_command)
+// LC_SYMSEG is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_SYMSEG, 0x00000003u, symseg_command)
HANDLE_LOAD_COMMAND(LC_THREAD, 0x00000004u, thread_command)
HANDLE_LOAD_COMMAND(LC_UNIXTHREAD, 0x00000005u, thread_command)
+// LC_LOADFVMLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_LOADFVMLIB, 0x00000006u, fvmlib_command)
+// LC_IDFVMLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_IDFVMLIB, 0x00000007u, fvmlib_command)
+// LC_IDENT is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_IDENT, 0x00000008u, ident_command)
+// LC_FVMFILE is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_FVMFILE, 0x00000009u, fvmfile_command)
+// LC_PREPAGE is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_PREPAGE, 0x0000000Au, load_command)
HANDLE_LOAD_COMMAND(LC_DYSYMTAB, 0x0000000Bu, dysymtab_command)
HANDLE_LOAD_COMMAND(LC_LOAD_DYLIB, 0x0000000Cu, dylib_command)
HANDLE_LOAD_COMMAND(LC_ID_DYLIB, 0x0000000Du, dylib_command)
HANDLE_LOAD_COMMAND(LC_LOAD_DYLINKER, 0x0000000Eu, dylinker_command)
HANDLE_LOAD_COMMAND(LC_ID_DYLINKER, 0x0000000Fu, dylinker_command)
+// LC_PREBOUND_DYLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_PREBOUND_DYLIB, 0x00000010u, prebound_dylib_command)
HANDLE_LOAD_COMMAND(LC_ROUTINES, 0x00000011u, routines_command)
HANDLE_LOAD_COMMAND(LC_SUB_FRAMEWORK, 0x00000012u, sub_framework_command)
HANDLE_LOAD_COMMAND(LC_SUB_UMBRELLA, 0x00000013u, sub_umbrella_command)
HANDLE_LOAD_COMMAND(LC_SUB_CLIENT, 0x00000014u, sub_client_command)
HANDLE_LOAD_COMMAND(LC_SUB_LIBRARY, 0x00000015u, sub_library_command)
+// LC_TWOLEVEL_HINTS is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_TWOLEVEL_HINTS, 0x00000016u, twolevel_hints_command)
+// LC_PREBIND_CKSUM is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_PREBIND_CKSUM, 0x00000017u, prebind_cksum_command)
+// LC_LOAD_WEAK_DYLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_LOAD_WEAK_DYLIB, 0x80000018u, dylib_command)
HANDLE_LOAD_COMMAND(LC_SEGMENT_64, 0x00000019u, segment_command_64)
HANDLE_LOAD_COMMAND(LC_ROUTINES_64, 0x0000001Au, routines_command_64)
diff --git a/include/llvm/Support/MachO.h b/include/llvm/Support/MachO.h
index 9a03722d250f..2b23c0f86448 100644
--- a/include/llvm/Support/MachO.h
+++ b/include/llvm/Support/MachO.h
@@ -302,7 +302,7 @@ namespace llvm {
N_EXT = 0x01
};
- enum NListType {
+ enum NListType : uint8_t {
// Constants for the "n_type & N_TYPE" llvm::MachO::nlist and
// llvm::MachO::nlist_64
N_UNDF = 0x0u,
@@ -578,6 +578,7 @@ namespace llvm {
uint32_t header_addr;
};
+ // The fvmlib_command is obsolete and no longer supported.
struct fvmlib_command {
uint32_t cmd;
uint32_t cmdsize;
@@ -621,6 +622,7 @@ namespace llvm {
uint32_t sub_library;
};
+ // The prebound_dylib_command is obsolete and no longer supported.
struct prebound_dylib_command {
uint32_t cmd;
uint32_t cmdsize;
@@ -740,6 +742,7 @@ namespace llvm {
flags:8;
};
+ // The twolevel_hints_command is obsolete and no longer supported.
struct twolevel_hints_command {
uint32_t cmd;
uint32_t cmdsize;
@@ -747,11 +750,13 @@ namespace llvm {
uint32_t nhints;
};
+ // The twolevel_hints_command is obsolete and no longer supported.
struct twolevel_hint {
uint32_t isub_image:8,
itoc:24;
};
+ // The prebind_cksum_command is obsolete and no longer supported.
struct prebind_cksum_command {
uint32_t cmd;
uint32_t cmdsize;
@@ -835,6 +840,7 @@ namespace llvm {
uint32_t count;
};
+ // The symseg_command is obsolete and no longer supported.
struct symseg_command {
uint32_t cmd;
uint32_t cmdsize;
@@ -842,11 +848,13 @@ namespace llvm {
uint32_t size;
};
+ // The ident_command is obsolete and no longer supported.
struct ident_command {
uint32_t cmd;
uint32_t cmdsize;
};
+ // The fvmfile_command is obsolete and no longer supported.
struct fvmfile_command {
uint32_t cmd;
uint32_t cmdsize;
@@ -1268,12 +1276,14 @@ namespace llvm {
sys::swapByteOrder(C);
}
+ // The prebind_cksum_command is obsolete and no longer supported.
inline void swapStruct(prebind_cksum_command &C) {
sys::swapByteOrder(C.cmd);
sys::swapByteOrder(C.cmdsize);
sys::swapByteOrder(C.cksum);
}
+ // The twolevel_hints_command is obsolete and no longer supported.
inline void swapStruct(twolevel_hints_command &C) {
sys::swapByteOrder(C.cmd);
sys::swapByteOrder(C.cmdsize);
@@ -1281,6 +1291,7 @@ namespace llvm {
sys::swapByteOrder(C.nhints);
}
+ // The prebound_dylib_command is obsolete and no longer supported.
inline void swapStruct(prebound_dylib_command &C) {
sys::swapByteOrder(C.cmd);
sys::swapByteOrder(C.cmdsize);
@@ -1289,6 +1300,7 @@ namespace llvm {
sys::swapByteOrder(C.linked_modules);
}
+ // The fvmfile_command is obsolete and no longer supported.
inline void swapStruct(fvmfile_command &C) {
sys::swapByteOrder(C.cmd);
sys::swapByteOrder(C.cmdsize);
@@ -1296,6 +1308,7 @@ namespace llvm {
sys::swapByteOrder(C.header_addr);
}
+ // The symseg_command is obsolete and no longer supported.
inline void swapStruct(symseg_command &C) {
sys::swapByteOrder(C.cmd);
sys::swapByteOrder(C.cmdsize);
@@ -1303,6 +1316,7 @@ namespace llvm {
sys::swapByteOrder(C.size);
}
+ // The ident_command is obsolete and no longer supported.
inline void swapStruct(ident_command &C) {
sys::swapByteOrder(C.cmd);
sys::swapByteOrder(C.cmdsize);
@@ -1314,6 +1328,7 @@ namespace llvm {
sys::swapByteOrder(C.header_addr);
}
+ // The fvmlib_command is obsolete and no longer supported.
inline void swapStruct(fvmlib_command &C) {
sys::swapByteOrder(C.cmd);
sys::swapByteOrder(C.cmdsize);
@@ -1710,6 +1725,204 @@ namespace llvm {
const uint32_t x86_EXCEPTION_STATE_COUNT =
sizeof(x86_exception_state_t) / sizeof(uint32_t);
+ struct arm_thread_state32_t {
+ uint32_t r[13];
+ uint32_t sp;
+ uint32_t lr;
+ uint32_t pc;
+ uint32_t cpsr;
+ };
+
+ inline void swapStruct(arm_thread_state32_t &x) {
+ for (int i = 0; i < 13; i++)
+ sys::swapByteOrder(x.r[i]);
+ sys::swapByteOrder(x.sp);
+ sys::swapByteOrder(x.lr);
+ sys::swapByteOrder(x.pc);
+ sys::swapByteOrder(x.cpsr);
+ }
+
+ struct arm_thread_state64_t {
+ uint64_t x[29];
+ uint64_t fp;
+ uint64_t lr;
+ uint64_t sp;
+ uint64_t pc;
+ uint32_t cpsr;
+ uint32_t pad;
+ };
+
+ inline void swapStruct(arm_thread_state64_t &x) {
+ for (int i = 0; i < 29; i++)
+ sys::swapByteOrder(x.x[i]);
+ sys::swapByteOrder(x.fp);
+ sys::swapByteOrder(x.lr);
+ sys::swapByteOrder(x.sp);
+ sys::swapByteOrder(x.pc);
+ sys::swapByteOrder(x.cpsr);
+ }
+
+ struct arm_state_hdr_t {
+ uint32_t flavor;
+ uint32_t count;
+ };
+
+ struct arm_thread_state_t {
+ arm_state_hdr_t tsh;
+ union {
+ arm_thread_state32_t ts32;
+ } uts;
+ };
+
+ inline void swapStruct(arm_state_hdr_t &x) {
+ sys::swapByteOrder(x.flavor);
+ sys::swapByteOrder(x.count);
+ }
+
+ enum ARMThreadFlavors {
+ ARM_THREAD_STATE = 1,
+ ARM_VFP_STATE = 2,
+ ARM_EXCEPTION_STATE = 3,
+ ARM_DEBUG_STATE = 4,
+ ARM_THREAD_STATE_NONE = 5,
+ ARM_THREAD_STATE64 = 6,
+ ARM_EXCEPTION_STATE64 = 7
+ };
+
+ inline void swapStruct(arm_thread_state_t &x) {
+ swapStruct(x.tsh);
+ if (x.tsh.flavor == ARM_THREAD_STATE)
+ swapStruct(x.uts.ts32);
+ }
+
+ const uint32_t ARM_THREAD_STATE_COUNT =
+ sizeof(arm_thread_state32_t) / sizeof(uint32_t);
+
+ const uint32_t ARM_THREAD_STATE64_COUNT =
+ sizeof(arm_thread_state64_t) / sizeof(uint32_t);
+
+ struct ppc_thread_state32_t {
+ uint32_t srr0;
+ uint32_t srr1;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t r12;
+ uint32_t r13;
+ uint32_t r14;
+ uint32_t r15;
+ uint32_t r16;
+ uint32_t r17;
+ uint32_t r18;
+ uint32_t r19;
+ uint32_t r20;
+ uint32_t r21;
+ uint32_t r22;
+ uint32_t r23;
+ uint32_t r24;
+ uint32_t r25;
+ uint32_t r26;
+ uint32_t r27;
+ uint32_t r28;
+ uint32_t r29;
+ uint32_t r30;
+ uint32_t r31;
+ uint32_t ct;
+ uint32_t xer;
+ uint32_t lr;
+ uint32_t ctr;
+ uint32_t mq;
+ uint32_t vrsave;
+ };
+
+ inline void swapStruct(ppc_thread_state32_t &x) {
+ sys::swapByteOrder(x.srr0);
+ sys::swapByteOrder(x.srr1);
+ sys::swapByteOrder(x.r0);
+ sys::swapByteOrder(x.r1);
+ sys::swapByteOrder(x.r2);
+ sys::swapByteOrder(x.r3);
+ sys::swapByteOrder(x.r4);
+ sys::swapByteOrder(x.r5);
+ sys::swapByteOrder(x.r6);
+ sys::swapByteOrder(x.r7);
+ sys::swapByteOrder(x.r8);
+ sys::swapByteOrder(x.r9);
+ sys::swapByteOrder(x.r10);
+ sys::swapByteOrder(x.r11);
+ sys::swapByteOrder(x.r12);
+ sys::swapByteOrder(x.r13);
+ sys::swapByteOrder(x.r14);
+ sys::swapByteOrder(x.r15);
+ sys::swapByteOrder(x.r16);
+ sys::swapByteOrder(x.r17);
+ sys::swapByteOrder(x.r18);
+ sys::swapByteOrder(x.r19);
+ sys::swapByteOrder(x.r20);
+ sys::swapByteOrder(x.r21);
+ sys::swapByteOrder(x.r22);
+ sys::swapByteOrder(x.r23);
+ sys::swapByteOrder(x.r24);
+ sys::swapByteOrder(x.r25);
+ sys::swapByteOrder(x.r26);
+ sys::swapByteOrder(x.r27);
+ sys::swapByteOrder(x.r28);
+ sys::swapByteOrder(x.r29);
+ sys::swapByteOrder(x.r30);
+ sys::swapByteOrder(x.r31);
+ sys::swapByteOrder(x.ct);
+ sys::swapByteOrder(x.xer);
+ sys::swapByteOrder(x.lr);
+ sys::swapByteOrder(x.ctr);
+ sys::swapByteOrder(x.mq);
+ sys::swapByteOrder(x.vrsave);
+ }
+
+ struct ppc_state_hdr_t {
+ uint32_t flavor;
+ uint32_t count;
+ };
+
+ struct ppc_thread_state_t {
+ ppc_state_hdr_t tsh;
+ union {
+ ppc_thread_state32_t ts32;
+ } uts;
+ };
+
+ inline void swapStruct(ppc_state_hdr_t &x) {
+ sys::swapByteOrder(x.flavor);
+ sys::swapByteOrder(x.count);
+ }
+
+ enum PPCThreadFlavors {
+ PPC_THREAD_STATE = 1,
+ PPC_FLOAT_STATE = 2,
+ PPC_EXCEPTION_STATE = 3,
+ PPC_VECTOR_STATE = 4,
+ PPC_THREAD_STATE64 = 5,
+ PPC_EXCEPTION_STATE64 = 6,
+ PPC_THREAD_STATE_NONE = 7
+ };
+
+ inline void swapStruct(ppc_thread_state_t &x) {
+ swapStruct(x.tsh);
+ if (x.tsh.flavor == PPC_THREAD_STATE)
+ swapStruct(x.uts.ts32);
+ }
+
+ const uint32_t PPC_THREAD_STATE_COUNT =
+ sizeof(ppc_thread_state32_t) / sizeof(uint32_t);
+
// Define a union of all load command structs
#define LOAD_COMMAND_STRUCT(LCStruct) LCStruct LCStruct##_data;
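The added thread-state structs follow the existing swapStruct convention: a reader swaps in place when the host byte order differs from the file's. A sketch, assuming a big-endian PPC image is being read on a little-endian host:

#include "llvm/Support/Host.h"
#include "llvm/Support/MachO.h"

// Convert a ppc_thread_state_t read from a big-endian Mach-O file into
// host byte order. swapStruct() also swaps uts.ts32 when the (now
// host-order) flavor is PPC_THREAD_STATE.
static void toHostOrder(llvm::MachO::ppc_thread_state_t &TS) {
  if (llvm::sys::IsLittleEndianHost)
    llvm::MachO::swapStruct(TS);
}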
diff --git a/include/llvm/Support/ManagedStatic.h b/include/llvm/Support/ManagedStatic.h
index ec8154b828e5..7ce86eee95d2 100644
--- a/include/llvm/Support/ManagedStatic.h
+++ b/include/llvm/Support/ManagedStatic.h
@@ -46,6 +46,7 @@ protected:
mutable const ManagedStaticBase *Next;
void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const;
+
public:
/// isConstructed - Return true if this object has not been created yet.
bool isConstructed() const { return Ptr != nullptr; }
@@ -89,10 +90,10 @@ void llvm_shutdown();
/// llvm_shutdown_obj - This is a simple helper class that calls
/// llvm_shutdown() when it is destroyed.
struct llvm_shutdown_obj {
- llvm_shutdown_obj() { }
+ llvm_shutdown_obj() = default;
~llvm_shutdown_obj() { llvm_shutdown(); }
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_SUPPORT_MANAGEDSTATIC_H
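Typical use of llvm_shutdown_obj is unchanged by the defaulted constructor; a sketch of a tool's entry point:

#include "llvm/Support/ManagedStatic.h"

int main() {
  // Destroys all ManagedStatic objects (via llvm_shutdown()) on scope exit,
  // regardless of which return path is taken.
  llvm::llvm_shutdown_obj Cleanup;
  // ... tool logic that touches ManagedStatic-backed state ...
  return 0;
}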
diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h
index 5c816ac9df92..77970f487112 100644
--- a/include/llvm/Support/MathExtras.h
+++ b/include/llvm/Support/MathExtras.h
@@ -245,44 +245,40 @@ T reverseBits(T Val) {
// ambiguity.
/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
-inline uint32_t Hi_32(uint64_t Value) {
+constexpr inline uint32_t Hi_32(uint64_t Value) {
return static_cast<uint32_t>(Value >> 32);
}
/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
-inline uint32_t Lo_32(uint64_t Value) {
+constexpr inline uint32_t Lo_32(uint64_t Value) {
return static_cast<uint32_t>(Value);
}
/// Make_64 - This functions makes a 64-bit integer from a high / low pair of
/// 32-bit integers.
-inline uint64_t Make_64(uint32_t High, uint32_t Low) {
+constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
return ((uint64_t)High << 32) | (uint64_t)Low;
}
/// isInt - Checks if an integer fits into the given bit width.
-template<unsigned N>
-inline bool isInt(int64_t x) {
+template <unsigned N> constexpr inline bool isInt(int64_t x) {
return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
// Template specializations to get better code for common cases.
-template<>
-inline bool isInt<8>(int64_t x) {
+template <> constexpr inline bool isInt<8>(int64_t x) {
return static_cast<int8_t>(x) == x;
}
-template<>
-inline bool isInt<16>(int64_t x) {
+template <> constexpr inline bool isInt<16>(int64_t x) {
return static_cast<int16_t>(x) == x;
}
-template<>
-inline bool isInt<32>(int64_t x) {
+template <> constexpr inline bool isInt<32>(int64_t x) {
return static_cast<int32_t>(x) == x;
}
/// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted
/// left by S.
-template<unsigned N, unsigned S>
-inline bool isShiftedInt(int64_t x) {
+template <unsigned N, unsigned S>
+constexpr inline bool isShiftedInt(int64_t x) {
static_assert(
N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
@@ -290,29 +286,39 @@ inline bool isShiftedInt(int64_t x) {
}
/// isUInt - Checks if an unsigned integer fits into the given bit width.
-template<unsigned N>
-inline bool isUInt(uint64_t x) {
- static_assert(N > 0, "isUInt<0> doesn't make sense.");
- return N >= 64 || x < (UINT64_C(1)<<(N));
+///
+/// This is written as two functions rather than as simply
+///
+/// return N >= 64 || X < (UINT64_C(1) << N);
+///
+/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
+/// left too many places.
+template <unsigned N>
+constexpr inline typename std::enable_if<(N < 64), bool>::type
+isUInt(uint64_t X) {
+ static_assert(N > 0, "isUInt<0> doesn't make sense");
+ return X < (UINT64_C(1) << (N));
+}
+template <unsigned N>
+constexpr inline typename std::enable_if<N >= 64, bool>::type
+isUInt(uint64_t X) {
+ return true;
}
// Template specializations to get better code for common cases.
-template<>
-inline bool isUInt<8>(uint64_t x) {
+template <> constexpr inline bool isUInt<8>(uint64_t x) {
return static_cast<uint8_t>(x) == x;
}
-template<>
-inline bool isUInt<16>(uint64_t x) {
+template <> constexpr inline bool isUInt<16>(uint64_t x) {
return static_cast<uint16_t>(x) == x;
}
-template<>
-inline bool isUInt<32>(uint64_t x) {
+template <> constexpr inline bool isUInt<32>(uint64_t x) {
return static_cast<uint32_t>(x) == x;
}
/// Checks if a unsigned integer is an N bit number shifted left by S.
-template<unsigned N, unsigned S>
-inline bool isShiftedUInt(uint64_t x) {
+template <unsigned N, unsigned S>
+constexpr inline bool isShiftedUInt(uint64_t x) {
static_assert(
N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
static_assert(N + S <= 64,
@@ -364,39 +370,39 @@ inline bool isIntN(unsigned N, int64_t x) {
/// isMask_32 - This function returns true if the argument is a non-empty
/// sequence of ones starting at the least significant bit with the remainder
/// zero (32 bit version). Ex. isMask_32(0x0000FFFFU) == true.
-inline bool isMask_32(uint32_t Value) {
+constexpr inline bool isMask_32(uint32_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
/// isMask_64 - This function returns true if the argument is a non-empty
/// sequence of ones starting at the least significant bit with the remainder
/// zero (64 bit version).
-inline bool isMask_64(uint64_t Value) {
+constexpr inline bool isMask_64(uint64_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
/// isShiftedMask_32 - This function returns true if the argument contains a
/// non-empty sequence of ones with the remainder zero (32 bit version.)
/// Ex. isShiftedMask_32(0x0000FF00U) == true.
-inline bool isShiftedMask_32(uint32_t Value) {
+constexpr inline bool isShiftedMask_32(uint32_t Value) {
return Value && isMask_32((Value - 1) | Value);
}
/// isShiftedMask_64 - This function returns true if the argument contains a
/// non-empty sequence of ones with the remainder zero (64 bit version.)
-inline bool isShiftedMask_64(uint64_t Value) {
+constexpr inline bool isShiftedMask_64(uint64_t Value) {
return Value && isMask_64((Value - 1) | Value);
}
/// isPowerOf2_32 - This function returns true if the argument is a power of
/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
-inline bool isPowerOf2_32(uint32_t Value) {
+constexpr inline bool isPowerOf2_32(uint32_t Value) {
return Value && !(Value & (Value - 1));
}
/// isPowerOf2_64 - This function returns true if the argument is a power of two
/// > 0 (64 bit edition.)
-inline bool isPowerOf2_64(uint64_t Value) {
+constexpr inline bool isPowerOf2_64(uint64_t Value) {
return Value && !(Value & (Value - int64_t(1L)));
}
@@ -541,23 +547,19 @@ inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
/// BitsToDouble - This function takes a 64-bit integer and returns the bit
/// equivalent double.
inline double BitsToDouble(uint64_t Bits) {
- union {
- uint64_t L;
- double D;
- } T;
- T.L = Bits;
- return T.D;
+ double D;
+ static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
+ memcpy(&D, &Bits, sizeof(Bits));
+ return D;
}
/// BitsToFloat - This function takes a 32-bit integer and returns the bit
/// equivalent float.
inline float BitsToFloat(uint32_t Bits) {
- union {
- uint32_t I;
- float F;
- } T;
- T.I = Bits;
- return T.F;
+ float F;
+ static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
+ memcpy(&F, &Bits, sizeof(Bits));
+ return F;
}
/// DoubleToBits - This function takes a double and returns the bit
@@ -565,12 +567,10 @@ inline float BitsToFloat(uint32_t Bits) {
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
- union {
- uint64_t L;
- double D;
- } T;
- T.D = Double;
- return T.L;
+ uint64_t Bits;
+ static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
+ memcpy(&Bits, &Double, sizeof(Double));
+ return Bits;
}
/// FloatToBits - This function takes a float and returns the bit
@@ -578,17 +578,15 @@ inline uint64_t DoubleToBits(double Double) {
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
- union {
- uint32_t I;
- float F;
- } T;
- T.F = Float;
- return T.I;
+ uint32_t Bits;
+ static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
+ memcpy(&Bits, &Float, sizeof(Float));
+ return Bits;
}
/// MinAlign - A and B are either alignments or offsets. Return the minimum
/// alignment that may be assumed after adding the two together.
-inline uint64_t MinAlign(uint64_t A, uint64_t B) {
+constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
// The largest power of 2 that divides both A and B.
//
// Replace "-Value" by "1+~Value" in the following commented code to avoid
@@ -635,6 +633,14 @@ inline uint64_t PowerOf2Floor(uint64_t A) {
return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
}
+/// Returns the power of two which is greater than or equal to the given value.
+/// Essentially, it is a ceil operation across the domain of powers of two.
+inline uint64_t PowerOf2Ceil(uint64_t A) {
+ if (!A)
+ return 0;
+ return NextPowerOf2(A - 1);
+}
+
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
@@ -656,13 +662,35 @@ inline uint64_t PowerOf2Floor(uint64_t A) {
/// alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
+ assert(Align != 0u && "Align can't be 0.");
Skew %= Align;
return (Value + Align - 1 - Skew) / Align * Align + Skew;
}
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
+template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
+ static_assert(Align != 0u, "Align must be non-zero");
+ return (Value + Align - 1) / Align * Align;
+}
+
+/// \c alignTo for contexts where a constant expression is required.
+/// \sa alignTo
+///
+/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
+template <uint64_t Align>
+struct AlignTo {
+ static_assert(Align != 0u, "Align must be non-zero");
+ template <uint64_t Value>
+ struct from_value {
+ static const uint64_t value = (Value + Align - 1) / Align * Align;
+ };
+};
+
/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
+ assert(Align != 0u && "Align can't be 0.");
Skew %= Align;
return (Value - Skew) / Align * Align + Skew;
}
@@ -676,7 +704,7 @@ inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
-template <unsigned B> inline int32_t SignExtend32(uint32_t X) {
+template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
static_assert(B > 0, "Bit width can't be 0.");
static_assert(B <= 32, "Bit width out of range.");
return int32_t(X << (32 - B)) >> (32 - B);
@@ -692,7 +720,7 @@ inline int32_t SignExtend32(uint32_t X, unsigned B) {
/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B < 64.
-template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
+template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
static_assert(B > 0, "Bit width can't be 0.");
static_assert(B <= 64, "Bit width out of range.");
return int64_t(x << (64 - B)) >> (64 - B);
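With the constexpr qualifiers above, the predicates and the templated alignTo become usable in constant expressions; a small sketch with illustrative values:

#include "llvm/Support/MathExtras.h"
#include <cstdint>

static_assert(llvm::isUInt<8>(255) && !llvm::isUInt<8>(256),
              "isUInt is now evaluable at compile time");
static_assert(llvm::alignTo<8>(10) == 16,
              "the templated alignTo rounds up to a multiple of Align");

// Runtime counterpart: round a size up to the next power of two, then to
// an 8-byte boundary (PowerOf2Ceil(10) == 16, alignTo(16, 8) == 16).
static uint64_t paddedSize(uint64_t N) {
  return llvm::alignTo(llvm::PowerOf2Ceil(N), 8);
}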
diff --git a/include/llvm/Support/MemoryBuffer.h b/include/llvm/Support/MemoryBuffer.h
index 73d643537a6f..f739d19907b0 100644
--- a/include/llvm/Support/MemoryBuffer.h
+++ b/include/llvm/Support/MemoryBuffer.h
@@ -14,13 +14,17 @@
#ifndef LLVM_SUPPORT_MEMORYBUFFER_H
#define LLVM_SUPPORT_MEMORYBUFFER_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/CBindingWrapping.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorOr.h"
+#include "llvm-c/Types.h"
#include <memory>
+#include <cstddef>
+#include <cstdint>
namespace llvm {
+
class MemoryBufferRef;
/// This interface provides simple read-only access to a block of memory, and
@@ -37,13 +41,15 @@ class MemoryBuffer {
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
- MemoryBuffer(const MemoryBuffer &) = delete;
- MemoryBuffer &operator=(const MemoryBuffer &) = delete;
+
protected:
- MemoryBuffer() {}
+ MemoryBuffer() = default;
+
void init(const char *BufStart, const char *BufEnd,
bool RequiresNullTerminator);
public:
+ MemoryBuffer(const MemoryBuffer &) = delete;
+ MemoryBuffer &operator=(const MemoryBuffer &) = delete;
virtual ~MemoryBuffer();
const char *getBufferStart() const { return BufferStart; }
@@ -56,9 +62,7 @@ public:
/// Return an identifier for this buffer, typically the filename it was read
/// from.
- virtual const char *getBufferIdentifier() const {
- return "Unknown buffer";
- }
+ virtual StringRef getBufferIdentifier() const { return "Unknown buffer"; }
/// Open the specified file as a MemoryBuffer, returning a new MemoryBuffer
/// if successful, otherwise returning null. If FileSize is specified, this
@@ -72,6 +76,12 @@ public:
getFile(const Twine &Filename, int64_t FileSize = -1,
bool RequiresNullTerminator = true, bool IsVolatileSize = false);
+ /// Read all of the specified file into a MemoryBuffer as a stream
+ /// (i.e. until EOF is reached). This is useful for special files that
+ /// look like regular files but have 0 size (e.g. /proc/cpuinfo on Linux).
+ static ErrorOr<std::unique_ptr<MemoryBuffer>>
+ getFileAsStream(const Twine &Filename);
+
/// Given an already-open file descriptor, map some slice of it into a
/// MemoryBuffer. The slice is specified by an \p Offset and \p MapSize.
/// Since this is in the middle of a file, the buffer is not null terminated.
@@ -150,7 +160,7 @@ class MemoryBufferRef {
StringRef Identifier;
public:
- MemoryBufferRef() {}
+ MemoryBufferRef() = default;
MemoryBufferRef(MemoryBuffer& Buffer)
: Buffer(Buffer.getBuffer()), Identifier(Buffer.getBufferIdentifier()) {}
MemoryBufferRef(StringRef Buffer, StringRef Identifier)
@@ -170,4 +180,4 @@ DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MemoryBuffer, LLVMMemoryBufferRef)
} // end namespace llvm
-#endif
+#endif // LLVM_SUPPORT_MEMORYBUFFER_H
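A sketch of the new getFileAsStream entry point, which keeps reading until EOF and so copes with zero-sized special files; the path is only an example:

#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <system_error>

static void printCpuInfoSize() {
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
      llvm::MemoryBuffer::getFileAsStream("/proc/cpuinfo");
  if (std::error_code EC = BufOrErr.getError()) {
    llvm::errs() << "read failed: " << EC.message() << "\n";
    return;
  }
  // getBufferIdentifier() now returns a StringRef, so it streams directly.
  llvm::outs() << (*BufOrErr)->getBufferIdentifier() << ": "
               << (*BufOrErr)->getBufferSize() << " bytes\n";
}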
diff --git a/include/llvm/Support/MemoryObject.h b/include/llvm/Support/MemoryObject.h
deleted file mode 100644
index e0c8749da346..000000000000
--- a/include/llvm/Support/MemoryObject.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//===- MemoryObject.h - Abstract memory interface ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SUPPORT_MEMORYOBJECT_H
-#define LLVM_SUPPORT_MEMORYOBJECT_H
-
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-
-/// Interface to data which might be streamed. Streamability has 2 important
-/// implications/restrictions. First, the data might not yet exist in memory
-/// when the request is made. This just means that readByte/readBytes might have
-/// to block or do some work to get it. More significantly, the exact size of
-/// the object might not be known until it has all been fetched. This means that
-/// to return the right result, getExtent must also wait for all the data to
-/// arrive; therefore it should not be called on objects which are actually
-/// streamed (this would defeat the purpose of streaming). Instead,
-/// isValidAddress can be used to test addresses without knowing the exact size
-/// of the stream. Finally, getPointer can be used instead of readBytes to avoid
-/// extra copying.
-class MemoryObject {
-public:
- virtual ~MemoryObject();
-
- /// Returns the size of the region in bytes. (The region is contiguous, so
- /// the highest valid address of the region is getExtent() - 1).
- ///
- /// @result - The size of the region.
- virtual uint64_t getExtent() const = 0;
-
- /// Tries to read a contiguous range of bytes from the region, up to the end
- /// of the region.
- ///
- /// @param Buf - A pointer to a buffer to be filled in. Must be non-NULL
- /// and large enough to hold size bytes.
- /// @param Size - The number of bytes to copy.
- /// @param Address - The address of the first byte, in the same space as
- /// getBase().
- /// @result - The number of bytes read.
- virtual uint64_t readBytes(uint8_t *Buf, uint64_t Size,
- uint64_t Address) const = 0;
-
- /// Ensures that the requested data is in memory, and returns a pointer to it.
- /// More efficient than using readBytes if the data is already in memory. May
- /// block until (address - base + size) bytes have been read
- /// @param address - address of the byte, in the same space as getBase()
- /// @param size - amount of data that must be available on return
- /// @result - valid pointer to the requested data
- virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const = 0;
-
- /// Returns true if the address is within the object (i.e. between base and
- /// base + extent - 1 inclusive). May block until (address - base) bytes have
- /// been read
- /// @param address - address of the byte, in the same space as getBase()
- /// @result - true if the address may be read with readByte()
- virtual bool isValidAddress(uint64_t address) const = 0;
-};
-
-}
-
-#endif
diff --git a/include/llvm/Support/NativeFormatting.h b/include/llvm/Support/NativeFormatting.h
new file mode 100644
index 000000000000..6d1dd7b422fe
--- /dev/null
+++ b/include/llvm/Support/NativeFormatting.h
@@ -0,0 +1,49 @@
+//===- NativeFormatting.h - Low level formatting helpers ---------*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_NATIVE_FORMATTING_H
+#define LLVM_SUPPORT_NATIVE_FORMATTING_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <cstdint>
+
+namespace llvm {
+enum class FloatStyle { Exponent, ExponentUpper, Fixed, Percent };
+enum class IntegerStyle {
+ Integer,
+ Number,
+};
+enum class HexPrintStyle { Upper, Lower, PrefixUpper, PrefixLower };
+
+size_t getDefaultPrecision(FloatStyle Style);
+
+bool isPrefixedHexStyle(HexPrintStyle S);
+
+void write_integer(raw_ostream &S, unsigned int N, size_t MinDigits,
+ IntegerStyle Style);
+void write_integer(raw_ostream &S, int N, size_t MinDigits, IntegerStyle Style);
+void write_integer(raw_ostream &S, unsigned long N, size_t MinDigits,
+ IntegerStyle Style);
+void write_integer(raw_ostream &S, long N, size_t MinDigits,
+ IntegerStyle Style);
+void write_integer(raw_ostream &S, unsigned long long N, size_t MinDigits,
+ IntegerStyle Style);
+void write_integer(raw_ostream &S, long long N, size_t MinDigits,
+ IntegerStyle Style);
+
+void write_hex(raw_ostream &S, uint64_t N, HexPrintStyle Style,
+ Optional<size_t> Width = None);
+void write_double(raw_ostream &S, double D, FloatStyle Style,
+ Optional<size_t> Precision = None);
+}
+
+#endif
+
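A sketch of the low-level writers declared above; the exact padding produced is whatever the implementation defines, but the call shapes match the header:

#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// Emit "<address> <count>" using the new helpers instead of format().
static void writeRow(llvm::raw_ostream &OS, uint64_t Addr, int Count) {
  llvm::write_hex(OS, Addr, llvm::HexPrintStyle::PrefixLower, /*Width=*/10);
  OS << ' ';
  llvm::write_integer(OS, Count, /*MinDigits=*/0, llvm::IntegerStyle::Number);
  OS << '\n';
}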
diff --git a/include/llvm/Support/OnDiskHashTable.h b/include/llvm/Support/OnDiskHashTable.h
index c28fcabe78fc..e9c28daf03b9 100644
--- a/include/llvm/Support/OnDiskHashTable.h
+++ b/include/llvm/Support/OnDiskHashTable.h
@@ -14,7 +14,6 @@
#ifndef LLVM_SUPPORT_ONDISKHASHTABLE_H
#define LLVM_SUPPORT_ONDISKHASHTABLE_H
-#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/EndianStream.h"
@@ -208,7 +207,7 @@ public:
// Pad with zeros so that we can start the hashtable at an aligned address.
offset_type TableOff = Out.tell();
- uint64_t N = llvm::OffsetToAlignment(TableOff, alignOf<offset_type>());
+ uint64_t N = llvm::OffsetToAlignment(TableOff, alignof(offset_type));
TableOff += N;
while (N--)
LE.write<uint8_t>(0);
diff --git a/include/llvm/Support/Options.h b/include/llvm/Support/Options.h
index 7b61b2308f57..9019804d24e0 100644
--- a/include/llvm/Support/Options.h
+++ b/include/llvm/Support/Options.h
@@ -93,7 +93,7 @@ public:
/// option stores (\p ValT), the class that will read the option (\p Base),
/// and the member that the class will store the data into (\p Mem).
template <typename ValT, typename Base, ValT(Base::*Mem)>
- static void registerOption(const char *ArgStr, const char *Desc,
+ static void registerOption(StringRef ArgStr, StringRef Desc,
const ValT &InitValue) {
cl::opt<ValT> *Option = new cl::opt<ValT>(ArgStr, cl::desc(Desc),
cl::Hidden, cl::init(InitValue));
diff --git a/include/llvm/Support/Path.h b/include/llvm/Support/Path.h
index 853f0997571c..0513350d446b 100644
--- a/include/llvm/Support/Path.h
+++ b/include/llvm/Support/Path.h
@@ -445,7 +445,8 @@ StringRef remove_leading_dotslash(StringRef path);
/// @brief In-place remove any './' and optionally '../' components from a path.
///
/// @param path processed path
-/// @param remove_dot_dot specify if '../' should be removed
+/// @param remove_dot_dot specify if '../' (except for leading "../") should be
+/// removed
/// @result True if path was changed
bool remove_dots(SmallVectorImpl<char> &path, bool remove_dot_dot = false);
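A sketch of the clarified remove_dots behaviour: interior dot and dot-dot components fold away, while a leading "../" is left alone:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"

static void normalizeExample() {
  llvm::SmallString<64> P("a/./b/../c");
  // With remove_dot_dot = true the interior components are removed,
  // leaving "a/c"; a leading "../" would be preserved.
  llvm::sys::path::remove_dots(P, /*remove_dot_dot=*/true);
}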
diff --git a/include/llvm/Support/PointerLikeTypeTraits.h b/include/llvm/Support/PointerLikeTypeTraits.h
index 96cdaed142c2..9ff894edbeb0 100644
--- a/include/llvm/Support/PointerLikeTypeTraits.h
+++ b/include/llvm/Support/PointerLikeTypeTraits.h
@@ -15,8 +15,8 @@
#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
-#include "llvm/Support/AlignOf.h"
#include "llvm/Support/DataTypes.h"
+#include <type_traits>
namespace llvm {
@@ -42,9 +42,7 @@ public:
static inline void *getAsVoidPointer(T *P) { return P; }
static inline T *getFromVoidPointer(void *P) { return static_cast<T *>(P); }
- enum {
- NumLowBitsAvailable = detail::ConstantLog2<AlignOf<T>::Alignment>::value
- };
+ enum { NumLowBitsAvailable = detail::ConstantLog2<alignof(T)>::value };
};
template <> class PointerLikeTypeTraits<void *> {
diff --git a/include/llvm/Support/PrettyStackTrace.h b/include/llvm/Support/PrettyStackTrace.h
index 62e3bbc0ddbc..4d64fe4ef727 100644
--- a/include/llvm/Support/PrettyStackTrace.h
+++ b/include/llvm/Support/PrettyStackTrace.h
@@ -16,6 +16,7 @@
#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H
#define LLVM_SUPPORT_PRETTYSTACKTRACE_H
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -55,6 +56,16 @@ namespace llvm {
void print(raw_ostream &OS) const override;
};
+ /// PrettyStackTraceFormat - This object prints a string (which may use
+ /// printf-style formatting but should not contain newlines) to the stream
+ /// as the stack trace when a crash occurs.
+ class PrettyStackTraceFormat : public PrettyStackTraceEntry {
+ llvm::SmallVector<char, 32> Str;
+ public:
+ PrettyStackTraceFormat(const char *Format, ...);
+ void print(raw_ostream &OS) const override;
+ };
+
/// PrettyStackTraceProgram - This object prints a specified program arguments
/// to the stream as the stack trace when a crash occurs.
class PrettyStackTraceProgram : public PrettyStackTraceEntry {
diff --git a/include/llvm/Support/Printable.h b/include/llvm/Support/Printable.h
index 83b8f0998ae6..28e875e8ff5e 100644
--- a/include/llvm/Support/Printable.h
+++ b/include/llvm/Support/Printable.h
@@ -21,7 +21,7 @@ namespace llvm {
class raw_ostream;
/// Simple wrapper around std::function<void(raw_ostream&)>.
-/// This class is usefull to construct print helpers for raw_ostream.
+/// This class is useful to construct print helpers for raw_ostream.
///
/// Example:
/// Printable PrintRegister(unsigned Register) {
diff --git a/include/llvm/Support/Process.h b/include/llvm/Support/Process.h
index 06fd0af10aa4..780c7e2ddd6f 100644
--- a/include/llvm/Support/Process.h
+++ b/include/llvm/Support/Process.h
@@ -28,8 +28,8 @@
#include "llvm/ADT/Optional.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Chrono.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/TimeValue.h"
#include <system_error>
namespace llvm {
@@ -55,13 +55,14 @@ public:
/// This static function will set \p user_time to the amount of CPU time
/// spent in user (non-kernel) mode and \p sys_time to the amount of CPU
/// time spent in system (kernel) mode. If the operating system does not
- /// support collection of these metrics, a zero TimeValue will be for both
+ /// support collection of these metrics, a zero duration will be reported for both
/// values.
- /// \param elapsed Returns the TimeValue::now() giving current time
+ /// \param elapsed Returns the system_clock::now() giving current time
/// \param user_time Returns the current amount of user time for the process
/// \param sys_time Returns the current amount of system time for the process
- static void GetTimeUsage(TimeValue &elapsed, TimeValue &user_time,
- TimeValue &sys_time);
+ static void GetTimeUsage(TimePoint<> &elapsed,
+ std::chrono::nanoseconds &user_time,
+ std::chrono::nanoseconds &sys_time);
/// This function makes the necessary calls to the operating system to
/// prevent core files or any other kind of large memory dumps that can
diff --git a/include/llvm/Support/RWMutex.h b/include/llvm/Support/RWMutex.h
index 4be931337765..e4736b8e24eb 100644
--- a/include/llvm/Support/RWMutex.h
+++ b/include/llvm/Support/RWMutex.h
@@ -18,10 +18,9 @@
#include "llvm/Support/Threading.h"
#include <cassert>
-namespace llvm
-{
- namespace sys
- {
+namespace llvm {
+namespace sys {
+
/// @brief Platform agnostic RWMutex class.
class RWMutexImpl
{
@@ -89,9 +88,11 @@ namespace llvm
template<bool mt_only>
class SmartRWMutex {
RWMutexImpl impl;
- unsigned readers, writers;
+ unsigned readers = 0;
+ unsigned writers = 0;
+
public:
- explicit SmartRWMutex() : impl(), readers(0), writers(0) { }
+ explicit SmartRWMutex() = default;
bool lock_shared() {
if (!mt_only || llvm_is_multithreaded())
@@ -140,6 +141,7 @@ namespace llvm
SmartRWMutex(const SmartRWMutex<mt_only> & original);
void operator=(const SmartRWMutex<mt_only> &);
};
+
typedef SmartRWMutex<false> RWMutex;
/// ScopedReader - RAII acquisition of a reader lock
@@ -155,6 +157,7 @@ namespace llvm
mutex.unlock_shared();
}
};
+
typedef SmartScopedReader<false> ScopedReader;
/// ScopedWriter - RAII acquisition of a writer lock
@@ -170,8 +173,10 @@ namespace llvm
mutex.unlock();
}
};
+
typedef SmartScopedWriter<false> ScopedWriter;
- }
-}
-#endif
+} // end namespace sys
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_RWMUTEX_H
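The RAII wrappers pair with the now member-initialized SmartRWMutex as before; a sketch of a reader/writer-protected table (the table itself is illustrative):

#include "llvm/Support/RWMutex.h"

static llvm::sys::SmartRWMutex<true> TableLock;
static int Table[64];

static int readSlot(unsigned I) {
  llvm::sys::SmartScopedReader<true> Guard(TableLock); // shared lock
  return Table[I % 64];
}

static void writeSlot(unsigned I, int V) {
  llvm::sys::SmartScopedWriter<true> Guard(TableLock); // exclusive lock
  Table[I % 64] = V;
}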
diff --git a/include/llvm/Support/RandomNumberGenerator.h b/include/llvm/Support/RandomNumberGenerator.h
index f146e350fe62..1399dab815f8 100644
--- a/include/llvm/Support/RandomNumberGenerator.h
+++ b/include/llvm/Support/RandomNumberGenerator.h
@@ -19,6 +19,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h" // Needed for uint64_t on Windows.
#include <random>
+#include <system_error>
namespace llvm {
class StringRef;
@@ -30,9 +31,21 @@ class StringRef;
/// Module::createRNG to create a new RNG instance for use with that
/// module.
class RandomNumberGenerator {
+
+ // 64-bit Mersenne Twister by Matsumoto and Nishimura, 2000
+ // http://en.cppreference.com/w/cpp/numeric/random/mersenne_twister_engine
+ // This RNG is deterministically portable across C++11
+ // implementations.
+ using generator_type = std::mt19937_64;
+
public:
+ using result_type = generator_type::result_type;
+
/// Returns a random number in the range [0, Max).
- uint_fast64_t operator()();
+ result_type operator()();
+
+ static constexpr result_type min() { return generator_type::min(); }
+ static constexpr result_type max() { return generator_type::max(); }
private:
/// Seeds and salts the underlying RNG engine.
@@ -41,11 +54,7 @@ private:
/// Module::createRNG to create a new RNG salted with the Module ID.
RandomNumberGenerator(StringRef Salt);
- // 64-bit Mersenne Twister by Matsumoto and Nishimura, 2000
- // http://en.cppreference.com/w/cpp/numeric/random/mersenne_twister_engine
- // This RNG is deterministically portable across C++11
- // implementations.
- std::mt19937_64 Generator;
+ generator_type Generator;
// Noncopyable.
RandomNumberGenerator(const RandomNumberGenerator &other) = delete;
@@ -53,6 +62,9 @@ private:
friend class Module;
};
+
+// Fill Buffer with Size bytes of random data.
+std::error_code getRandomBytes(void *Buffer, size_t Size);
}
#endif
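A sketch of the new free function; the buffer size is illustrative. The added result_type/min()/max() members also let an instance obtained from Module::createRNG be plugged into standard <random> distributions.

#include "llvm/Support/RandomNumberGenerator.h"
#include <cstdint>
#include <system_error>

// Fill a fixed-size nonce with random bytes, reporting failure via the
// returned error_code rather than aborting.
static bool fillNonce(uint8_t (&Nonce)[16]) {
  std::error_code EC = llvm::getRandomBytes(Nonce, sizeof(Nonce));
  return !EC;
}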
diff --git a/include/llvm/Support/Recycler.h b/include/llvm/Support/Recycler.h
index a38050d81903..1523aad38d46 100644
--- a/include/llvm/Support/Recycler.h
+++ b/include/llvm/Support/Recycler.h
@@ -16,7 +16,6 @@
#define LLVM_SUPPORT_RECYCLER_H
#include "llvm/ADT/ilist.h"
-#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
@@ -32,7 +31,7 @@ void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize);
/// and facilitates reusing deallocated memory in place of allocating
/// new memory.
///
-template<class T, size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment>
+template <class T, size_t Size = sizeof(T), size_t Align = alignof(T)>
class Recycler {
struct FreeNode {
FreeNode *Next;
@@ -80,7 +79,7 @@ public:
template<class SubClass, class AllocatorType>
SubClass *Allocate(AllocatorType &Allocator) {
- static_assert(AlignOf<SubClass>::Alignment <= Align,
+ static_assert(alignof(SubClass) <= Align,
"Recycler allocation alignment is less than object align!");
static_assert(sizeof(SubClass) <= Size,
"Recycler allocation size is less than object size!");
diff --git a/include/llvm/Support/RecyclingAllocator.h b/include/llvm/Support/RecyclingAllocator.h
index 001d1cf7c3df..32b033b17946 100644
--- a/include/llvm/Support/RecyclingAllocator.h
+++ b/include/llvm/Support/RecyclingAllocator.h
@@ -22,8 +22,8 @@ namespace llvm {
/// RecyclingAllocator - This class wraps an Allocator, adding the
/// functionality of recycling deleted objects.
///
-template<class AllocatorType, class T,
- size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment>
+template <class AllocatorType, class T, size_t Size = sizeof(T),
+ size_t Align = alignof(T)>
class RecyclingAllocator {
private:
/// Base - Implementation details.
diff --git a/include/llvm/Support/Regex.h b/include/llvm/Support/Regex.h
index 31b35ed0cad6..83db80359ee2 100644
--- a/include/llvm/Support/Regex.h
+++ b/include/llvm/Support/Regex.h
@@ -43,6 +43,7 @@ namespace llvm {
BasicRegex=4
};
+ Regex();
/// Compiles the given regular expression \p Regex.
Regex(StringRef Regex, unsigned Flags = NoFlags);
Regex(const Regex &) = delete;
@@ -51,11 +52,7 @@ namespace llvm {
std::swap(error, regex.error);
return *this;
}
- Regex(Regex &&regex) {
- preg = regex.preg;
- error = regex.error;
- regex.preg = nullptr;
- }
+ Regex(Regex &&regex);
~Regex();
/// isValid - returns the error encountered during regex compilation, or
diff --git a/include/llvm/Support/Registry.h b/include/llvm/Support/Registry.h
index 27f025fcd080..02fd5b9354a1 100644
--- a/include/llvm/Support/Registry.h
+++ b/include/llvm/Support/Registry.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_REGISTRY_H
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DynamicLibrary.h"
@@ -25,16 +26,15 @@ namespace llvm {
/// no-argument constructor.
template <typename T>
class SimpleRegistryEntry {
- const char *Name, *Desc;
+ StringRef Name, Desc;
std::unique_ptr<T> (*Ctor)();
public:
- SimpleRegistryEntry(const char *N, const char *D, std::unique_ptr<T> (*C)())
- : Name(N), Desc(D), Ctor(C)
- {}
+ SimpleRegistryEntry(StringRef N, StringRef D, std::unique_ptr<T> (*C)())
+ : Name(N), Desc(D), Ctor(C) {}
- const char *getName() const { return Name; }
- const char *getDesc() const { return Desc; }
+ StringRef getName() const { return Name; }
+ StringRef getDesc() const { return Desc; }
std::unique_ptr<T> instantiate() const { return Ctor(); }
};
@@ -44,6 +44,7 @@ namespace llvm {
template <typename T>
class Registry {
public:
+ typedef T type;
typedef SimpleRegistryEntry<T> entry;
class node;
@@ -69,13 +70,14 @@ namespace llvm {
node(const entry &V) : Next(nullptr), Val(V) {}
};
- static void add_node(node *N) {
- if (Tail)
- Tail->Next = N;
- else
- Head = N;
- Tail = N;
- }
+ /// Add a node to the Registry: this is the interface between the plugin and
+ /// the executable.
+ ///
+ /// This function is exported by the executable and called by the plugin to
+ /// add a node to the executable's registry. Therefore it's not defined here
+ /// to avoid it being instantiated in the plugin and is instead defined in
+ /// the executable (see LLVM_INSTANTIATE_REGISTRY below).
+ static void add_node(node *N);
/// Iterators for registry entries.
///
@@ -92,7 +94,9 @@ namespace llvm {
const entry *operator->() const { return &Cur->Val; }
};
- static iterator begin() { return iterator(Head); }
+ // begin is not defined here in order to avoid usage of an undefined static
+ // data member; instead it's instantiated by LLVM_INSTANTIATE_REGISTRY.
+ static iterator begin();
static iterator end() { return iterator(nullptr); }
static iterator_range<iterator> entries() {
@@ -115,66 +119,42 @@ namespace llvm {
static std::unique_ptr<T> CtorFn() { return make_unique<V>(); }
public:
- Add(const char *Name, const char *Desc)
+ Add(StringRef Name, StringRef Desc)
: Entry(Name, Desc, CtorFn), Node(Entry) {
add_node(&Node);
}
};
-
- /// A dynamic import facility. This is used on Windows to
- /// import the entries added in the plugin.
- static void import(sys::DynamicLibrary &DL, const char *RegistryName) {
- typedef void *(*GetRegistry)();
- std::string Name("LLVMGetRegistry_");
- Name.append(RegistryName);
- GetRegistry Getter =
- (GetRegistry)(intptr_t)DL.getAddressOfSymbol(Name.c_str());
- if (Getter) {
- // Call the getter function in order to get the full copy of the
- // registry defined in the plugin DLL, and copy them over to the
- // current Registry.
- typedef std::pair<const node *, const node *> Info;
- Info *I = static_cast<Info *>(Getter());
- iterator begin(I->first);
- iterator end(I->second);
- for (++end; begin != end; ++begin) {
- // This Node object needs to remain alive for the
- // duration of the program.
- add_node(new node(*begin));
- }
- }
- }
-
- /// Retrieve the data to be passed across DLL boundaries when
- /// importing registries from another DLL on Windows.
- static void *exportRegistry() {
- static std::pair<const node *, const node *> Info(Head, Tail);
- return &Info;
- }
};
-
-
- // Since these are defined in a header file, plugins must be sure to export
- // these symbols.
- template <typename T>
- typename Registry<T>::node *Registry<T>::Head;
-
- template <typename T>
- typename Registry<T>::node *Registry<T>::Tail;
} // end namespace llvm
-#ifdef LLVM_ON_WIN32
-#define LLVM_EXPORT_REGISTRY(REGISTRY_CLASS) \
- extern "C" { \
- __declspec(dllexport) void *__cdecl LLVMGetRegistry_##REGISTRY_CLASS() { \
- return REGISTRY_CLASS::exportRegistry(); \
- } \
+/// Instantiate a registry class.
+///
+/// This provides template definitions of add_node, begin, and the Head and Tail
+/// pointers, then explicitly instantiates them. We could explicitly specialize
+/// them, instead of the two-step process of define then instantiate, but
+/// strictly speaking that's not allowed by the C++ standard (we would need to
+/// have explicit specialization declarations in all translation units where the
+/// specialization is used) so we don't.
+#define LLVM_INSTANTIATE_REGISTRY(REGISTRY_CLASS) \
+ namespace llvm { \
+ template<typename T> typename Registry<T>::node *Registry<T>::Head = nullptr;\
+ template<typename T> typename Registry<T>::node *Registry<T>::Tail = nullptr;\
+ template<typename T> \
+ void Registry<T>::add_node(typename Registry<T>::node *N) { \
+ if (Tail) \
+ Tail->Next = N; \
+ else \
+ Head = N; \
+ Tail = N; \
+ } \
+ template<typename T> typename Registry<T>::iterator Registry<T>::begin() { \
+ return iterator(Head); \
+ } \
+ template REGISTRY_CLASS::node *Registry<REGISTRY_CLASS::type>::Head; \
+ template REGISTRY_CLASS::node *Registry<REGISTRY_CLASS::type>::Tail; \
+ template \
+ void Registry<REGISTRY_CLASS::type>::add_node(REGISTRY_CLASS::node*); \
+ template REGISTRY_CLASS::iterator Registry<REGISTRY_CLASS::type>::begin(); \
}
-#define LLVM_IMPORT_REGISTRY(REGISTRY_CLASS, DL) \
- REGISTRY_CLASS::import(DL, #REGISTRY_CLASS)
-#else
-#define LLVM_EXPORT_REGISTRY(REGISTRY_CLASS)
-#define LLVM_IMPORT_REGISTRY(REGISTRY_CLASS, DL)
-#endif
#endif // LLVM_SUPPORT_REGISTRY_H
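A sketch of the new split between declaring a registry and instantiating it; Widget, WidgetRegistry, and HelloWidget are illustrative names:

// In a shared header: declare the plugin interface and its registry.
#include "llvm/Support/Registry.h"

struct Widget {
  virtual ~Widget() = default;
  virtual void run() = 0;
};
typedef llvm::Registry<Widget> WidgetRegistry;

// In exactly one translation unit of the hosting executable: emit the
// definitions of Head, Tail, add_node() and begin() for this registry.
LLVM_INSTANTIATE_REGISTRY(WidgetRegistry)

// In a plugin (or anywhere else): register an entry, then enumerate them.
namespace {
struct HelloWidget : Widget {
  void run() override {}
};
WidgetRegistry::Add<HelloWidget> X("hello", "a do-nothing example widget");
} // end anonymous namespace

static void runAllWidgets() {
  for (const WidgetRegistry::entry &E : WidgetRegistry::entries())
    E.instantiate()->run();
}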
diff --git a/include/llvm/Support/SHA1.h b/include/llvm/Support/SHA1.h
index 8347a713f272..1fc60a878f94 100644
--- a/include/llvm/Support/SHA1.h
+++ b/include/llvm/Support/SHA1.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
+#include <array>
#include <cstdint>
namespace llvm {
@@ -53,6 +54,9 @@ public:
/// made into update.
StringRef result();
+ /// Returns a raw 160-bit SHA1 hash for the given data.
+ static std::array<uint8_t, 20> hash(ArrayRef<uint8_t> Data);
+
private:
/// Define some constants.
/// "static constexpr" would be cleaner but MSVC does not support it yet.
@@ -61,7 +65,10 @@ private:
// Internal State
struct {
- uint32_t Buffer[BLOCK_LENGTH / 4];
+ union {
+ uint8_t C[BLOCK_LENGTH];
+ uint32_t L[BLOCK_LENGTH / 4];
+ } Buffer;
uint32_t State[HASH_LENGTH / 4];
uint32_t ByteCount;
uint8_t BufferOffset;
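The new one-shot SHA1::hash mirrors the MD5 helper above; a sketch:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SHA1.h"
#include <array>
#include <cstdint>

// Return the raw 160-bit digest of a string in one call, with no need to
// keep an SHA1 context around.
static std::array<uint8_t, 20> sha1Of(llvm::StringRef S) {
  llvm::ArrayRef<uint8_t> Bytes(reinterpret_cast<const uint8_t *>(S.data()),
                                S.size());
  return llvm::SHA1::hash(Bytes);
}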
diff --git a/include/llvm/Support/SMLoc.h b/include/llvm/Support/SMLoc.h
index c6e9a14e82ac..eb3a1ba7db51 100644
--- a/include/llvm/Support/SMLoc.h
+++ b/include/llvm/Support/SMLoc.h
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_SMLOC_H
#define LLVM_SUPPORT_SMLOC_H
+#include "llvm/ADT/None.h"
#include <cassert>
namespace llvm {
@@ -49,7 +50,8 @@ class SMRange {
public:
SMLoc Start, End;
- SMRange() {}
+ SMRange() = default;
+ SMRange(NoneType) {}
SMRange(SMLoc St, SMLoc En) : Start(St), End(En) {
assert(Start.isValid() == End.isValid() &&
"Start and end should either both be valid or both be invalid!");
@@ -60,4 +62,4 @@ public:
} // end namespace llvm
-#endif
+#endif // LLVM_SUPPORT_SMLOC_H
diff --git a/include/llvm/Support/SourceMgr.h b/include/llvm/Support/SourceMgr.h
index 1f8b1a01865f..bc7478e0d703 100644
--- a/include/llvm/Support/SourceMgr.h
+++ b/include/llvm/Support/SourceMgr.h
@@ -51,11 +51,6 @@ private:
/// This is the location of the parent include, or null if at the top level.
SMLoc IncludeLoc;
-
- SrcBuffer() {}
-
- SrcBuffer(SrcBuffer &&O)
- : Buffer(std::move(O.Buffer)), IncludeLoc(O.IncludeLoc) {}
};
/// This is all of the buffers that we are reading from.
diff --git a/include/llvm/Support/StreamingMemoryObject.h b/include/llvm/Support/StreamingMemoryObject.h
deleted file mode 100644
index 1ab85372cd20..000000000000
--- a/include/llvm/Support/StreamingMemoryObject.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//===- StreamingMemoryObject.h - Streamable data interface -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SUPPORT_STREAMINGMEMORYOBJECT_H
-#define LLVM_SUPPORT_STREAMINGMEMORYOBJECT_H
-
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/DataStream.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemoryObject.h"
-#include <memory>
-#include <vector>
-
-namespace llvm {
-
-/// Interface to data which is actually streamed from a DataStreamer. In
-/// addition to inherited members, it has the dropLeadingBytes and
-/// setKnownObjectSize methods which are not applicable to non-streamed objects.
-class StreamingMemoryObject : public MemoryObject {
-public:
- StreamingMemoryObject(std::unique_ptr<DataStreamer> Streamer);
- uint64_t getExtent() const override;
- uint64_t readBytes(uint8_t *Buf, uint64_t Size,
- uint64_t Address) const override;
- const uint8_t *getPointer(uint64_t Address, uint64_t Size) const override;
- bool isValidAddress(uint64_t address) const override;
-
- /// Drop s bytes from the front of the stream, pushing the positions of the
- /// remaining bytes down by s. This is used to skip past the bitcode header,
- /// since we don't know a priori if it's present, and we can't put bytes
- /// back into the stream once we've read them.
- bool dropLeadingBytes(size_t s);
-
- /// If the data object size is known in advance, many of the operations can
- /// be made more efficient, so this method should be called before reading
- /// starts (although it can be called anytime).
- void setKnownObjectSize(size_t size);
-
- /// The number of bytes read at a time from the data streamer.
- static const uint32_t kChunkSize = 4096 * 4;
-
-private:
- mutable std::vector<unsigned char> Bytes;
- std::unique_ptr<DataStreamer> Streamer;
- mutable size_t BytesRead; // Bytes read from stream
- size_t BytesSkipped;// Bytes skipped at start of stream (e.g. wrapper/header)
- mutable size_t ObjectSize; // 0 if unknown, set if wrapper seen or EOF reached
- mutable bool EOFReached;
-
- // Fetch enough bytes such that Pos can be read (i.e. BytesRead >
- // Pos). Returns true if Pos can be read. Unlike most of the
- // functions in BitcodeReader, returns true on success. Most of the
- // requests will be small, but we fetch at kChunkSize bytes at a
- // time to avoid making too many potentially expensive GetBytes
- // calls.
- bool fetchToPos(size_t Pos) const {
- while (Pos >= BytesRead) {
- if (EOFReached)
- return false;
- Bytes.resize(BytesRead + BytesSkipped + kChunkSize);
- size_t bytes = Streamer->GetBytes(&Bytes[BytesRead + BytesSkipped],
- kChunkSize);
- BytesRead += bytes;
- if (bytes == 0) { // reached EOF/ran out of bytes
- if (ObjectSize == 0)
- ObjectSize = BytesRead;
- EOFReached = true;
- }
- }
- return !ObjectSize || Pos < ObjectSize;
- }
-
- StreamingMemoryObject(const StreamingMemoryObject&) = delete;
- void operator=(const StreamingMemoryObject&) = delete;
-};
-
-MemoryObject *getNonStreamedMemoryObject(
- const unsigned char *Start, const unsigned char *End);
-
-}
-#endif // STREAMINGMEMORYOBJECT_H_
diff --git a/include/llvm/Support/StringSaver.h b/include/llvm/Support/StringSaver.h
index 38fb7bb38339..fcddd4cde5b6 100644
--- a/include/llvm/Support/StringSaver.h
+++ b/include/llvm/Support/StringSaver.h
@@ -16,17 +16,17 @@
namespace llvm {
-/// \brief Saves strings in the inheritor's stable storage and returns a stable
-/// raw character pointer.
+/// \brief Saves strings in the inheritor's stable storage and returns a
+/// StringRef with a stable character pointer.
class StringSaver final {
BumpPtrAllocator &Alloc;
public:
StringSaver(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
- const char *save(const char *S) { return save(StringRef(S)); }
- const char *save(StringRef S);
- const char *save(const Twine &S) { return save(StringRef(S.str())); }
- const char *save(std::string &S) { return save(StringRef(S)); }
+ StringRef save(const char *S) { return save(StringRef(S)); }
+ StringRef save(StringRef S);
+ StringRef save(const Twine &S) { return save(StringRef(S.str())); }
+ StringRef save(std::string &S) { return save(StringRef(S)); }
};
}
#endif
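With save() now returning StringRef, callers keep the length alongside the stable pointer; a sketch:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/StringSaver.h"

// Intern two strings into Alloc; the returned StringRefs stay valid for
// the lifetime of the allocator.
static llvm::StringRef buildGreeting(llvm::BumpPtrAllocator &Alloc) {
  llvm::StringSaver Saver(Alloc);
  llvm::StringRef Hello = Saver.save("hello");
  return Saver.save(llvm::Twine(Hello) + ", world");
}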
diff --git a/include/llvm/Support/SwapByteOrder.h b/include/llvm/Support/SwapByteOrder.h
index 91693aceb27d..71d3724950ab 100644
--- a/include/llvm/Support/SwapByteOrder.h
+++ b/include/llvm/Support/SwapByteOrder.h
@@ -18,6 +18,9 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <cstddef>
+#if defined(_MSC_VER) && !defined(_DEBUG)
+#include <stdlib.h>
+#endif
namespace llvm {
namespace sys {
diff --git a/include/llvm/Support/TargetParser.h b/include/llvm/Support/TargetParser.h
index 0e2141f6d46f..63aeca7f4e1e 100644
--- a/include/llvm/Support/TargetParser.h
+++ b/include/llvm/Support/TargetParser.h
@@ -111,17 +111,17 @@ unsigned getFPUNeonSupportLevel(unsigned FPUKind);
unsigned getFPURestriction(unsigned FPUKind);
// FIXME: These should be moved to TargetTuple once it exists
-bool getFPUFeatures(unsigned FPUKind, std::vector<const char *> &Features);
-bool getHWDivFeatures(unsigned HWDivKind, std::vector<const char *> &Features);
+bool getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features);
+bool getHWDivFeatures(unsigned HWDivKind, std::vector<StringRef> &Features);
bool getExtensionFeatures(unsigned Extensions,
- std::vector<const char*> &Features);
+ std::vector<StringRef> &Features);
StringRef getArchName(unsigned ArchKind);
unsigned getArchAttr(unsigned ArchKind);
StringRef getCPUAttr(unsigned ArchKind);
StringRef getSubArch(unsigned ArchKind);
StringRef getArchExtName(unsigned ArchExtKind);
-const char *getArchExtFeature(StringRef ArchExt);
+StringRef getArchExtFeature(StringRef ArchExt);
StringRef getHWDivName(unsigned HWDivKind);
// Information by Name
@@ -145,6 +145,13 @@ unsigned parseArchVersion(StringRef Arch);
// FIXME:This should be made into class design,to avoid dupplication.
namespace AArch64 {
+// Arch names.
+enum class ArchKind {
+#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
+#include "AArch64TargetParser.def"
+ AK_LAST
+};
+
// Arch extension modifiers for CPUs.
enum ArchExtKind : unsigned {
AEK_INVALID = 0x0,
@@ -155,7 +162,8 @@ enum ArchExtKind : unsigned {
AEK_SIMD = 0x10,
AEK_FP16 = 0x20,
AEK_PROFILE = 0x40,
- AEK_RAS = 0x80
+ AEK_RAS = 0x80,
+ AEK_LSE = 0x100
};
StringRef getCanonicalArchName(StringRef Arch);
@@ -167,17 +175,17 @@ unsigned getFPUNeonSupportLevel(unsigned FPUKind);
unsigned getFPURestriction(unsigned FPUKind);
// FIXME: These should be moved to TargetTuple once it exists
-bool getFPUFeatures(unsigned FPUKind, std::vector<const char *> &Features);
+bool getFPUFeatures(unsigned FPUKind, std::vector<StringRef> &Features);
bool getExtensionFeatures(unsigned Extensions,
- std::vector<const char*> &Features);
-bool getArchFeatures(unsigned ArchKind, std::vector<const char *> &Features);
+ std::vector<StringRef> &Features);
+bool getArchFeatures(unsigned ArchKind, std::vector<StringRef> &Features);
StringRef getArchName(unsigned ArchKind);
unsigned getArchAttr(unsigned ArchKind);
StringRef getCPUAttr(unsigned ArchKind);
StringRef getSubArch(unsigned ArchKind);
StringRef getArchExtName(unsigned ArchExtKind);
-const char *getArchExtFeature(StringRef ArchExt);
+StringRef getArchExtFeature(StringRef ArchExt);
unsigned checkArchVersion(StringRef Arch);
// Information by Name
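
A sketch of how the StringRef-based feature queries above might be consumed (illustrative; only functions and enumerators visible in this hunk are used, and the exact feature spellings come from the .def files, not from this note):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/TargetParser.h"
#include <vector>

void collectAArch64Extensions() {
  std::vector<llvm::StringRef> Features;
  unsigned Ext = llvm::AArch64::AEK_LSE | llvm::AArch64::AEK_RAS;
  if (llvm::AArch64::getExtensionFeatures(Ext, Features)) {
    // Features now holds the corresponding "+..." feature strings and can be
    // handed on to feature handling as StringRefs instead of raw pointers.
  }
}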
diff --git a/include/llvm/Support/TargetRegistry.h b/include/llvm/Support/TargetRegistry.h
index 076558e4df77..954cdb13abaf 100644
--- a/include/llvm/Support/TargetRegistry.h
+++ b/include/llvm/Support/TargetRegistry.h
@@ -112,7 +112,8 @@ public:
TargetMachine &TM, std::unique_ptr<MCStreamer> &&Streamer);
typedef MCAsmBackend *(*MCAsmBackendCtorTy)(const Target &T,
const MCRegisterInfo &MRI,
- const Triple &TT, StringRef CPU);
+ const Triple &TT, StringRef CPU,
+ const MCTargetOptions &Options);
typedef MCTargetAsmParser *(*MCAsmParserCtorTy)(
const MCSubtargetInfo &STI, MCAsmParser &P, const MCInstrInfo &MII,
const MCTargetOptions &Options);
@@ -279,6 +280,9 @@ public:
/// hasMCAsmBackend - Check if this target supports .o generation.
bool hasMCAsmBackend() const { return MCAsmBackendCtorFn != nullptr; }
+ /// hasMCAsmParser - Check if this target supports assembly parsing.
+ bool hasMCAsmParser() const { return MCAsmParserCtorFn != nullptr; }
+
/// @}
/// @name Feature Constructors
/// @{
@@ -365,10 +369,12 @@ public:
///
/// \param TheTriple The target triple string.
MCAsmBackend *createMCAsmBackend(const MCRegisterInfo &MRI,
- StringRef TheTriple, StringRef CPU) const {
+ StringRef TheTriple, StringRef CPU,
+ const MCTargetOptions &Options)
+ const {
if (!MCAsmBackendCtorFn)
return nullptr;
- return MCAsmBackendCtorFn(*this, MRI, Triple(TheTriple), CPU);
+ return MCAsmBackendCtorFn(*this, MRI, Triple(TheTriple), CPU, Options);
}
/// createMCAsmParser - Create a target specific assembly parser.
@@ -846,10 +852,13 @@ struct TargetRegistry {
/// target's initialization function. Usage:
///
///
-/// Target TheFooTarget; // The global target instance.
-///
+/// Target &getTheFooTarget() { // The global target instance.
+/// static Target TheFooTarget;
+/// return TheFooTarget;
+/// }
/// extern "C" void LLVMInitializeFooTargetInfo() {
-/// RegisterTarget<Triple::foo> X(TheFooTarget, "foo", "Foo description");
+/// RegisterTarget<Triple::foo> X(getTheFooTarget(), "foo",
+///                                "Foo description");
/// }
template <Triple::ArchType TargetArchType = Triple::UnknownArch,
bool HasJIT = false>
@@ -1071,7 +1080,8 @@ template <class MCAsmBackendImpl> struct RegisterMCAsmBackend {
private:
static MCAsmBackend *Allocator(const Target &T, const MCRegisterInfo &MRI,
- const Triple &TheTriple, StringRef CPU) {
+ const Triple &TheTriple, StringRef CPU,
+ const MCTargetOptions &Options) {
return new MCAsmBackendImpl(T, MRI, TheTriple, CPU);
}
};
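
A sketch of the updated createMCAsmBackend call with the new MCTargetOptions parameter (illustrative; it assumes the usual lookupTarget/createMCRegInfo entry points and default-constructed options):

#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/TargetRegistry.h"
#include <memory>
#include <string>

void createBackend(const std::string &TripleName) {
  std::string Err;
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(TripleName, Err);
  if (!T || !T->hasMCAsmBackend())
    return;
  std::unique_ptr<llvm::MCRegisterInfo> MRI(T->createMCRegInfo(TripleName));
  llvm::MCTargetOptions Options; // defaults; real callers populate this
  std::unique_ptr<llvm::MCAsmBackend> MAB(
      T->createMCAsmBackend(*MRI, TripleName, /*CPU=*/"", Options));
  (void)MAB;
}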
diff --git a/include/llvm/Support/Threading.h b/include/llvm/Support/Threading.h
index fe407b725314..4bef7ec8dd3f 100644
--- a/include/llvm/Support/Threading.h
+++ b/include/llvm/Support/Threading.h
@@ -115,6 +115,13 @@ namespace llvm {
TsanHappensAfter(&flag);
#endif
}
+
+ /// Get the amount of concurrency to use for tasks requiring significant
+ /// memory or other resources. Currently based on physical cores, if
+ /// available for the host system, otherwise falls back to
+ /// thread::hardware_concurrency().
+ /// Returns 1 when LLVM is configured with LLVM_ENABLE_THREADS=OFF.
+ unsigned heavyweight_hardware_concurrency();
}
#endif
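
A minimal sketch of how the new helper above might be used to size a worker pool for memory-hungry work (illustrative; the lambda body is a placeholder):

#include "llvm/Support/Threading.h"
#include <thread>
#include <vector>

void runHeavyTasks() {
  // Physical-core based parallelism; this is 1 when LLVM is built with
  // LLVM_ENABLE_THREADS=OFF, so the loop below degrades to serial execution.
  unsigned N = llvm::heavyweight_hardware_concurrency();
  std::vector<std::thread> Workers;
  for (unsigned I = 0; I != N; ++I)
    Workers.emplace_back([] { /* heavyweight task body */ });
  for (std::thread &W : Workers)
    W.join();
}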
diff --git a/include/llvm/Support/TimeValue.h b/include/llvm/Support/TimeValue.h
deleted file mode 100644
index 6bca58b6bc20..000000000000
--- a/include/llvm/Support/TimeValue.h
+++ /dev/null
@@ -1,386 +0,0 @@
-//===-- TimeValue.h - Declare OS TimeValue Concept --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This header file declares the operating system TimeValue concept.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SUPPORT_TIMEVALUE_H
-#define LLVM_SUPPORT_TIMEVALUE_H
-
-#include "llvm/Support/DataTypes.h"
-#include <string>
-
-namespace llvm {
-namespace sys {
- /// This class is used where a precise fixed point in time is required. The
- /// range of TimeValue spans many hundreds of billions of years both past and
- /// present. The precision of TimeValue is to the nanosecond. However, the
- /// actual precision of its values will be determined by the resolution of
- /// the system clock. The TimeValue class is used in conjunction with several
- /// other lib/System interfaces to specify the time at which a call should
- /// timeout, etc.
- /// @since 1.4
- /// @brief Provides an abstraction for a fixed point in time.
- class TimeValue {
-
- /// @name Constants
- /// @{
- public:
-
- /// A constant TimeValue representing the smallest time
- /// value permissible by the class. MinTime is some point
- /// in the distant past, about 300 billion years BCE.
- /// @brief The smallest possible time value.
- static TimeValue MinTime() {
- return TimeValue ( INT64_MIN,0 );
- }
-
- /// A constant TimeValue representing the largest time
- /// value permissible by the class. MaxTime is some point
- /// in the distant future, about 300 billion years AD.
- /// @brief The largest possible time value.
- static TimeValue MaxTime() {
- return TimeValue ( INT64_MAX,0 );
- }
-
- /// A constant TimeValue representing the base time,
- /// or zero time of 00:00:00 (midnight) January 1st, 2000.
- /// @brief 00:00:00 Jan 1, 2000 UTC.
- static TimeValue ZeroTime() {
- return TimeValue ( 0,0 );
- }
-
- /// A constant TimeValue for the Posix base time which is
- /// 00:00:00 (midnight) January 1st, 1970.
- /// @brief 00:00:00 Jan 1, 1970 UTC.
- static TimeValue PosixZeroTime() {
- return TimeValue ( PosixZeroTimeSeconds,0 );
- }
-
- /// A constant TimeValue for the Win32 base time which is
- /// 00:00:00 (midnight) January 1st, 1601.
- /// @brief 00:00:00 Jan 1, 1601 UTC.
- static TimeValue Win32ZeroTime() {
- return TimeValue ( Win32ZeroTimeSeconds,0 );
- }
-
- /// @}
- /// @name Types
- /// @{
- public:
- typedef int64_t SecondsType; ///< Type used for representing seconds.
- typedef int32_t NanoSecondsType;///< Type used for representing nanoseconds.
-
- enum TimeConversions {
- NANOSECONDS_PER_SECOND = 1000000000, ///< One Billion
- MICROSECONDS_PER_SECOND = 1000000, ///< One Million
- MILLISECONDS_PER_SECOND = 1000, ///< One Thousand
- NANOSECONDS_PER_MICROSECOND = 1000, ///< One Thousand
- NANOSECONDS_PER_MILLISECOND = 1000000,///< One Million
- NANOSECONDS_PER_WIN32_TICK = 100 ///< Win32 tick is 10^7 Hz (10ns)
- };
-
- /// @}
- /// @name Constructors
- /// @{
- public:
- /// \brief Default construct a time value, initializing to ZeroTime.
- TimeValue() : seconds_(0), nanos_(0) {}
-
- /// Caller provides the exact value in seconds and nanoseconds. The
- /// \p nanos argument defaults to zero for convenience.
- /// @brief Explicit constructor
- explicit TimeValue (SecondsType seconds, NanoSecondsType nanos = 0)
- : seconds_( seconds ), nanos_( nanos ) { this->normalize(); }
-
- /// Caller provides the exact value as a double in seconds with the
- /// fractional part representing nanoseconds.
- /// @brief Double Constructor.
- explicit TimeValue( double new_time )
- : seconds_( 0 ) , nanos_ ( 0 ) {
- SecondsType integer_part = static_cast<SecondsType>( new_time );
- seconds_ = integer_part;
- nanos_ = static_cast<NanoSecondsType>( (new_time -
- static_cast<double>(integer_part)) * NANOSECONDS_PER_SECOND );
- this->normalize();
- }
-
- /// This is a static constructor that returns a TimeValue that represents
- /// the current time.
- /// @brief Creates a TimeValue with the current time (UTC).
- static TimeValue now();
-
- /// @}
- /// @name Operators
- /// @{
- public:
- /// Add \p that to \p this.
- /// @returns this
- /// @brief Incrementing assignment operator.
- TimeValue& operator += (const TimeValue& that ) {
- this->seconds_ += that.seconds_ ;
- this->nanos_ += that.nanos_ ;
- this->normalize();
- return *this;
- }
-
- /// Subtract \p that from \p this.
- /// @returns this
- /// @brief Decrementing assignment operator.
- TimeValue& operator -= (const TimeValue &that ) {
- this->seconds_ -= that.seconds_ ;
- this->nanos_ -= that.nanos_ ;
- this->normalize();
- return *this;
- }
-
- /// Determine if \p this is less than \p that.
- /// @returns True iff *this < that.
- /// @brief True if this < that.
- int operator < (const TimeValue &that) const { return that > *this; }
-
- /// Determine if \p this is greather than \p that.
- /// @returns True iff *this > that.
- /// @brief True if this > that.
- int operator > (const TimeValue &that) const {
- if ( this->seconds_ > that.seconds_ ) {
- return 1;
- } else if ( this->seconds_ == that.seconds_ ) {
- if ( this->nanos_ > that.nanos_ ) return 1;
- }
- return 0;
- }
-
- /// Determine if \p this is less than or equal to \p that.
- /// @returns True iff *this <= that.
- /// @brief True if this <= that.
- int operator <= (const TimeValue &that) const { return that >= *this; }
-
- /// Determine if \p this is greater than or equal to \p that.
- /// @returns True iff *this >= that.
- int operator >= (const TimeValue &that) const {
- if ( this->seconds_ > that.seconds_ ) {
- return 1;
- } else if ( this->seconds_ == that.seconds_ ) {
- if ( this->nanos_ >= that.nanos_ ) return 1;
- }
- return 0;
- }
-
- /// Determines if two TimeValue objects represent the same moment in time.
- /// @returns True iff *this == that.
- int operator == (const TimeValue &that) const {
- return (this->seconds_ == that.seconds_) &&
- (this->nanos_ == that.nanos_);
- }
-
- /// Determines if two TimeValue objects represent times that are not the
- /// same.
- /// @returns True iff *this != that.
- int operator != (const TimeValue &that) const { return !(*this == that); }
-
- /// Adds two TimeValue objects together.
- /// @returns The sum of the two operands as a new TimeValue
- /// @brief Addition operator.
- friend TimeValue operator + (const TimeValue &tv1, const TimeValue &tv2);
-
- /// Subtracts two TimeValue objects.
- /// @returns The difference of the two operands as a new TimeValue
- /// @brief Subtraction operator.
- friend TimeValue operator - (const TimeValue &tv1, const TimeValue &tv2);
-
- /// @}
- /// @name Accessors
- /// @{
- public:
-
- /// Returns only the seconds component of the TimeValue. The nanoseconds
- /// portion is ignored. No rounding is performed.
- /// @brief Retrieve the seconds component
- SecondsType seconds() const { return seconds_; }
-
- /// Returns only the nanoseconds component of the TimeValue. The seconds
- /// portion is ignored.
- /// @brief Retrieve the nanoseconds component.
- NanoSecondsType nanoseconds() const { return nanos_; }
-
- /// Returns only the fractional portion of the TimeValue rounded down to the
- /// nearest microsecond (divide by one thousand).
- /// @brief Retrieve the fractional part as microseconds;
- uint32_t microseconds() const {
- return nanos_ / NANOSECONDS_PER_MICROSECOND;
- }
-
- /// Returns only the fractional portion of the TimeValue rounded down to the
- /// nearest millisecond (divide by one million).
- /// @brief Retrieve the fractional part as milliseconds;
- uint32_t milliseconds() const {
- return nanos_ / NANOSECONDS_PER_MILLISECOND;
- }
-
- /// Returns the TimeValue as a number of microseconds. Note that the value
- /// returned can overflow because the range of a uint64_t is smaller than
- /// the range of a TimeValue. Nevertheless, this is useful on some operating
- /// systems and is therefore provided.
- /// @brief Convert to a number of microseconds (can overflow)
- uint64_t usec() const {
- return seconds_ * MICROSECONDS_PER_SECOND +
- ( nanos_ / NANOSECONDS_PER_MICROSECOND );
- }
-
- /// Returns the TimeValue as a number of milliseconds. Note that the value
- /// returned can overflow because the range of a uint64_t is smaller than
- /// the range of a TimeValue. Nevertheless, this is useful on some operating
- /// systems and is therefore provided.
- /// @brief Convert to a number of milliseconds (can overflow)
- uint64_t msec() const {
- return seconds_ * MILLISECONDS_PER_SECOND +
- ( nanos_ / NANOSECONDS_PER_MILLISECOND );
- }
-
- /// Converts the TimeValue into the corresponding number of seconds
- /// since the epoch (00:00:00 Jan 1,1970).
- uint64_t toEpochTime() const {
- return seconds_ - PosixZeroTimeSeconds;
- }
-
- /// Converts the TimeValue into the corresponding number of "ticks" for
- /// Win32 platforms, correcting for the difference in Win32 zero time.
- /// @brief Convert to Win32's FILETIME
- /// (100ns intervals since 00:00:00 Jan 1, 1601 UTC)
- uint64_t toWin32Time() const {
- uint64_t result = (uint64_t)10000000 * (seconds_ - Win32ZeroTimeSeconds);
- result += nanos_ / NANOSECONDS_PER_WIN32_TICK;
- return result;
- }
-
- /// Provides the seconds and nanoseconds as results in its arguments after
- /// correction for the Posix zero time.
- /// @brief Convert to timespec time (ala POSIX.1b)
- void getTimespecTime( uint64_t& seconds, uint32_t& nanos ) const {
- seconds = seconds_ - PosixZeroTimeSeconds;
- nanos = nanos_;
- }
-
- /// Provides conversion of the TimeValue into a readable time & date.
- /// @returns std::string containing the readable time value
- /// @brief Convert time to a string.
- std::string str() const;
-
- /// @}
- /// @name Mutators
- /// @{
- public:
- /// The seconds component of the TimeValue is set to \p sec without
- /// modifying the nanoseconds part. This is useful for whole second
- /// arithmetic.
- /// @brief Set the seconds component.
- void seconds (SecondsType sec ) {
- this->seconds_ = sec;
- this->normalize();
- }
-
- /// The nanoseconds component of the TimeValue is set to \p nanos without
- /// modifying the seconds part. This is useful for basic computations
- /// involving just the nanoseconds portion. Note that the TimeValue will be
- /// normalized after this call so that the fractional (nanoseconds) portion
- /// will have the smallest equivalent value.
- /// @brief Set the nanoseconds component using a number of nanoseconds.
- void nanoseconds ( NanoSecondsType nanos ) {
- this->nanos_ = nanos;
- this->normalize();
- }
-
- /// The seconds component remains unchanged.
- /// @brief Set the nanoseconds component using a number of microseconds.
- void microseconds ( int32_t micros ) {
- this->nanos_ = micros * NANOSECONDS_PER_MICROSECOND;
- this->normalize();
- }
-
- /// The seconds component remains unchanged.
- /// @brief Set the nanoseconds component using a number of milliseconds.
- void milliseconds ( int32_t millis ) {
- this->nanos_ = millis * NANOSECONDS_PER_MILLISECOND;
- this->normalize();
- }
-
- /// @brief Converts from microsecond format to TimeValue format
- void usec( int64_t microseconds ) {
- this->seconds_ = microseconds / MICROSECONDS_PER_SECOND;
- this->nanos_ = NanoSecondsType(microseconds % MICROSECONDS_PER_SECOND) *
- NANOSECONDS_PER_MICROSECOND;
- this->normalize();
- }
-
- /// @brief Converts from millisecond format to TimeValue format
- void msec( int64_t milliseconds ) {
- this->seconds_ = milliseconds / MILLISECONDS_PER_SECOND;
- this->nanos_ = NanoSecondsType(milliseconds % MILLISECONDS_PER_SECOND) *
- NANOSECONDS_PER_MILLISECOND;
- this->normalize();
- }
-
- /// Converts the \p seconds argument from PosixTime to the corresponding
- /// TimeValue and assigns that value to \p this.
- /// @brief Convert seconds form PosixTime to TimeValue
- void fromEpochTime( SecondsType seconds ) {
- seconds_ = seconds + PosixZeroTimeSeconds;
- nanos_ = 0;
- this->normalize();
- }
-
- /// Converts the \p win32Time argument from Windows FILETIME to the
- /// corresponding TimeValue and assigns that value to \p this.
- /// @brief Convert seconds form Windows FILETIME to TimeValue
- void fromWin32Time( uint64_t win32Time ) {
- this->seconds_ = win32Time / 10000000 + Win32ZeroTimeSeconds;
- this->nanos_ = NanoSecondsType(win32Time % 10000000) * 100;
- }
-
- /// @}
- /// @name Implementation
- /// @{
- private:
- /// This causes the values to be represented so that the fractional
- /// part is minimized, possibly incrementing the seconds part.
- /// @brief Normalize to canonical form.
- void normalize();
-
- /// @}
- /// @name Data
- /// @{
- private:
- /// Store the values as a <timeval>.
- SecondsType seconds_;///< Stores the seconds part of the TimeVal
- NanoSecondsType nanos_; ///< Stores the nanoseconds part of the TimeVal
-
- static const SecondsType PosixZeroTimeSeconds;
- static const SecondsType Win32ZeroTimeSeconds;
- /// @}
-
- };
-
-inline TimeValue operator + (const TimeValue &tv1, const TimeValue &tv2) {
- TimeValue sum (tv1.seconds_ + tv2.seconds_, tv1.nanos_ + tv2.nanos_);
- sum.normalize ();
- return sum;
-}
-
-inline TimeValue operator - (const TimeValue &tv1, const TimeValue &tv2) {
- TimeValue difference (tv1.seconds_ - tv2.seconds_, tv1.nanos_ - tv2.nanos_ );
- difference.normalize ();
- return difference;
-}
-
-}
-}
-
-#endif
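
The conversions the deleted class documented map directly onto std::chrono; a minimal sketch of the two epoch translations (not part of the patch, and it assumes system_clock counts from the Unix epoch, which holds on common platforms and is guaranteed from C++20):

#include <chrono>
#include <cstdint>

// Whole seconds since 1970-01-01 (the Posix zero time above).
inline uint64_t toEpochSeconds(std::chrono::system_clock::time_point TP) {
  using namespace std::chrono;
  return duration_cast<seconds>(TP.time_since_epoch()).count();
}

// Win32 FILETIME: 100ns ticks since 1601-01-01, which precedes the Unix
// epoch by 11644473600 seconds (the Win32 zero time above).
inline uint64_t toWin32Ticks(std::chrono::system_clock::time_point TP) {
  using namespace std::chrono;
  const uint64_t TicksPerSecond = 10000000;  // 100ns per tick
  const uint64_t EpochDifferenceSeconds = 11644473600ULL;
  uint64_t Ns = duration_cast<nanoseconds>(TP.time_since_epoch()).count();
  return Ns / 100 + EpochDifferenceSeconds * TicksPerSecond;
}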
diff --git a/include/llvm/Support/Timer.h b/include/llvm/Support/Timer.h
index f0cb07599b86..80e8f13dccfe 100644
--- a/include/llvm/Support/Timer.h
+++ b/include/llvm/Support/Timer.h
@@ -24,17 +24,17 @@ class TimerGroup;
class raw_ostream;
class TimeRecord {
- double WallTime; // Wall clock time elapsed in seconds
- double UserTime; // User time elapsed
- double SystemTime; // System time elapsed
- ssize_t MemUsed; // Memory allocated (in bytes)
+ double WallTime; ///< Wall clock time elapsed in seconds.
+ double UserTime; ///< User time elapsed.
+ double SystemTime; ///< System time elapsed.
+ ssize_t MemUsed; ///< Memory allocated (in bytes).
public:
TimeRecord() : WallTime(0), UserTime(0), SystemTime(0), MemUsed(0) {}
- /// getCurrentTime - Get the current time and memory usage. If Start is true
- /// we get the memory usage before the time, otherwise we get time before
- /// memory usage. This matters if the time to get the memory usage is
- /// significant and shouldn't be counted as part of a duration.
+ /// Get the current time and memory usage. If Start is true we get the memory
+ /// usage before the time, otherwise we get time before memory usage. This
+ /// matters if the time to get the memory usage is significant and shouldn't
+ /// be counted as part of a duration.
static TimeRecord getCurrentTime(bool Start = true);
double getProcessTime() const { return UserTime + SystemTime; }
@@ -43,7 +43,6 @@ public:
double getWallTime() const { return WallTime; }
ssize_t getMemUsed() const { return MemUsed; }
- // operator< - Allow sorting.
bool operator<(const TimeRecord &T) const {
// Sort by Wall Time elapsed, as it is the only thing really accurate
return WallTime < T.WallTime;
@@ -67,27 +66,32 @@ public:
void print(const TimeRecord &Total, raw_ostream &OS) const;
};
-/// Timer - This class is used to track the amount of time spent between
-/// invocations of its startTimer()/stopTimer() methods. Given appropriate OS
-/// support it can also keep track of the RSS of the program at various points.
-/// By default, the Timer will print the amount of time it has captured to
-/// standard error when the last timer is destroyed, otherwise it is printed
-/// when its TimerGroup is destroyed. Timers do not print their information
-/// if they are never started.
-///
+/// This class is used to track the amount of time spent between invocations of
+/// its startTimer()/stopTimer() methods. Given appropriate OS support it can
+/// also keep track of the RSS of the program at various points. By default,
+/// the Timer will print the amount of time it has captured to standard error
+/// when the last timer is destroyed, otherwise it is printed when its
+/// TimerGroup is destroyed. Timers do not print their information if they are
+/// never started.
class Timer {
- TimeRecord Time; // The total time captured
- TimeRecord StartTime; // The time startTimer() was last called
- std::string Name; // The name of this time variable.
- bool Running; // Is the timer currently running?
- bool Triggered; // Has the timer ever been triggered?
- TimerGroup *TG; // The TimerGroup this Timer is in.
-
- Timer **Prev, *Next; // Doubly linked list of timers in the group.
+ TimeRecord Time; ///< The total time captured.
+ TimeRecord StartTime; ///< The time startTimer() was last called.
+ std::string Name; ///< The name of this time variable.
+ std::string Description; ///< Description of this time variable.
+ bool Running; ///< Is the timer currently running?
+ bool Triggered; ///< Has the timer ever been triggered?
+ TimerGroup *TG = nullptr; ///< The TimerGroup this Timer is in.
+
+ Timer **Prev; ///< Pointer to \p Next of previous timer in group.
+ Timer *Next; ///< Next timer in the group.
public:
- explicit Timer(StringRef N) : TG(nullptr) { init(N); }
- Timer(StringRef N, TimerGroup &tg) : TG(nullptr) { init(N, tg); }
- Timer(const Timer &RHS) : TG(nullptr) {
+ explicit Timer(StringRef Name, StringRef Description) {
+ init(Name, Description);
+ }
+ Timer(StringRef Name, StringRef Description, TimerGroup &tg) {
+ init(Name, Description, tg);
+ }
+ Timer(const Timer &RHS) {
assert(!RHS.TG && "Can only copy uninitialized timers");
}
const Timer &operator=(const Timer &T) {
@@ -96,12 +100,13 @@ public:
}
~Timer();
- // Create an uninitialized timer, client must use 'init'.
- explicit Timer() : TG(nullptr) {}
- void init(StringRef N);
- void init(StringRef N, TimerGroup &tg);
+ /// Create an uninitialized timer, client must use 'init'.
+ explicit Timer() {}
+ void init(StringRef Name, StringRef Description);
+ void init(StringRef Name, StringRef Description, TimerGroup &tg);
const std::string &getName() const { return Name; }
+ const std::string &getDescription() const { return Description; }
bool isInitialized() const { return TG != nullptr; }
/// Check if the timer is currently running.
@@ -132,7 +137,6 @@ private:
/// stopTimer() methods of the Timer class. When the object is constructed, it
/// starts the timer specified as its argument. When it is destroyed, it stops
/// the relevant timer. This makes it easy to time a region of code.
-///
class TimeRegion {
Timer *T;
TimeRegion(const TimeRegion &) = delete;
@@ -149,51 +153,77 @@ public:
}
};
-/// NamedRegionTimer - This class is basically a combination of TimeRegion and
-/// Timer. It allows you to declare a new timer, AND specify the region to
-/// time, all in one statement. All timers with the same name are merged. This
-/// is primarily used for debugging and for hunting performance problems.
-///
+/// This class is basically a combination of TimeRegion and Timer. It allows
+/// you to declare a new timer, AND specify the region to time, all in one
+/// statement. All timers with the same name are merged. This is primarily
+/// used for debugging and for hunting performance problems.
struct NamedRegionTimer : public TimeRegion {
- explicit NamedRegionTimer(StringRef Name,
- bool Enabled = true);
- explicit NamedRegionTimer(StringRef Name, StringRef GroupName,
- bool Enabled = true);
+ explicit NamedRegionTimer(StringRef Name, StringRef Description,
+ StringRef GroupName,
+ StringRef GroupDescription, bool Enabled = true);
};
/// The TimerGroup class is used to group together related timers into a single
/// report that is printed when the TimerGroup is destroyed. It is illegal to
/// destroy a TimerGroup object before all of the Timers in it are gone. A
/// TimerGroup can be specified for a newly created timer in its constructor.
-///
class TimerGroup {
+ struct PrintRecord {
+ TimeRecord Time;
+ std::string Name;
+ std::string Description;
+
+ PrintRecord(const PrintRecord &Other) = default;
+ PrintRecord(const TimeRecord &Time, const std::string &Name,
+ const std::string &Description)
+ : Time(Time), Name(Name), Description(Description) {}
+
+ bool operator <(const PrintRecord &Other) const {
+ return Time < Other.Time;
+ }
+ };
std::string Name;
- Timer *FirstTimer; // First timer in the group.
- std::vector<std::pair<TimeRecord, std::string>> TimersToPrint;
+ std::string Description;
+ Timer *FirstTimer = nullptr; ///< First timer in the group.
+ std::vector<PrintRecord> TimersToPrint;
- TimerGroup **Prev, *Next; // Doubly linked list of TimerGroup's.
+ TimerGroup **Prev; ///< Pointer to Next field of previous timergroup in list.
+ TimerGroup *Next; ///< Pointer to next timergroup in list.
TimerGroup(const TimerGroup &TG) = delete;
void operator=(const TimerGroup &TG) = delete;
public:
- explicit TimerGroup(StringRef name);
+ explicit TimerGroup(StringRef Name, StringRef Description);
~TimerGroup();
- void setName(StringRef name) { Name.assign(name.begin(), name.end()); }
+ void setName(StringRef NewName, StringRef NewDescription) {
+ Name.assign(NewName.begin(), NewName.end());
+ Description.assign(NewDescription.begin(), NewDescription.end());
+ }
- /// print - Print any started timers in this group and zero them.
+ /// Print any started timers in this group and zero them.
void print(raw_ostream &OS);
- /// printAll - This static method prints all timers and clears them all out.
+ /// This static method prints all timers and clears them all out.
static void printAll(raw_ostream &OS);
+ /// Ensure global timer group lists are initialized. This function is mostly
+ /// used by the Statistic code to influence the construction and destruction
+ /// order of the global timer lists.
+ static void ConstructTimerLists();
private:
friend class Timer;
+ friend void PrintStatisticsJSON(raw_ostream &OS);
void addTimer(Timer &T);
void removeTimer(Timer &T);
+ void prepareToPrintList();
void PrintQueuedTimers(raw_ostream &OS);
+ void printJSONValue(raw_ostream &OS, const PrintRecord &R,
+ const char *suffix, double Value);
+ const char *printJSONValues(raw_ostream &OS, const char *delim);
+ static const char *printAllJSONValues(raw_ostream &OS, const char *delim);
};
-} // End llvm namespace
+} // end namespace llvm
#endif
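
A sketch of the updated constructor signatures (name plus description) and the four-argument NamedRegionTimer (illustrative; the names "scan"/"mypass" and their descriptions are made up):

#include "llvm/Support/Timer.h"

void timedWork() {
  static llvm::TimerGroup Group("mypass", "My pass timers");
  static llvm::Timer ScanTimer("scan", "Scanning phase", Group);
  ScanTimer.startTimer();
  // ... work being measured ...
  ScanTimer.stopTimer();
}

void timedRegion() {
  // Scope-based variant; all timers sharing the same Name are merged.
  llvm::NamedRegionTimer NRT("scan", "Scanning phase", "mypass",
                             "My pass timers");
  // ... work being measured ...
}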
diff --git a/include/llvm/Support/TrailingObjects.h b/include/llvm/Support/TrailingObjects.h
index 5a21cddf9731..4d355724149c 100644
--- a/include/llvm/Support/TrailingObjects.h
+++ b/include/llvm/Support/TrailingObjects.h
@@ -62,7 +62,7 @@ namespace trailing_objects_internal {
template <typename First, typename... Rest> class AlignmentCalcHelper {
private:
enum {
- FirstAlignment = AlignOf<First>::Alignment,
+ FirstAlignment = alignof(First),
RestAlignment = AlignmentCalcHelper<Rest...>::Alignment,
};
@@ -74,7 +74,7 @@ public:
template <typename First> class AlignmentCalcHelper<First> {
public:
- enum { Alignment = AlignOf<First>::Alignment };
+ enum { Alignment = alignof(First) };
};
/// The base class for TrailingObjects* classes.
@@ -127,29 +127,33 @@ template <typename Ty1, typename Ty2> struct ExtractSecondType {
template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
typename... MoreTys>
-struct TrailingObjectsImpl {
+class TrailingObjectsImpl {
// The main template definition is never used -- the two
// specializations cover all possibilities.
};
template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
typename NextTy, typename... MoreTys>
-struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
- MoreTys...>
+class TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
+ MoreTys...>
: public TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy,
MoreTys...> {
typedef TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy, MoreTys...>
ParentType;
- // Ensure the methods we inherit are not hidden.
- using ParentType::getTrailingObjectsImpl;
- using ParentType::additionalSizeToAllocImpl;
+ struct RequiresRealignment {
+ static const bool value = alignof(PrevTy) < alignof(NextTy);
+ };
- static LLVM_CONSTEXPR bool requiresRealignment() {
- return llvm::AlignOf<PrevTy>::Alignment < llvm::AlignOf<NextTy>::Alignment;
+ static constexpr bool requiresRealignment() {
+ return RequiresRealignment::value;
}
+protected:
+ // Ensure the inherited getTrailingObjectsImpl is not hidden.
+ using ParentType::getTrailingObjectsImpl;
+
// These two functions are helper functions for
// TrailingObjects::getTrailingObjects. They recurse to the left --
// the result for each type in the list of trailing types depends on
@@ -169,7 +173,7 @@ struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
if (requiresRealignment())
return reinterpret_cast<const NextTy *>(
- llvm::alignAddr(Ptr, llvm::alignOf<NextTy>()));
+ llvm::alignAddr(Ptr, alignof(NextTy)));
else
return reinterpret_cast<const NextTy *>(Ptr);
}
@@ -183,8 +187,7 @@ struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
Obj, TrailingObjectsBase::OverloadToken<PrevTy>());
if (requiresRealignment())
- return reinterpret_cast<NextTy *>(
- llvm::alignAddr(Ptr, llvm::alignOf<NextTy>()));
+ return reinterpret_cast<NextTy *>(llvm::alignAddr(Ptr, alignof(NextTy)));
else
return reinterpret_cast<NextTy *>(Ptr);
}
@@ -192,13 +195,12 @@ struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
// Helper function for TrailingObjects::additionalSizeToAlloc: this
// function recurses to superclasses, each of which requires one
// fewer size_t argument, and adds its own size.
- static LLVM_CONSTEXPR size_t additionalSizeToAllocImpl(
+ static constexpr size_t additionalSizeToAllocImpl(
size_t SizeSoFar, size_t Count1,
typename ExtractSecondType<MoreTys, size_t>::type... MoreCounts) {
- return additionalSizeToAllocImpl(
- (requiresRealignment()
- ? llvm::alignTo(SizeSoFar, llvm::alignOf<NextTy>())
- : SizeSoFar) +
+ return ParentType::additionalSizeToAllocImpl(
+ (requiresRealignment() ? llvm::alignTo<alignof(NextTy)>(SizeSoFar)
+ : SizeSoFar) +
sizeof(NextTy) * Count1,
MoreCounts...);
}
@@ -207,14 +209,15 @@ struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
// The base case of the TrailingObjectsImpl inheritance recursion,
// when there's no more trailing types.
template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy>
-struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy>
+class TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy>
: public TrailingObjectsAligner<Align> {
+protected:
// This is a dummy method, only here so the "using" doesn't fail --
// it will never be called, because this function recurses backwards
// up the inheritance chain to subclasses.
static void getTrailingObjectsImpl();
- static LLVM_CONSTEXPR size_t additionalSizeToAllocImpl(size_t SizeSoFar) {
+ static constexpr size_t additionalSizeToAllocImpl(size_t SizeSoFar) {
return SizeSoFar;
}
@@ -235,7 +238,7 @@ class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl<
BaseTy, TrailingTys...> {
template <int A, typename B, typename T, typename P, typename... M>
- friend struct trailing_objects_internal::TrailingObjectsImpl;
+ friend class trailing_objects_internal::TrailingObjectsImpl;
template <typename... Tys> class Foo {};
@@ -323,11 +326,10 @@ public:
/// used in the class; they are supplied here redundantly only so
/// that it's clear what the counts are counting in callers.
template <typename... Tys>
- static LLVM_CONSTEXPR typename std::enable_if<
+ static constexpr typename std::enable_if<
std::is_same<Foo<TrailingTys...>, Foo<Tys...>>::value, size_t>::type
- additionalSizeToAlloc(
- typename trailing_objects_internal::ExtractSecondType<
- TrailingTys, size_t>::type... Counts) {
+ additionalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
+ TrailingTys, size_t>::type... Counts) {
return ParentType::additionalSizeToAllocImpl(0, Counts...);
}
@@ -336,10 +338,10 @@ public:
/// additionalSizeToAlloc, except it *does* include the size of the base
/// object.
template <typename... Tys>
- static LLVM_CONSTEXPR typename std::enable_if<
+ static constexpr typename std::enable_if<
std::is_same<Foo<TrailingTys...>, Foo<Tys...>>::value, size_t>::type
- totalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
- TrailingTys, size_t>::type... Counts) {
+ totalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
+ TrailingTys, size_t>::type... Counts) {
return sizeof(BaseTy) + ParentType::additionalSizeToAllocImpl(0, Counts...);
}
@@ -361,9 +363,7 @@ public:
template <typename... Tys> struct FixedSizeStorage {
template <size_t... Counts> struct with_counts {
enum { Size = totalSizeToAlloc<Tys...>(Counts...) };
- typedef llvm::AlignedCharArray<
- llvm::AlignOf<BaseTy>::Alignment, Size
- > type;
+ typedef llvm::AlignedCharArray<alignof(BaseTy), Size> type;
};
};
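
For orientation, the usage pattern this header is built around (unchanged by the cleanup above) looks roughly like the following; VarNode is a made-up type with a single trailing array of ints:

#include "llvm/Support/TrailingObjects.h"
#include <new>

class VarNode final : private llvm::TrailingObjects<VarNode, int> {
  friend TrailingObjects;
  unsigned NumInts;
  explicit VarNode(unsigned NumInts) : NumInts(NumInts) {}

public:
  static VarNode *create(unsigned NumInts) {
    // totalSizeToAlloc accounts for the base object plus the trailing ints.
    void *Mem = ::operator new(totalSizeToAlloc<int>(NumInts));
    return new (Mem) VarNode(NumInts);
  }
  int *ints() { return getTrailingObjects<int>(); }
  unsigned size() const { return NumInts; }
};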
diff --git a/include/llvm/Support/TrigramIndex.h b/include/llvm/Support/TrigramIndex.h
new file mode 100644
index 000000000000..da0b6daf47ed
--- /dev/null
+++ b/include/llvm/Support/TrigramIndex.h
@@ -0,0 +1,70 @@
+//===-- TrigramIndex.h - a heuristic for SpecialCaseList --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// TrigramIndex implements a heuristic for SpecialCaseList that allows
+// filtering out ~99% of incoming queries when all regular expressions in the
+// SpecialCaseList are simple wildcards with '*' and '.'. If the rules are more
+// complicated, the check is defeated and it will always pass the queries to a
+// full regex.
+//
+// The basic idea is that in order for a wildcard to match a query, the query
+// needs to have all trigrams which occur in the wildcard. We create a trigram
+// index (trigram -> list of rules with it) and then count trigrams in the query
+// for each rule. If the count for one of the rules reaches the expected value,
+// the check passes the query to a regex. If none of the rules got enough
+// trigrams, the check tells that the query is definitely not matched by any
+// of the rules, and no regex matching is needed.
+// A similar idea was used in Google Code Search as described in the blog post:
+// https://swtch.com/~rsc/regexp/regexp4.html
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TRIGRAMINDEX_H
+#define LLVM_SUPPORT_TRIGRAMINDEX_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace llvm {
+class StringRef;
+
+class TrigramIndex {
+ public:
+ /// Inserts a new Regex into the index.
+ void insert(std::string Regex);
+
+ /// Returns true if the special case list definitely does not have a line
+ /// that matches the query. Returns false if it is not sure.
+ bool isDefinitelyOut(StringRef Query) const;
+
+ /// Returns true iff the heuristic is defeated and not useful.
+ /// In this case isDefinitelyOut always returns false.
+ bool isDefeated() { return Defeated; }
+ private:
+ // If true, the rules are too complicated for the check to work, and full
+ // regex matching is needed for every rule.
+ bool Defeated = false;
+ // The minimum number of trigrams which should match for a rule to have a
+ // chance to match the query. The number of elements equals the number of
+ // regex rules in the SpecialCaseList.
+ std::vector<unsigned> Counts;
+ // Index holds a list of rules indices for each trigram. The same indices
+ // are used in Counts to store per-rule limits.
+ // If a trigram is too common (>4 rules with it), we stop tracking it,
+ // which increases the probability for a need to match using regex, but
+ // decreases the costs in the regular case.
+ std::unordered_map<unsigned, SmallVector<size_t, 4>> Index{256};
+};
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_TRIGRAMINDEX_H
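
A usage sketch for the new index (illustrative; the wildcard rules and the query string are made up):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/TrigramIndex.h"
#include <string>

bool mayNeedRegex(llvm::TrigramIndex &TI, llvm::StringRef Query) {
  // When the index is defeated, isDefinitelyOut() is always false and the
  // caller must fall back to full regex matching for every query.
  return !TI.isDefinitelyOut(Query);
}

void buildIndex() {
  llvm::TrigramIndex TI;
  TI.insert(std::string("*/third_party/*"));
  TI.insert(std::string("*/generated/*.inc"));
  bool NeedRegex = mayNeedRegex(TI, "lib/Support/Example.cpp");
  (void)NeedRegex;
}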
diff --git a/include/llvm/Support/UnicodeCharRanges.h b/include/llvm/Support/UnicodeCharRanges.h
index 134698c3ec6b..d4d4d8eb84a4 100644
--- a/include/llvm/Support/UnicodeCharRanges.h
+++ b/include/llvm/Support/UnicodeCharRanges.h
@@ -56,7 +56,7 @@ public:
// may get rid of NDEBUG in this header. Unfortunately there are some
// problems to get this working with MSVC 2013. Change this when
// the support for MSVC 2013 is dropped.
- LLVM_CONSTEXPR UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {}
+ constexpr UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {}
#else
UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {
assert(rangesAreValid());
diff --git a/include/llvm/Support/Wasm.h b/include/llvm/Support/Wasm.h
new file mode 100644
index 000000000000..8ac6b9038e91
--- /dev/null
+++ b/include/llvm/Support/Wasm.h
@@ -0,0 +1,87 @@
+//===- Wasm.h - Wasm object file format -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines manifest constants for the wasm object file format.
+// See: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WASM_H
+#define LLVM_SUPPORT_WASM_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+namespace wasm {
+
+// Object file magic string.
+const char WasmMagic[] = {'\0', 'a', 's', 'm'};
+// Wasm binary format version
+const uint32_t WasmVersion = 0xd;
+
+struct WasmObjectHeader {
+ StringRef Magic;
+ uint32_t Version;
+};
+
+struct WasmSection {
+ uint32_t Type; // Section type (See below)
+ uint32_t Offset; // Offset within the file
+ StringRef Name; // Section name (User-defined sections only)
+ ArrayRef<uint8_t> Content; // Section content
+};
+
+enum : unsigned {
+ WASM_SEC_USER = 0, // User-defined section
+ WASM_SEC_TYPE = 1, // Function signature declarations
+ WASM_SEC_IMPORT = 2, // Import declarations
+ WASM_SEC_FUNCTION = 3, // Function declarations
+ WASM_SEC_TABLE = 4, // Indirect function table and other tables
+ WASM_SEC_MEMORY = 5, // Memory attributes
+ WASM_SEC_GLOBAL = 6, // Global declarations
+ WASM_SEC_EXPORT = 7, // Exports
+ WASM_SEC_START = 8, // Start function declaration
+ WASM_SEC_ELEM = 9, // Elements section
+ WASM_SEC_CODE = 10, // Function bodies (code)
+ WASM_SEC_DATA = 11 // Data segments
+};
+
+// Type immediate encodings used in various contexts.
+enum : unsigned {
+ WASM_TYPE_I32 = 0x7f,
+ WASM_TYPE_I64 = 0x7e,
+ WASM_TYPE_F32 = 0x7d,
+ WASM_TYPE_F64 = 0x7c,
+ WASM_TYPE_ANYFUNC = 0x70,
+ WASM_TYPE_FUNC = 0x60,
+ WASM_TYPE_NORESULT = 0x40, // for blocks with no result values
+};
+
+// Kinds of externals (for imports and exports).
+enum : unsigned {
+ WASM_EXTERNAL_FUNCTION = 0x0,
+ WASM_EXTERNAL_TABLE = 0x1,
+ WASM_EXTERNAL_MEMORY = 0x2,
+ WASM_EXTERNAL_GLOBAL = 0x3,
+};
+
+// Opcodes used in initializer expressions.
+enum : unsigned {
+ WASM_OPCODE_END = 0x0b,
+ WASM_OPCODE_GET_GLOBAL = 0x23,
+ WASM_OPCODE_I32_CONST = 0x41,
+ WASM_OPCODE_I64_CONST = 0x42,
+ WASM_OPCODE_F32_CONST = 0x43,
+ WASM_OPCODE_F64_CONST = 0x44,
+};
+
+} // end namespace wasm
+} // end namespace llvm
+
+#endif
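
A sketch of checking the header constants defined above against a raw buffer (illustrative; it assumes a little-endian host for the version read, matching the wasm encoding):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Wasm.h"
#include <cstdint>
#include <cstring>

bool isWasmObject(llvm::ArrayRef<uint8_t> Bytes) {
  const size_t MagicSize = sizeof(llvm::wasm::WasmMagic);
  if (Bytes.size() < MagicSize + sizeof(uint32_t))
    return false;
  if (std::memcmp(Bytes.data(), llvm::wasm::WasmMagic, MagicSize) != 0)
    return false;
  uint32_t Version;
  std::memcpy(&Version, Bytes.data() + MagicSize, sizeof(Version));
  return Version == llvm::wasm::WasmVersion; // 0xd at this revision
}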
diff --git a/include/llvm/Support/YAMLParser.h b/include/llvm/Support/YAMLParser.h
index 23014fc10a3f..b9e3fa47752c 100644
--- a/include/llvm/Support/YAMLParser.h
+++ b/include/llvm/Support/YAMLParser.h
@@ -42,6 +42,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/SMLoc.h"
#include <map>
+#include <system_error>
#include <utility>
namespace llvm {
@@ -75,9 +76,11 @@ std::string escape(StringRef Input);
class Stream {
public:
/// \brief This keeps a reference to the string referenced by \p Input.
- Stream(StringRef Input, SourceMgr &, bool ShowColors = true);
+ Stream(StringRef Input, SourceMgr &, bool ShowColors = true,
+ std::error_code *EC = nullptr);
- Stream(MemoryBufferRef InputBuffer, SourceMgr &, bool ShowColors = true);
+ Stream(MemoryBufferRef InputBuffer, SourceMgr &, bool ShowColors = true,
+ std::error_code *EC = nullptr);
~Stream();
document_iterator begin();
@@ -144,12 +147,12 @@ public:
unsigned int getType() const { return TypeID; }
void *operator new(size_t Size, BumpPtrAllocator &Alloc,
- size_t Alignment = 16) LLVM_NOEXCEPT {
+ size_t Alignment = 16) noexcept {
return Alloc.Allocate(Size, Alignment);
}
void operator delete(void *Ptr, BumpPtrAllocator &Alloc,
- size_t Size) LLVM_NOEXCEPT {
+ size_t Size) noexcept {
Alloc.Deallocate(Ptr, Size);
}
@@ -157,7 +160,7 @@ protected:
std::unique_ptr<Document> &Doc;
SMRange SourceRange;
- void operator delete(void *) LLVM_NOEXCEPT = delete;
+ void operator delete(void *) noexcept = delete;
~Node() = default;
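
A sketch of the new error_code out-parameter on yaml::Stream (illustrative; Stream::failed() is the pre-existing diagnostic check):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
#include <system_error>

bool parsesCleanly(llvm::StringRef Input) {
  llvm::SourceMgr SM;
  std::error_code EC;
  llvm::yaml::Stream S(Input, SM, /*ShowColors=*/false, &EC);
  for (llvm::yaml::document_iterator DI = S.begin(), DE = S.end(); DI != DE;
       ++DI) {
    // Each document's root node would be walked here.
  }
  return !S.failed() && !EC;
}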
diff --git a/include/llvm/Support/YAMLTraits.h b/include/llvm/Support/YAMLTraits.h
index bc3fa8ad11da..38acb36942bc 100644
--- a/include/llvm/Support/YAMLTraits.h
+++ b/include/llvm/Support/YAMLTraits.h
@@ -14,19 +14,30 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cctype>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <new>
+#include <string>
#include <system_error>
+#include <type_traits>
+#include <vector>
namespace llvm {
namespace yaml {
+struct EmptyContext {};
+
/// This class should be specialized by any type that needs to be converted
/// to/from a YAML mapping. For example:
///
@@ -49,6 +60,28 @@ struct MappingTraits {
// static const bool flow = true;
};
+/// This class is similar to MappingTraits<T> but allows you to pass in
+/// additional context for each map operation. For example:
+///
+/// struct MappingContextTraits<MyStruct, MyContext> {
+/// static void mapping(IO &io, MyStruct &s, MyContext &c) {
+/// io.mapRequired("name", s.name);
+/// io.mapRequired("size", s.size);
+/// io.mapOptional("age", s.age);
+/// ++c.TimesMapped;
+/// }
+/// };
+template <class T, class Context> struct MappingContextTraits {
+ // Must provide:
+ // static void mapping(IO &io, T &fields, Context &Ctx);
+ // Optionally may provide:
+ // static StringRef validate(IO &io, T &fields, Context &Ctx);
+ //
+ // The optional flow flag will cause generated YAML to use a flow mapping
+ // (e.g. { a: 0, b: 1 }):
+ // static const bool flow = true;
+};
+
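
To make the context flow concrete, a self-contained sketch (illustrative; MyStruct, MyContext and Outer are made-up types) pairing the specialization above with the context-taking mapRequired overload added further down in this file:

#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <string>

struct MyContext { unsigned TimesMapped = 0; };
struct MyStruct { std::string name; uint32_t size; uint32_t age; };
struct Outer { MyStruct inner; };

namespace llvm {
namespace yaml {
template <> struct MappingContextTraits<MyStruct, MyContext> {
  static void mapping(IO &io, MyStruct &s, MyContext &c) {
    io.mapRequired("name", s.name);
    io.mapRequired("size", s.size);
    io.mapOptional("age", s.age);
    ++c.TimesMapped;
  }
};
template <> struct MappingTraits<Outer> {
  static void mapping(IO &io, Outer &o) {
    MyContext Ctx;
    // The context-taking overload routes Ctx to
    // MappingContextTraits<MyStruct, MyContext>::mapping.
    io.mapRequired("inner", o.inner, Ctx);
  }
};
} // end namespace yaml
} // end namespace llvm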
/// This class should be specialized by any integral type that converts
/// to/from a YAML scalar where there is a one-to-one mapping between
/// in-memory values and a string in YAML. For example:
@@ -114,7 +147,6 @@ struct ScalarTraits {
//static bool mustQuote(StringRef);
};
-
/// This class should be specialized by type that requires custom conversion
/// to/from a YAML literal block scalar. For example:
///
@@ -147,7 +179,7 @@ struct BlockScalarTraits {
/// to/from a YAML sequence. For example:
///
/// template<>
-/// struct SequenceTraits< std::vector<MyType> > {
+/// struct SequenceTraits< std::vector<MyType>> {
/// static size_t size(IO &io, std::vector<MyType> &seq) {
/// return seq.size();
/// }
@@ -177,10 +209,6 @@ struct DocumentListTraits {
// static T::value_type& element(IO &io, T &seq, size_t index);
};
-// Only used by compiler if both template types are the same
-template <typename T, T>
-struct SameType;
-
// Only used for better diagnostics of missing traits
template <typename T>
struct MissingTrait;
@@ -199,7 +227,7 @@ struct has_ScalarEnumerationTraits
public:
static bool const value =
- (sizeof(test<ScalarEnumerationTraits<T> >(nullptr)) == 1);
+ (sizeof(test<ScalarEnumerationTraits<T>>(nullptr)) == 1);
};
// Test if ScalarBitSetTraits<T> is defined on type T.
@@ -215,7 +243,7 @@ struct has_ScalarBitSetTraits
static double test(...);
public:
- static bool const value = (sizeof(test<ScalarBitSetTraits<T> >(nullptr)) == 1);
+ static bool const value = (sizeof(test<ScalarBitSetTraits<T>>(nullptr)) == 1);
};
// Test if ScalarTraits<T> is defined on type T.
@@ -258,11 +286,9 @@ public:
(sizeof(test<BlockScalarTraits<T>>(nullptr, nullptr)) == 1);
};
-// Test if MappingTraits<T> is defined on type T.
-template <class T>
-struct has_MappingTraits
-{
- typedef void (*Signature_mapping)(class IO&, T&);
+// Test if MappingContextTraits<T> is defined on type T.
+template <class T, class Context> struct has_MappingTraits {
+ typedef void (*Signature_mapping)(class IO &, T &, Context &);
template <typename U>
static char test(SameType<Signature_mapping, &U::mapping>*);
@@ -271,14 +297,26 @@ struct has_MappingTraits
static double test(...);
public:
- static bool const value = (sizeof(test<MappingTraits<T> >(nullptr)) == 1);
+ static bool const value =
+ (sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
};
-// Test if MappingTraits<T>::validate() is defined on type T.
-template <class T>
-struct has_MappingValidateTraits
-{
- typedef StringRef (*Signature_validate)(class IO&, T&);
+// Test if MappingTraits<T> is defined on type T.
+template <class T> struct has_MappingTraits<T, EmptyContext> {
+ typedef void (*Signature_mapping)(class IO &, T &);
+
+ template <typename U>
+ static char test(SameType<Signature_mapping, &U::mapping> *);
+
+ template <typename U> static double test(...);
+
+public:
+ static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
+};
+
+// Test if MappingContextTraits<T>::validate() is defined on type T.
+template <class T, class Context> struct has_MappingValidateTraits {
+ typedef StringRef (*Signature_validate)(class IO &, T &, Context &);
template <typename U>
static char test(SameType<Signature_validate, &U::validate>*);
@@ -287,7 +325,21 @@ struct has_MappingValidateTraits
static double test(...);
public:
- static bool const value = (sizeof(test<MappingTraits<T> >(nullptr)) == 1);
+ static bool const value =
+ (sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
+};
+
+// Test if MappingTraits<T>::validate() is defined on type T.
+template <class T> struct has_MappingValidateTraits<T, EmptyContext> {
+ typedef StringRef (*Signature_validate)(class IO &, T &);
+
+ template <typename U>
+ static char test(SameType<Signature_validate, &U::validate> *);
+
+ template <typename U> static double test(...);
+
+public:
+ static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
};
// Test if SequenceTraits<T> is defined on type T.
@@ -303,7 +355,7 @@ struct has_SequenceMethodTraits
static double test(...);
public:
- static bool const value = (sizeof(test<SequenceTraits<T> >(nullptr)) == 1);
+ static bool const value = (sizeof(test<SequenceTraits<T>>(nullptr)) == 1);
};
// has_FlowTraits<int> will cause an error with some compilers because
@@ -353,7 +405,7 @@ struct has_DocumentListTraits
static double test(...);
public:
- static bool const value = (sizeof(test<DocumentListTraits<T> >(nullptr))==1);
+ static bool const value = (sizeof(test<DocumentListTraits<T>>(nullptr))==1);
};
inline bool isNumber(StringRef S) {
@@ -432,29 +484,32 @@ inline bool needsQuotes(StringRef S) {
return false;
}
-template<typename T>
-struct missingTraits : public std::integral_constant<bool,
- !has_ScalarEnumerationTraits<T>::value
- && !has_ScalarBitSetTraits<T>::value
- && !has_ScalarTraits<T>::value
- && !has_BlockScalarTraits<T>::value
- && !has_MappingTraits<T>::value
- && !has_SequenceTraits<T>::value
- && !has_DocumentListTraits<T>::value > {};
-
-template<typename T>
-struct validatedMappingTraits : public std::integral_constant<bool,
- has_MappingTraits<T>::value
- && has_MappingValidateTraits<T>::value> {};
+template <typename T, typename Context>
+struct missingTraits
+ : public std::integral_constant<bool,
+ !has_ScalarEnumerationTraits<T>::value &&
+ !has_ScalarBitSetTraits<T>::value &&
+ !has_ScalarTraits<T>::value &&
+ !has_BlockScalarTraits<T>::value &&
+ !has_MappingTraits<T, Context>::value &&
+ !has_SequenceTraits<T>::value &&
+ !has_DocumentListTraits<T>::value> {};
+
+template <typename T, typename Context>
+struct validatedMappingTraits
+ : public std::integral_constant<
+ bool, has_MappingTraits<T, Context>::value &&
+ has_MappingValidateTraits<T, Context>::value> {};
+
+template <typename T, typename Context>
+struct unvalidatedMappingTraits
+ : public std::integral_constant<
+ bool, has_MappingTraits<T, Context>::value &&
+ !has_MappingValidateTraits<T, Context>::value> {};
-template<typename T>
-struct unvalidatedMappingTraits : public std::integral_constant<bool,
- has_MappingTraits<T>::value
- && !has_MappingValidateTraits<T>::value> {};
// Base class for Input and Output.
class IO {
public:
-
IO(void *Ctxt=nullptr);
virtual ~IO();
@@ -512,9 +567,10 @@ public:
template <typename FBT, typename T>
void enumFallback(T &Val) {
if (matchEnumFallback()) {
+ EmptyContext Context;
// FIXME: Force integral conversion to allow strong typedefs to convert.
FBT Res = static_cast<typename FBT::BaseType>(Val);
- yamlize(*this, Res, true);
+ yamlize(*this, Res, true, Context);
Val = static_cast<T>(static_cast<typename FBT::BaseType>(Res));
}
}
@@ -550,40 +606,58 @@ public:
void *getContext();
void setContext(void *);
- template <typename T>
- void mapRequired(const char* Key, T& Val) {
- this->processKey(Key, Val, true);
+ template <typename T> void mapRequired(const char *Key, T &Val) {
+ EmptyContext Ctx;
+ this->processKey(Key, Val, true, Ctx);
+ }
+ template <typename T, typename Context>
+ void mapRequired(const char *Key, T &Val, Context &Ctx) {
+ this->processKey(Key, Val, true, Ctx);
+ }
+
+ template <typename T> void mapOptional(const char *Key, T &Val) {
+ EmptyContext Ctx;
+ mapOptionalWithContext(Key, Val, Ctx);
}
template <typename T>
- typename std::enable_if<has_SequenceTraits<T>::value,void>::type
- mapOptional(const char* Key, T& Val) {
+ void mapOptional(const char *Key, T &Val, const T &Default) {
+ EmptyContext Ctx;
+ mapOptionalWithContext(Key, Val, Default, Ctx);
+ }
+
+ template <typename T, typename Context>
+ typename std::enable_if<has_SequenceTraits<T>::value, void>::type
+ mapOptionalWithContext(const char *Key, T &Val, Context &Ctx) {
// omit key/value instead of outputting empty sequence
- if ( this->canElideEmptySequence() && !(Val.begin() != Val.end()) )
+ if (this->canElideEmptySequence() && !(Val.begin() != Val.end()))
return;
- this->processKey(Key, Val, false);
+ this->processKey(Key, Val, false, Ctx);
}
- template <typename T>
- void mapOptional(const char* Key, Optional<T> &Val) {
- processKeyWithDefault(Key, Val, Optional<T>(), /*Required=*/false);
+ template <typename T, typename Context>
+ void mapOptionalWithContext(const char *Key, Optional<T> &Val, Context &Ctx) {
+ this->processKeyWithDefault(Key, Val, Optional<T>(), /*Required=*/false,
+ Ctx);
}
- template <typename T>
- typename std::enable_if<!has_SequenceTraits<T>::value,void>::type
- mapOptional(const char* Key, T& Val) {
- this->processKey(Key, Val, false);
+ template <typename T, typename Context>
+ typename std::enable_if<!has_SequenceTraits<T>::value, void>::type
+ mapOptionalWithContext(const char *Key, T &Val, Context &Ctx) {
+ this->processKey(Key, Val, false, Ctx);
}
- template <typename T>
- void mapOptional(const char* Key, T& Val, const T& Default) {
- this->processKeyWithDefault(Key, Val, Default, false);
+ template <typename T, typename Context>
+ void mapOptionalWithContext(const char *Key, T &Val, const T &Default,
+ Context &Ctx) {
+ this->processKeyWithDefault(Key, Val, Default, false, Ctx);
}
private:
- template <typename T>
+ template <typename T, typename Context>
void processKeyWithDefault(const char *Key, Optional<T> &Val,
- const Optional<T> &DefaultValue, bool Required) {
+ const Optional<T> &DefaultValue, bool Required,
+ Context &Ctx) {
assert(DefaultValue.hasValue() == false &&
"Optional<T> shouldn't have a value!");
void *SaveInfo;
@@ -593,7 +667,7 @@ private:
Val = T();
if (this->preflightKey(Key, Required, sameAsDefault, UseDefault,
SaveInfo)) {
- yamlize(*this, Val.getValue(), Required);
+ yamlize(*this, Val.getValue(), Required, Ctx);
this->postflightKey(SaveInfo);
} else {
if (UseDefault)
@@ -601,15 +675,15 @@ private:
}
}
- template <typename T>
- void processKeyWithDefault(const char *Key, T &Val, const T& DefaultValue,
- bool Required) {
+ template <typename T, typename Context>
+ void processKeyWithDefault(const char *Key, T &Val, const T &DefaultValue,
+ bool Required, Context &Ctx) {
void *SaveInfo;
bool UseDefault;
const bool sameAsDefault = outputting() && Val == DefaultValue;
if ( this->preflightKey(Key, Required, sameAsDefault, UseDefault,
SaveInfo) ) {
- yamlize(*this, Val, Required);
+ yamlize(*this, Val, Required, Ctx);
this->postflightKey(SaveInfo);
}
else {
@@ -618,12 +692,12 @@ private:
}
}
- template <typename T>
- void processKey(const char *Key, T &Val, bool Required) {
+ template <typename T, typename Context>
+ void processKey(const char *Key, T &Val, bool Required, Context &Ctx) {
void *SaveInfo;
bool UseDefault;
if ( this->preflightKey(Key, Required, false, UseDefault, SaveInfo) ) {
- yamlize(*this, Val, Required);
+ yamlize(*this, Val, Required, Ctx);
this->postflightKey(SaveInfo);
}
}
@@ -632,17 +706,30 @@ private:
void *Ctxt;
};
-template<typename T>
-typename std::enable_if<has_ScalarEnumerationTraits<T>::value,void>::type
-yamlize(IO &io, T &Val, bool) {
+namespace detail {
+
+template <typename T, typename Context>
+void doMapping(IO &io, T &Val, Context &Ctx) {
+ MappingContextTraits<T, Context>::mapping(io, Val, Ctx);
+}
+
+template <typename T> void doMapping(IO &io, T &Val, EmptyContext &Ctx) {
+ MappingTraits<T>::mapping(io, Val);
+}
+
+} // end namespace detail
+
+template <typename T>
+typename std::enable_if<has_ScalarEnumerationTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
io.beginEnumScalar();
ScalarEnumerationTraits<T>::enumeration(io, Val);
io.endEnumScalar();
}
-template<typename T>
-typename std::enable_if<has_ScalarBitSetTraits<T>::value,void>::type
-yamlize(IO &io, T &Val, bool) {
+template <typename T>
+typename std::enable_if<has_ScalarBitSetTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
bool DoClear;
if ( io.beginBitSetScalar(DoClear) ) {
if ( DoClear )
@@ -652,9 +739,9 @@ yamlize(IO &io, T &Val, bool) {
}
}
-template<typename T>
-typename std::enable_if<has_ScalarTraits<T>::value,void>::type
-yamlize(IO &io, T &Val, bool) {
+template <typename T>
+typename std::enable_if<has_ScalarTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
if ( io.outputting() ) {
std::string Storage;
llvm::raw_string_ostream Buffer(Storage);
@@ -674,7 +761,7 @@ yamlize(IO &io, T &Val, bool) {
template <typename T>
typename std::enable_if<has_BlockScalarTraits<T>::value, void>::type
-yamlize(IO &YamlIO, T &Val, bool) {
+yamlize(IO &YamlIO, T &Val, bool, EmptyContext &Ctx) {
if (YamlIO.outputting()) {
std::string Storage;
llvm::raw_string_ostream Buffer(Storage);
@@ -691,9 +778,9 @@ yamlize(IO &YamlIO, T &Val, bool) {
}
}
-template<typename T>
-typename std::enable_if<validatedMappingTraits<T>::value, void>::type
-yamlize(IO &io, T &Val, bool) {
+template <typename T, typename Context>
+typename std::enable_if<validatedMappingTraits<T, Context>::value, void>::type
+yamlize(IO &io, T &Val, bool, Context &Ctx) {
if (has_FlowTraits<MappingTraits<T>>::value)
io.beginFlowMapping();
else
@@ -705,7 +792,7 @@ yamlize(IO &io, T &Val, bool) {
assert(Err.empty() && "invalid struct trying to be written as yaml");
}
}
- MappingTraits<T>::mapping(io, Val);
+ detail::doMapping(io, Val, Ctx);
if (!io.outputting()) {
StringRef Err = MappingTraits<T>::validate(io, Val);
if (!Err.empty())
@@ -717,36 +804,36 @@ yamlize(IO &io, T &Val, bool) {
io.endMapping();
}
-template<typename T>
-typename std::enable_if<unvalidatedMappingTraits<T>::value, void>::type
-yamlize(IO &io, T &Val, bool) {
+template <typename T, typename Context>
+typename std::enable_if<unvalidatedMappingTraits<T, Context>::value, void>::type
+yamlize(IO &io, T &Val, bool, Context &Ctx) {
if (has_FlowTraits<MappingTraits<T>>::value) {
io.beginFlowMapping();
- MappingTraits<T>::mapping(io, Val);
+ detail::doMapping(io, Val, Ctx);
io.endFlowMapping();
} else {
io.beginMapping();
- MappingTraits<T>::mapping(io, Val);
+ detail::doMapping(io, Val, Ctx);
io.endMapping();
}
}
-template<typename T>
-typename std::enable_if<missingTraits<T>::value, void>::type
-yamlize(IO &io, T &Val, bool) {
+template <typename T>
+typename std::enable_if<missingTraits<T, EmptyContext>::value, void>::type
+yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
}
-template<typename T>
-typename std::enable_if<has_SequenceTraits<T>::value,void>::type
-yamlize(IO &io, T &Seq, bool) {
+template <typename T, typename Context>
+typename std::enable_if<has_SequenceTraits<T>::value, void>::type
+yamlize(IO &io, T &Seq, bool, Context &Ctx) {
if ( has_FlowTraits< SequenceTraits<T> >::value ) {
unsigned incnt = io.beginFlowSequence();
unsigned count = io.outputting() ? SequenceTraits<T>::size(io, Seq) : incnt;
for(unsigned i=0; i < count; ++i) {
void *SaveInfo;
if ( io.preflightFlowElement(i, SaveInfo) ) {
- yamlize(io, SequenceTraits<T>::element(io, Seq, i), true);
+ yamlize(io, SequenceTraits<T>::element(io, Seq, i), true, Ctx);
io.postflightFlowElement(SaveInfo);
}
}
@@ -758,7 +845,7 @@ yamlize(IO &io, T &Seq, bool) {
for(unsigned i=0; i < count; ++i) {
void *SaveInfo;
if ( io.preflightElement(i, SaveInfo) ) {
- yamlize(io, SequenceTraits<T>::element(io, Seq, i), true);
+ yamlize(io, SequenceTraits<T>::element(io, Seq, i), true, Ctx);
io.postflightElement(SaveInfo);
}
}
@@ -871,6 +958,7 @@ struct ScalarTraits<support::detail::packed_endian_specific_integral<
llvm::raw_ostream &Stream) {
ScalarTraits<value_type>::output(static_cast<value_type>(E), Ctx, Stream);
}
+
static StringRef input(StringRef Str, void *Ctx, endian_type &E) {
value_type V;
auto R = ScalarTraits<value_type>::input(Str, Ctx, V);
@@ -1010,9 +1098,11 @@ private:
class HNode {
virtual void anchor();
+
public:
HNode(Node *n) : _node(n) { }
- virtual ~HNode() { }
+ virtual ~HNode() = default;
+
static inline bool classof(const HNode *) { return true; }
Node *_node;
@@ -1020,16 +1110,20 @@ private:
class EmptyHNode : public HNode {
void anchor() override;
+
public:
EmptyHNode(Node *n) : HNode(n) { }
+
static inline bool classof(const HNode *n) {
return NullNode::classof(n->_node);
}
+
static inline bool classof(const EmptyHNode *) { return true; }
};
class ScalarHNode : public HNode {
void anchor() override;
+
public:
ScalarHNode(Node *n, StringRef s) : HNode(n), _value(s) { }
@@ -1039,7 +1133,9 @@ private:
return ScalarNode::classof(n->_node) ||
BlockScalarNode::classof(n->_node);
}
+
static inline bool classof(const ScalarHNode *) { return true; }
+
protected:
StringRef _value;
};
@@ -1053,6 +1149,7 @@ private:
static inline bool classof(const HNode *n) {
return MappingNode::classof(n->_node);
}
+
static inline bool classof(const MapHNode *) { return true; }
typedef llvm::StringMap<std::unique_ptr<HNode>> NameToNode;
@@ -1072,6 +1169,7 @@ private:
static inline bool classof(const HNode *n) {
return SequenceNode::classof(n->_node);
}
+
static inline bool classof(const SequenceHNode *) { return true; }
std::vector<std::unique_ptr<HNode>> Entries;
@@ -1138,7 +1236,7 @@ public:
void blockScalarString(StringRef &) override;
void setError(const Twine &message) override;
bool canElideEmptySequence() override;
-public:
+
// These are only used by operator<<. They could be private
// if that templated operator could be made a friend.
void beginDocuments();
@@ -1185,10 +1283,10 @@ private:
/// Based on BOOST_STRONG_TYPEDEF
#define LLVM_YAML_STRONG_TYPEDEF(_base, _type) \
struct _type { \
- _type() { } \
- _type(const _base v) : value(v) { } \
- _type(const _type &v) : value(v.value) {} \
- _type &operator=(const _type &rhs) { value = rhs.value; return *this; }\
+ _type() = default; \
+ _type(const _base v) : value(v) {} \
+ _type(const _type &v) = default; \
+ _type &operator=(const _type &rhs) = default; \
_type &operator=(const _base &rhs) { value = rhs; return *this; } \
operator const _base & () const { return value; } \
bool operator==(const _type &rhs) const { return value == rhs.value; } \
@@ -1241,8 +1339,9 @@ inline
typename std::enable_if<has_DocumentListTraits<T>::value, Input &>::type
operator>>(Input &yin, T &docList) {
int i = 0;
+ EmptyContext Ctx;
while ( yin.setCurrentDocument() ) {
- yamlize(yin, DocumentListTraits<T>::element(yin, docList, i), true);
+ yamlize(yin, DocumentListTraits<T>::element(yin, docList, i), true, Ctx);
if ( yin.error() )
return yin;
yin.nextDocument();
@@ -1253,11 +1352,12 @@ operator>>(Input &yin, T &docList) {
// Define non-member operator>> so that Input can stream in a map as a document.
template <typename T>
-inline
-typename std::enable_if<has_MappingTraits<T>::value, Input &>::type
+inline typename std::enable_if<has_MappingTraits<T, EmptyContext>::value,
+ Input &>::type
operator>>(Input &yin, T &docMap) {
+ EmptyContext Ctx;
yin.setCurrentDocument();
- yamlize(yin, docMap, true);
+ yamlize(yin, docMap, true, Ctx);
return yin;
}
@@ -1267,8 +1367,9 @@ template <typename T>
inline
typename std::enable_if<has_SequenceTraits<T>::value, Input &>::type
operator>>(Input &yin, T &docSeq) {
+ EmptyContext Ctx;
if (yin.setCurrentDocument())
- yamlize(yin, docSeq, true);
+ yamlize(yin, docSeq, true, Ctx);
return yin;
}
@@ -1277,15 +1378,16 @@ template <typename T>
inline
typename std::enable_if<has_BlockScalarTraits<T>::value, Input &>::type
operator>>(Input &In, T &Val) {
+ EmptyContext Ctx;
if (In.setCurrentDocument())
- yamlize(In, Val, true);
+ yamlize(In, Val, true, Ctx);
return In;
}
// Provide better error message about types missing a trait specialization
template <typename T>
-inline
-typename std::enable_if<missingTraits<T>::value, Input &>::type
+inline typename std::enable_if<missingTraits<T, EmptyContext>::value,
+ Input &>::type
operator>>(Input &yin, T &docSeq) {
char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
return yin;
@@ -1296,11 +1398,13 @@ template <typename T>
inline
typename std::enable_if<has_DocumentListTraits<T>::value, Output &>::type
operator<<(Output &yout, T &docList) {
+ EmptyContext Ctx;
yout.beginDocuments();
const size_t count = DocumentListTraits<T>::size(yout, docList);
for(size_t i=0; i < count; ++i) {
if ( yout.preflightDocument(i) ) {
- yamlize(yout, DocumentListTraits<T>::element(yout, docList, i), true);
+ yamlize(yout, DocumentListTraits<T>::element(yout, docList, i), true,
+ Ctx);
yout.postflightDocument();
}
}
@@ -1310,12 +1414,13 @@ operator<<(Output &yout, T &docList) {
// Define non-member operator<< so that Output can stream out a map.
template <typename T>
-inline
-typename std::enable_if<has_MappingTraits<T>::value, Output &>::type
+inline typename std::enable_if<has_MappingTraits<T, EmptyContext>::value,
+ Output &>::type
operator<<(Output &yout, T &map) {
+ EmptyContext Ctx;
yout.beginDocuments();
if ( yout.preflightDocument(0) ) {
- yamlize(yout, map, true);
+ yamlize(yout, map, true, Ctx);
yout.postflightDocument();
}
yout.endDocuments();
@@ -1327,9 +1432,10 @@ template <typename T>
inline
typename std::enable_if<has_SequenceTraits<T>::value, Output &>::type
operator<<(Output &yout, T &seq) {
+ EmptyContext Ctx;
yout.beginDocuments();
if ( yout.preflightDocument(0) ) {
- yamlize(yout, seq, true);
+ yamlize(yout, seq, true, Ctx);
yout.postflightDocument();
}
yout.endDocuments();
@@ -1341,9 +1447,10 @@ template <typename T>
inline
typename std::enable_if<has_BlockScalarTraits<T>::value, Output &>::type
operator<<(Output &Out, T &Val) {
+ EmptyContext Ctx;
Out.beginDocuments();
if (Out.preflightDocument(0)) {
- yamlize(Out, Val, true);
+ yamlize(Out, Val, true, Ctx);
Out.postflightDocument();
}
Out.endDocuments();
@@ -1352,73 +1459,75 @@ operator<<(Output &Out, T &Val) {
// Provide better error message about types missing a trait specialization
template <typename T>
-inline
-typename std::enable_if<missingTraits<T>::value, Output &>::type
+inline typename std::enable_if<missingTraits<T, EmptyContext>::value,
+ Output &>::type
operator<<(Output &yout, T &seq) {
char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
return yout;
}
-} // namespace yaml
-} // namespace llvm
+template <typename T> struct SequenceTraitsImpl {
+ typedef typename T::value_type _type;
+ static size_t size(IO &io, T &seq) { return seq.size(); }
+ static _type &element(IO &io, T &seq, size_t index) {
+ if (index >= seq.size())
+ seq.resize(index + 1);
+ return seq[index];
+ }
+};
+
+} // end namespace yaml
+} // end namespace llvm
/// Utility for declaring that a std::vector of a particular type
/// should be considered a YAML sequence.
-#define LLVM_YAML_IS_SEQUENCE_VECTOR(_type) \
- namespace llvm { \
- namespace yaml { \
- template<> \
- struct SequenceTraits< std::vector<_type> > { \
- static size_t size(IO &io, std::vector<_type> &seq) { \
- return seq.size(); \
- } \
- static _type& element(IO &io, std::vector<_type> &seq, size_t index) {\
- if ( index >= seq.size() ) \
- seq.resize(index+1); \
- return seq[index]; \
- } \
- }; \
- } \
+#define LLVM_YAML_IS_SEQUENCE_VECTOR(_type) \
+ namespace llvm { \
+ namespace yaml { \
+ template <> \
+ struct SequenceTraits<std::vector<_type>> \
+ : public SequenceTraitsImpl<std::vector<_type>> {}; \
+ template <unsigned N> \
+ struct SequenceTraits<SmallVector<_type, N>> \
+ : public SequenceTraitsImpl<SmallVector<_type, N>> {}; \
+ } \
}
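For illustration, a hedged sketch of how the rewritten macro is consumed; Point and its mapping are invented for the example. After the declaration, both std::vector<Point> and SmallVector<Point, N> pick up SequenceTraitsImpl and stream as YAML sequences:

struct Point { int X = 0; int Y = 0; };   // hypothetical element type

namespace llvm {
namespace yaml {
template <> struct MappingTraits<Point> {
  static void mapping(IO &io, Point &P) {
    io.mapRequired("x", P.X);
    io.mapRequired("y", P.Y);
  }
};
} // end namespace yaml
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(Point)

static void writePoints(llvm::raw_ostream &OS, std::vector<Point> &Pts) {
  llvm::yaml::Output Out(OS);
  Out << Pts;   // resolved via SequenceTraits<std::vector<Point>> above
}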
/// Utility for declaring that a std::vector of a particular type
/// should be considered a YAML flow sequence.
-#define LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(_type) \
- namespace llvm { \
- namespace yaml { \
- template<> \
- struct SequenceTraits< std::vector<_type> > { \
- static size_t size(IO &io, std::vector<_type> &seq) { \
- return seq.size(); \
- } \
- static _type& element(IO &io, std::vector<_type> &seq, size_t index) {\
- (void)flow; /* Remove this workaround after PR17897 is fixed */ \
- if ( index >= seq.size() ) \
- seq.resize(index+1); \
- return seq[index]; \
- } \
- static const bool flow = true; \
- }; \
- } \
+/// We need to do a partial specialization on the vector version, not a full
+/// one. If this is a full specialization, the compiler is a bit too "smart"
+/// and decides to warn on -Wunused-const-variable. This workaround can be
+/// removed and we can do a full specialization on std::vector<T> once
+/// PR28878 is fixed.
+#define LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(_type) \
+ namespace llvm { \
+ namespace yaml { \
+ template <unsigned N> \
+ struct SequenceTraits<SmallVector<_type, N>> \
+ : public SequenceTraitsImpl<SmallVector<_type, N>> { \
+ static const bool flow = true; \
+ }; \
+ template <typename Allocator> \
+ struct SequenceTraits<std::vector<_type, Allocator>> \
+ : public SequenceTraitsImpl<std::vector<_type, Allocator>> { \
+ static const bool flow = true; \
+ }; \
+ } \
}
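A hedged usage sketch: the only difference from the block-sequence macro above is the flow flag, which makes the container print inline. IdList is invented for the example; uint32_t already has ScalarTraits in this header:

LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(uint32_t)

struct IdList { std::vector<uint32_t> Ids; };   // hypothetical wrapper type

namespace llvm {
namespace yaml {
template <> struct MappingTraits<IdList> {
  static void mapping(IO &io, IdList &L) { io.mapRequired("ids", L.Ids); }
};
} // end namespace yaml
} // end namespace llvm

// yaml::Output then prints the member roughly as:   ids: [ 1, 2, 3 ]
// rather than one "- <element>" block-sequence line per entry.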
/// Utility for declaring that a std::vector of a particular type
/// should be considered a YAML document list.
-#define LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(_type) \
- namespace llvm { \
- namespace yaml { \
- template<> \
- struct DocumentListTraits< std::vector<_type> > { \
- static size_t size(IO &io, std::vector<_type> &seq) { \
- return seq.size(); \
- } \
- static _type& element(IO &io, std::vector<_type> &seq, size_t index) {\
- if ( index >= seq.size() ) \
- seq.resize(index+1); \
- return seq[index]; \
- } \
- }; \
- } \
+#define LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(_type) \
+ namespace llvm { \
+ namespace yaml { \
+ template <unsigned N> \
+ struct DocumentListTraits<SmallVector<_type, N>> \
+ : public SequenceTraitsImpl<SmallVector<_type, N>> {}; \
+ template <> \
+ struct DocumentListTraits<std::vector<_type>> \
+ : public SequenceTraitsImpl<std::vector<_type>> {}; \
+ } \
}
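A hedged sketch of the document-list variant; Entry and readEntries are invented for the example. Each element of the vector is read from (or written as) its own "---" document:

struct Entry { std::string Msg; };   // hypothetical per-document type

namespace llvm {
namespace yaml {
template <> struct MappingTraits<Entry> {
  static void mapping(IO &io, Entry &E) { io.mapRequired("msg", E.Msg); }
};
} // end namespace yaml
} // end namespace llvm

LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(Entry)

static bool readEntries(llvm::StringRef Buffer, std::vector<Entry> &Docs) {
  llvm::yaml::Input Yin(Buffer);
  Yin >> Docs;               // one Entry per "---" document in Buffer
  return !Yin.error();
}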
#endif // LLVM_SUPPORT_YAMLTRAITS_H
diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h
index d1e96f892a4b..e644a5bda5ef 100644
--- a/include/llvm/Support/raw_ostream.h
+++ b/include/llvm/Support/raw_ostream.h
@@ -16,20 +16,26 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <string>
#include <system_error>
namespace llvm {
+
+class formatv_object_base;
class format_object_base;
class FormattedString;
class FormattedNumber;
-template <typename T> class SmallVectorImpl;
+class FormattedBytes;
namespace sys {
namespace fs {
enum OpenFlags : unsigned;
-}
-}
+} // end namespace fs
+} // end namespace sys
/// This class implements an extremely fast bulk output stream that can *only*
/// output to a stream. It does not support seeking, reopening, rewinding, line
@@ -37,9 +43,6 @@ enum OpenFlags : unsigned;
/// a chunk at a time.
class raw_ostream {
private:
- void operator=(const raw_ostream &) = delete;
- raw_ostream(const raw_ostream &) = delete;
-
/// The buffer is handled in such a way that the buffer is
/// uninitialized, unbuffered, or out of space when OutBufCur >=
/// OutBufEnd. Thus a single comparison suffices to determine if we
@@ -69,7 +72,7 @@ private:
public:
// color order matches ANSI escape sequence, don't change
enum Colors {
- BLACK=0,
+ BLACK = 0,
RED,
GREEN,
YELLOW,
@@ -86,6 +89,9 @@ public:
OutBufStart = OutBufEnd = OutBufCur = nullptr;
}
+ raw_ostream(const raw_ostream &) = delete;
+ void operator=(const raw_ostream &) = delete;
+
virtual ~raw_ostream();
/// tell - Return the current offset with the file.
@@ -184,7 +190,7 @@ public:
return write(Str.data(), Str.length());
}
- raw_ostream &operator<<(const llvm::SmallVectorImpl<char> &Str) {
+ raw_ostream &operator<<(const SmallVectorImpl<char> &Str) {
return write(Str.data(), Str.size());
}
@@ -193,6 +199,7 @@ public:
raw_ostream &operator<<(unsigned long long N);
raw_ostream &operator<<(long long N);
raw_ostream &operator<<(const void *P);
+
raw_ostream &operator<<(unsigned int N) {
return this->operator<<(static_cast<unsigned long>(N));
}
@@ -222,6 +229,12 @@ public:
// Formatted output, see the formatHex() function in Support/Format.h.
raw_ostream &operator<<(const FormattedNumber &);
+ // Formatted output, see the formatv() function in Support/FormatVariadic.h.
+ raw_ostream &operator<<(const formatv_object_base &);
+
+ // Formatted output, see the format_bytes() function in Support/Format.h.
+ raw_ostream &operator<<(const FormattedBytes &);
+
/// indent - Insert 'NumSpaces' spaces.
raw_ostream &indent(unsigned NumSpaces);
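A hedged sketch of what the new formatv_object_base overload enables, assuming llvm/Support/FormatVariadic.h (which defines formatv) is available in this tree; the function and format string are invented for the example:

#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

static void reportCount(llvm::raw_ostream &OS, llvm::StringRef Name,
                        unsigned Count) {
  // formatv returns a formatv_object, which now streams directly.
  OS << llvm::formatv("{0}: {1} occurrences\n", Name, Count);
}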
@@ -493,7 +506,8 @@ public:
explicit raw_svector_ostream(SmallVectorImpl<char> &O) : OS(O) {
SetUnbuffered();
}
- ~raw_svector_ostream() override {}
+
+ ~raw_svector_ostream() override = default;
void flush() = delete;
@@ -512,7 +526,7 @@ class raw_null_ostream : public raw_pwrite_stream {
uint64_t current_pos() const override;
public:
- explicit raw_null_ostream() {}
+ explicit raw_null_ostream() = default;
~raw_null_ostream() override;
};
@@ -525,6 +539,6 @@ public:
~buffer_ostream() override { OS << str(); }
};
-} // end llvm namespace
+} // end namespace llvm
#endif // LLVM_SUPPORT_RAW_OSTREAM_H
diff --git a/include/llvm/Support/xxhash.h b/include/llvm/Support/xxhash.h
new file mode 100644
index 000000000000..f7ca460188a2
--- /dev/null
+++ b/include/llvm/Support/xxhash.h
@@ -0,0 +1,47 @@
+/*
+ xxHash - Extremely Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Based on revision d2df04efcbef7d7f6886d345861e5dfda4edacc1. Removed
+ * everything but a simple interface for computing XXH64. */
+
+#ifndef LLVM_SUPPORT_XXHASH_H
+#define LLVM_SUPPORT_XXHASH_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+uint64_t xxHash64(llvm::StringRef Data);
+}
+
+#endif
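A minimal usage sketch of the new interface; the wrapper function is invented for the example:

#include "llvm/Support/xxhash.h"

static uint64_t hashBuffer(llvm::StringRef Data) {
  // Deterministic 64-bit hash of the bytes in Data, per the declaration above.
  return llvm::xxHash64(Data);
}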
diff --git a/include/llvm/TableGen/Record.h b/include/llvm/TableGen/Record.h
index 393cafa7924a..5a100f0cba76 100644
--- a/include/llvm/TableGen/Record.h
+++ b/include/llvm/TableGen/Record.h
@@ -18,21 +18,31 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
namespace llvm {
class ListRecTy;
-struct MultiClass;
class Record;
-class RecordVal;
class RecordKeeper;
+class RecordVal;
+class StringInit;
+struct MultiClass;
//===----------------------------------------------------------------------===//
// Type Classes
@@ -54,13 +64,13 @@ public:
private:
RecTyKind Kind;
- std::unique_ptr<ListRecTy> ListTy;
+ ListRecTy *ListTy = nullptr;
public:
- RecTyKind getRecTyKind() const { return Kind; }
-
RecTy(RecTyKind K) : Kind(K) {}
- virtual ~RecTy() {}
+ virtual ~RecTy() = default;
+
+ RecTyKind getRecTyKind() const { return Kind; }
virtual std::string getAsString() const = 0;
void print(raw_ostream &OS) const { OS << getAsString(); }
@@ -83,6 +93,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) {
///
class BitRecTy : public RecTy {
static BitRecTy Shared;
+
BitRecTy() : RecTy(BitRecTyKind) {}
public:
@@ -101,6 +112,7 @@ public:
///
class BitsRecTy : public RecTy {
unsigned Size;
+
explicit BitsRecTy(unsigned Sz) : RecTy(BitsRecTyKind), Size(Sz) {}
public:
@@ -121,6 +133,7 @@ public:
///
class CodeRecTy : public RecTy {
static CodeRecTy Shared;
+
CodeRecTy() : RecTy(CodeRecTyKind) {}
public:
@@ -137,6 +150,7 @@ public:
///
class IntRecTy : public RecTy {
static IntRecTy Shared;
+
IntRecTy() : RecTy(IntRecTyKind) {}
public:
@@ -155,6 +169,7 @@ public:
///
class StringRecTy : public RecTy {
static StringRecTy Shared;
+
StringRecTy() : RecTy(StringRecTyKind) {}
public:
@@ -173,7 +188,9 @@ public:
///
class ListRecTy : public RecTy {
RecTy *Ty;
+
explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {}
+
friend ListRecTy *RecTy::getListTy();
public:
@@ -193,6 +210,7 @@ public:
///
class DagRecTy : public RecTy {
static DagRecTy Shared;
+
DagRecTy() : RecTy(DagRecTyKind) {}
public:
@@ -210,7 +228,9 @@ public:
///
class RecordRecTy : public RecTy {
Record *Rec;
+
explicit RecordRecTy(Record *R) : RecTy(RecordRecTyKind), Rec(R) {}
+
friend class Record;
public:
@@ -276,11 +296,11 @@ protected:
private:
const InitKind Kind;
+
protected:
uint8_t Opc; // Used by UnOpInit, BinOpInit, and TernOpInit
+
private:
- Init(const Init &) = delete;
- Init &operator=(const Init &) = delete;
virtual void anchor();
public:
@@ -290,7 +310,9 @@ protected:
explicit Init(InitKind K, uint8_t Opc = 0) : Kind(K), Opc(Opc) {}
public:
- virtual ~Init() {}
+ Init(const Init &) = delete;
+ Init &operator=(const Init &) = delete;
+ virtual ~Init() = default;
/// This virtual method should be overridden by values that may
/// not be completely specified yet.
@@ -320,8 +342,7 @@ public:
/// out, returning them as a new init of bits type. If it is not legal to use
/// the bit subscript operator on this initializer, return null.
///
- virtual Init *
- convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
+ virtual Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
return nullptr;
}
@@ -330,8 +351,7 @@ public:
/// elements, returning them as a new init of list type. If it is not legal
/// to take a slice of this, return null.
///
- virtual Init *
- convertInitListSlice(const std::vector<unsigned> &Elements) const {
+ virtual Init *convertInitListSlice(ArrayRef<unsigned> Elements) const {
return nullptr;
}
@@ -339,7 +359,7 @@ public:
/// Implementors of this method should return the type of the named field if
/// they are of record type.
///
- virtual RecTy *getFieldType(const std::string &FieldName) const {
+ virtual RecTy *getFieldType(StringInit *FieldName) const {
return nullptr;
}
@@ -348,7 +368,7 @@ public:
/// this method should return non-null, otherwise it returns null.
///
virtual Init *getFieldInit(Record &R, const RecordVal *RV,
- const std::string &FieldName) const {
+ StringInit *FieldName) const {
return nullptr;
}
@@ -384,37 +404,31 @@ inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
class TypedInit : public Init {
RecTy *Ty;
- TypedInit(const TypedInit &Other) = delete;
- TypedInit &operator=(const TypedInit &Other) = delete;
-
protected:
explicit TypedInit(InitKind K, RecTy *T, uint8_t Opc = 0)
: Init(K, Opc), Ty(T) {}
- ~TypedInit() override {
- // If this is a DefInit we need to delete the RecordRecTy.
- if (getKind() == IK_DefInit)
- delete Ty;
- }
public:
+ TypedInit(const TypedInit &Other) = delete;
+ TypedInit &operator=(const TypedInit &Other) = delete;
+
static bool classof(const Init *I) {
return I->getKind() >= IK_FirstTypedInit &&
I->getKind() <= IK_LastTypedInit;
}
+
RecTy *getType() const { return Ty; }
Init *convertInitializerTo(RecTy *Ty) const override;
- Init *
- convertInitializerBitRange(const std::vector<unsigned> &Bits) const override;
- Init *
- convertInitListSlice(const std::vector<unsigned> &Elements) const override;
+ Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
+ Init *convertInitListSlice(ArrayRef<unsigned> Elements) const override;
/// This method is used to implement the FieldInit class.
/// Implementors of this method should return the type of the named field if
/// they are of record type.
///
- RecTy *getFieldType(const std::string &FieldName) const override;
+ RecTy *getFieldType(StringInit *FieldName) const override;
/// This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
@@ -427,13 +441,15 @@ public:
///
class UnsetInit : public Init {
UnsetInit() : Init(IK_UnsetInit) {}
+
+public:
UnsetInit(const UnsetInit &) = delete;
UnsetInit &operator=(const UnsetInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_UnsetInit;
}
+
static UnsetInit *get();
Init *convertInitializerTo(RecTy *Ty) const override;
@@ -452,13 +468,15 @@ class BitInit : public Init {
bool Value;
explicit BitInit(bool V) : Init(IK_BitInit), Value(V) {}
+
+public:
BitInit(const BitInit &Other) = delete;
BitInit &operator=(BitInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_BitInit;
}
+
static BitInit *get(bool V);
bool getValue() const { return Value; }
@@ -483,16 +501,17 @@ class BitsInit final : public TypedInit, public FoldingSetNode,
BitsInit(unsigned N)
: TypedInit(IK_BitsInit, BitsRecTy::get(N)), NumBits(N) {}
+public:
BitsInit(const BitsInit &Other) = delete;
BitsInit &operator=(const BitsInit &Other) = delete;
-public:
// Do not use sized deallocation due to trailing objects.
void operator delete(void *p) { ::operator delete(p); }
static bool classof(const Init *I) {
return I->getKind() == IK_BitsInit;
}
+
static BitsInit *get(ArrayRef<Init *> Range);
void Profile(FoldingSetNodeID &ID) const;
@@ -500,19 +519,20 @@ public:
unsigned getNumBits() const { return NumBits; }
Init *convertInitializerTo(RecTy *Ty) const override;
- Init *
- convertInitializerBitRange(const std::vector<unsigned> &Bits) const override;
+ Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
bool isComplete() const override {
for (unsigned i = 0; i != getNumBits(); ++i)
if (!getBit(i)->isComplete()) return false;
return true;
}
+
bool allInComplete() const {
for (unsigned i = 0; i != getNumBits(); ++i)
if (getBit(i)->isComplete()) return false;
return true;
}
+
std::string getAsString() const override;
/// This method is used to implement
@@ -539,20 +559,20 @@ class IntInit : public TypedInit {
explicit IntInit(int64_t V)
: TypedInit(IK_IntInit, IntRecTy::get()), Value(V) {}
+public:
IntInit(const IntInit &Other) = delete;
IntInit &operator=(const IntInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_IntInit;
}
+
static IntInit *get(int64_t V);
int64_t getValue() const { return Value; }
Init *convertInitializerTo(RecTy *Ty) const override;
- Init *
- convertInitializerBitRange(const std::vector<unsigned> &Bits) const override;
+ Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;
std::string getAsString() const override;
@@ -572,25 +592,26 @@ public:
/// "foo" - Represent an initialization by a string value.
///
class StringInit : public TypedInit {
- std::string Value;
+ StringRef Value;
explicit StringInit(StringRef V)
: TypedInit(IK_StringInit, StringRecTy::get()), Value(V) {}
+public:
StringInit(const StringInit &Other) = delete;
StringInit &operator=(const StringInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_StringInit;
}
+
static StringInit *get(StringRef);
- const std::string &getValue() const { return Value; }
+ StringRef getValue() const { return Value; }
Init *convertInitializerTo(RecTy *Ty) const override;
- std::string getAsString() const override { return "\"" + Value + "\""; }
+ std::string getAsString() const override { return "\"" + Value.str() + "\""; }
std::string getAsUnquotedString() const override { return Value; }
@@ -608,27 +629,28 @@ public:
};
class CodeInit : public TypedInit {
- std::string Value;
+ StringRef Value;
explicit CodeInit(StringRef V)
: TypedInit(IK_CodeInit, static_cast<RecTy *>(CodeRecTy::get())),
Value(V) {}
+public:
CodeInit(const CodeInit &Other) = delete;
CodeInit &operator=(const CodeInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_CodeInit;
}
+
static CodeInit *get(StringRef);
- const std::string &getValue() const { return Value; }
+ StringRef getValue() const { return Value; }
Init *convertInitializerTo(RecTy *Ty) const override;
std::string getAsString() const override {
- return "[{" + Value + "}]";
+ return "[{" + Value.str() + "}]";
}
std::string getAsUnquotedString() const override { return Value; }
@@ -659,10 +681,10 @@ private:
explicit ListInit(unsigned N, RecTy *EltTy)
: TypedInit(IK_ListInit, ListRecTy::get(EltTy)), NumValues(N) {}
+public:
ListInit(const ListInit &Other) = delete;
ListInit &operator=(const ListInit &Other) = delete;
-public:
// Do not use sized deallocation due to trailing objects.
void operator delete(void *p) { ::operator delete(p); }
@@ -680,8 +702,7 @@ public:
Record *getElementAsRecord(unsigned i) const;
- Init *
- convertInitListSlice(const std::vector<unsigned> &Elements) const override;
+ Init *convertInitListSlice(ArrayRef<unsigned> Elements) const override;
Init *convertInitializerTo(RecTy *Ty) const override;
@@ -718,20 +739,21 @@ public:
/// Base class for operators
///
class OpInit : public TypedInit {
- OpInit(const OpInit &Other) = delete;
- OpInit &operator=(OpInit &Other) = delete;
-
protected:
explicit OpInit(InitKind K, RecTy *Type, uint8_t Opc)
: TypedInit(K, Type, Opc) {}
public:
+ OpInit(const OpInit &Other) = delete;
+ OpInit &operator=(OpInit &Other) = delete;
+
static bool classof(const Init *I) {
return I->getKind() >= IK_FirstOpInit &&
I->getKind() <= IK_LastOpInit;
}
+
// Clone - Clone this operator, replacing arguments with the new list
- virtual OpInit *clone(std::vector<Init *> &Operands) const = 0;
+ virtual OpInit *clone(ArrayRef<Init *> Operands) const = 0;
virtual unsigned getNumOperands() const = 0;
virtual Init *getOperand(unsigned i) const = 0;
@@ -758,25 +780,27 @@ private:
UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type)
: OpInit(IK_UnOpInit, Type, opc), LHS(lhs) {}
+public:
UnOpInit(const UnOpInit &Other) = delete;
UnOpInit &operator=(const UnOpInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_UnOpInit;
}
+
static UnOpInit *get(UnaryOp opc, Init *lhs, RecTy *Type);
void Profile(FoldingSetNodeID &ID) const;
// Clone - Clone this operator, replacing arguments with the new list
- OpInit *clone(std::vector<Init *> &Operands) const override {
+ OpInit *clone(ArrayRef<Init *> Operands) const override {
assert(Operands.size() == 1 &&
"Wrong number of operands for unary operation");
return UnOpInit::get(getOpcode(), *Operands.begin(), getType());
}
unsigned getNumOperands() const override { return 1; }
+
Init *getOperand(unsigned i) const override {
assert(i == 0 && "Invalid operand id for unary operator");
return getOperand();
@@ -798,7 +822,7 @@ public:
///
class BinOpInit : public OpInit, public FoldingSetNode {
public:
- enum BinaryOp : uint8_t { ADD, AND, SHL, SRA, SRL, LISTCONCAT,
+ enum BinaryOp : uint8_t { ADD, AND, OR, SHL, SRA, SRL, LISTCONCAT,
STRCONCAT, CONCAT, EQ };
private:
@@ -807,20 +831,21 @@ private:
BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) :
OpInit(IK_BinOpInit, Type, opc), LHS(lhs), RHS(rhs) {}
+public:
BinOpInit(const BinOpInit &Other) = delete;
BinOpInit &operator=(const BinOpInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_BinOpInit;
}
+
static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs,
RecTy *Type);
void Profile(FoldingSetNodeID &ID) const;
// Clone - Clone this operator, replacing arguments with the new list
- OpInit *clone(std::vector<Init *> &Operands) const override {
+ OpInit *clone(ArrayRef<Init *> Operands) const override {
assert(Operands.size() == 2 &&
"Wrong number of operands for binary operation");
return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType());
@@ -861,13 +886,14 @@ private:
RecTy *Type) :
OpInit(IK_TernOpInit, Type, opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
+public:
TernOpInit(const TernOpInit &Other) = delete;
TernOpInit &operator=(const TernOpInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_TernOpInit;
}
+
static TernOpInit *get(TernaryOp opc, Init *lhs,
Init *mhs, Init *rhs,
RecTy *Type);
@@ -875,7 +901,7 @@ public:
void Profile(FoldingSetNodeID &ID) const;
// Clone - Clone this operator, replacing arguments with the new list
- OpInit *clone(std::vector<Init *> &Operands) const override {
+ OpInit *clone(ArrayRef<Init *> Operands) const override {
assert(Operands.size() == 3 &&
"Wrong number of operands for ternary operation");
return TernOpInit::get(getOpcode(), Operands[0], Operands[1], Operands[2],
@@ -916,18 +942,20 @@ class VarInit : public TypedInit {
explicit VarInit(Init *VN, RecTy *T)
: TypedInit(IK_VarInit, T), VarName(VN) {}
+public:
VarInit(const VarInit &Other) = delete;
VarInit &operator=(const VarInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_VarInit;
}
- static VarInit *get(const std::string &VN, RecTy *T);
+
+ static VarInit *get(StringRef VN, RecTy *T);
static VarInit *get(Init *VN, RecTy *T);
- const std::string &getName() const;
+ StringRef getName() const;
Init *getNameInit() const { return VarName; }
+
std::string getNameInitAsString() const {
return getNameInit()->getAsUnquotedString();
}
@@ -935,9 +963,9 @@ public:
Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const override;
- RecTy *getFieldType(const std::string &FieldName) const override;
+ RecTy *getFieldType(StringInit *FieldName) const override;
Init *getFieldInit(Record &R, const RecordVal *RV,
- const std::string &FieldName) const override;
+ StringInit *FieldName) const override;
/// This method is used by classes that refer to other
/// variables which may not be defined at the time they expression is formed.
@@ -965,13 +993,14 @@ class VarBitInit : public Init {
"Illegal VarBitInit expression!");
}
+public:
VarBitInit(const VarBitInit &Other) = delete;
VarBitInit &operator=(const VarBitInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_VarBitInit;
}
+
static VarBitInit *get(TypedInit *T, unsigned B);
Init *convertInitializerTo(RecTy *Ty) const override;
@@ -1002,13 +1031,14 @@ class VarListElementInit : public TypedInit {
"Illegal VarBitInit expression!");
}
+public:
VarListElementInit(const VarListElementInit &Other) = delete;
void operator=(const VarListElementInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_VarListElementInit;
}
+
static VarListElementInit *get(TypedInit *T, unsigned E);
TypedInit *getVariable() const { return TI; }
@@ -1032,26 +1062,28 @@ class DefInit : public TypedInit {
Record *Def;
DefInit(Record *D, RecordRecTy *T) : TypedInit(IK_DefInit, T), Def(D) {}
+
friend class Record;
+public:
DefInit(const DefInit &Other) = delete;
DefInit &operator=(const DefInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_DefInit;
}
+
static DefInit *get(Record*);
Init *convertInitializerTo(RecTy *Ty) const override;
Record *getDef() const { return Def; }
- //virtual Init *convertInitializerBitRange(const std::vector<unsigned> &Bits);
+ //virtual Init *convertInitializerBitRange(ArrayRef<unsigned> Bits);
- RecTy *getFieldType(const std::string &FieldName) const override;
+ RecTy *getFieldType(StringInit *FieldName) const override;
Init *getFieldInit(Record &R, const RecordVal *RV,
- const std::string &FieldName) const override;
+ StringInit *FieldName) const override;
std::string getAsString() const override;
@@ -1072,21 +1104,22 @@ public:
///
class FieldInit : public TypedInit {
Init *Rec; // Record we are referring to
- std::string FieldName; // Field we are accessing
+ StringInit *FieldName; // Field we are accessing
- FieldInit(Init *R, const std::string &FN)
+ FieldInit(Init *R, StringInit *FN)
: TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) {
assert(getType() && "FieldInit with non-record type!");
}
+public:
FieldInit(const FieldInit &Other) = delete;
FieldInit &operator=(const FieldInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_FieldInit;
}
- static FieldInit *get(Init *R, const std::string &FN);
+
+ static FieldInit *get(Init *R, StringInit *FN);
Init *getBit(unsigned Bit) const override;
@@ -1096,7 +1129,7 @@ public:
Init *resolveReferences(Record &R, const RecordVal *RV) const override;
std::string getAsString() const override {
- return Rec->getAsString() + "." + FieldName;
+ return Rec->getAsString() + "." + FieldName->getValue().str();
}
};
@@ -1106,30 +1139,28 @@ public:
///
class DagInit : public TypedInit, public FoldingSetNode {
Init *Val;
- std::string ValName;
- std::vector<Init*> Args;
- std::vector<std::string> ArgNames;
+ StringInit *ValName;
+ SmallVector<Init*, 4> Args;
+ SmallVector<StringInit*, 4> ArgNames;
- DagInit(Init *V, const std::string &VN,
- ArrayRef<Init *> ArgRange,
- ArrayRef<std::string> NameRange)
+ DagInit(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
+ ArrayRef<StringInit *> NameRange)
: TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
Args(ArgRange.begin(), ArgRange.end()),
ArgNames(NameRange.begin(), NameRange.end()) {}
+public:
DagInit(const DagInit &Other) = delete;
DagInit &operator=(const DagInit &Other) = delete;
-public:
static bool classof(const Init *I) {
return I->getKind() == IK_DagInit;
}
- static DagInit *get(Init *V, const std::string &VN,
- ArrayRef<Init *> ArgRange,
- ArrayRef<std::string> NameRange);
- static DagInit *get(Init *V, const std::string &VN,
- const std::vector<
- std::pair<Init*, std::string> > &args);
+
+ static DagInit *get(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
+ ArrayRef<StringInit*> NameRange);
+ static DagInit *get(Init *V, StringInit *VN,
+ ArrayRef<std::pair<Init*, StringInit*>> Args);
void Profile(FoldingSetNodeID &ID) const;
@@ -1137,24 +1168,31 @@ public:
Init *getOperator() const { return Val; }
- const std::string &getName() const { return ValName; }
+ StringInit *getName() const { return ValName; }
+ StringRef getNameStr() const {
+ return ValName ? ValName->getValue() : StringRef();
+ }
unsigned getNumArgs() const { return Args.size(); }
Init *getArg(unsigned Num) const {
assert(Num < Args.size() && "Arg number out of range!");
return Args[Num];
}
- const std::string &getArgName(unsigned Num) const {
+ StringInit *getArgName(unsigned Num) const {
assert(Num < ArgNames.size() && "Arg number out of range!");
return ArgNames[Num];
}
+ StringRef getArgNameStr(unsigned Num) const {
+ StringInit *Init = getArgName(Num);
+ return Init ? Init->getValue() : StringRef();
+ }
Init *resolveReferences(Record &R, const RecordVal *RV) const override;
std::string getAsString() const override;
- typedef std::vector<Init*>::const_iterator const_arg_iterator;
- typedef std::vector<std::string>::const_iterator const_name_iterator;
+ typedef SmallVectorImpl<Init*>::const_iterator const_arg_iterator;
+ typedef SmallVectorImpl<StringInit*>::const_iterator const_name_iterator;
inline const_arg_iterator arg_begin() const { return Args.begin(); }
inline const_arg_iterator arg_end () const { return Args.end(); }
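A hedged sketch of how a TableGen backend might walk a dag value using the StringInit-based accessors above; the printing format is arbitrary:

static void printDagArgs(const llvm::DagInit *D, llvm::raw_ostream &OS) {
  OS << D->getOperator()->getAsString() << "\n";
  for (unsigned I = 0, E = D->getNumArgs(); I != E; ++I) {
    llvm::StringRef Name = D->getArgNameStr(I);   // empty when the arg is unnamed
    OS << "  " << D->getArg(I)->getAsString();
    if (!Name.empty())
      OS << ":$" << Name;
    OS << "\n";
  }
}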
@@ -1183,27 +1221,29 @@ public:
//===----------------------------------------------------------------------===//
class RecordVal {
- PointerIntPair<Init *, 1, bool> NameAndPrefix;
- RecTy *Ty;
+ friend class Record;
+ Init *Name;
+ PointerIntPair<RecTy *, 1, bool> TyAndPrefix;
Init *Value;
public:
RecordVal(Init *N, RecTy *T, bool P);
- RecordVal(const std::string &N, RecTy *T, bool P);
+ RecordVal(StringRef N, RecTy *T, bool P);
+
+ StringRef getName() const;
+ Init *getNameInit() const { return Name; }
- const std::string &getName() const;
- const Init *getNameInit() const { return NameAndPrefix.getPointer(); }
std::string getNameInitAsString() const {
return getNameInit()->getAsUnquotedString();
}
- bool getPrefix() const { return NameAndPrefix.getInt(); }
- RecTy *getType() const { return Ty; }
+ bool getPrefix() const { return TyAndPrefix.getInt(); }
+ RecTy *getType() const { return TyAndPrefix.getPointer(); }
Init *getValue() const { return Value; }
bool setValue(Init *V) {
if (V) {
- Value = V->convertInitializerTo(Ty);
+ Value = V->convertInitializerTo(getType());
return Value == nullptr;
}
Value = nullptr;
@@ -1233,7 +1273,7 @@ class Record {
// Tracks Record instances. Not owned by Record.
RecordKeeper &TrackedRecords;
- std::unique_ptr<DefInit> TheInit;
+ DefInit *TheInit = nullptr;
// Unique record ID.
unsigned ID;
@@ -1263,10 +1303,10 @@ public:
ID(LastID++), IsAnonymous(Anonymous), ResolveFirst(false) {
init();
}
- explicit Record(const std::string &N, ArrayRef<SMLoc> locs,
- RecordKeeper &records, bool Anonymous = false)
- : Record(StringInit::get(N), locs, records, Anonymous) {}
+ explicit Record(StringRef N, ArrayRef<SMLoc> locs, RecordKeeper &records,
+ bool Anonymous = false)
+ : Record(StringInit::get(N), locs, records, Anonymous) {}
// When copy-constructing a Record, we must still guarantee a globally unique
// ID number. Don't copy TheInit either since it's owned by the original
@@ -1281,16 +1321,17 @@ public:
unsigned getID() const { return ID; }
- const std::string &getName() const;
+ StringRef getName() const;
Init *getNameInit() const {
return Name;
}
+
const std::string getNameInitAsString() const {
return getNameInit()->getAsUnquotedString();
}
- void setName(Init *Name); // Also updates RecordKeeper.
- void setName(const std::string &Name); // Also updates RecordKeeper.
+ void setName(Init *Name); // Also updates RecordKeeper.
+ void setName(StringRef Name); // Also updates RecordKeeper.
ArrayRef<SMLoc> getLoc() const { return Locs; }
@@ -1300,7 +1341,9 @@ public:
ArrayRef<Init *> getTemplateArgs() const {
return TemplateArgs;
}
+
ArrayRef<RecordVal> getValues() const { return Values; }
+
ArrayRef<std::pair<Record *, SMRange>> getSuperClasses() const {
return SuperClasses;
}
@@ -1310,23 +1353,27 @@ public:
if (TA == Name) return true;
return false;
}
+
bool isTemplateArg(StringRef Name) const {
return isTemplateArg(StringInit::get(Name));
}
const RecordVal *getValue(const Init *Name) const {
for (const RecordVal &Val : Values)
- if (Val.getNameInit() == Name) return &Val;
+ if (Val.Name == Name) return &Val;
return nullptr;
}
+
const RecordVal *getValue(StringRef Name) const {
return getValue(StringInit::get(Name));
}
+
RecordVal *getValue(const Init *Name) {
for (RecordVal &Val : Values)
- if (Val.getNameInit() == Name) return &Val;
+ if (Val.Name == Name) return &Val;
return nullptr;
}
+
RecordVal *getValue(StringRef Name) {
return getValue(StringInit::get(Name));
}
@@ -1335,6 +1382,7 @@ public:
assert(!isTemplateArg(Name) && "Template arg already defined!");
TemplateArgs.push_back(Name);
}
+
void addTemplateArg(StringRef Name) {
addTemplateArg(StringInit::get(Name));
}
@@ -1506,7 +1554,7 @@ struct MultiClass {
void dump() const;
- MultiClass(const std::string &Name, SMLoc Loc, RecordKeeper &Records) :
+ MultiClass(StringRef Name, SMLoc Loc, RecordKeeper &Records) :
Rec(Name, Loc, Records) {}
};
@@ -1518,20 +1566,23 @@ public:
const RecordMap &getClasses() const { return Classes; }
const RecordMap &getDefs() const { return Defs; }
- Record *getClass(const std::string &Name) const {
+ Record *getClass(StringRef Name) const {
auto I = Classes.find(Name);
return I == Classes.end() ? nullptr : I->second.get();
}
- Record *getDef(const std::string &Name) const {
+
+ Record *getDef(StringRef Name) const {
auto I = Defs.find(Name);
return I == Defs.end() ? nullptr : I->second.get();
}
+
void addClass(std::unique_ptr<Record> R) {
bool Ins = Classes.insert(std::make_pair(R->getName(),
std::move(R))).second;
(void)Ins;
assert(Ins && "Class already exists");
}
+
void addDef(std::unique_ptr<Record> R) {
bool Ins = Defs.insert(std::make_pair(R->getName(),
std::move(R))).second;
@@ -1545,8 +1596,7 @@ public:
/// This method returns all concrete definitions
/// that derive from the specified class name. A class with the specified
/// name must exist.
- std::vector<Record *>
- getAllDerivedDefinitions(const std::string &ClassName) const;
+ std::vector<Record *> getAllDerivedDefinitions(StringRef ClassName) const;
void dump() const;
};
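A hedged sketch of a backend-style query through the StringRef overload; the function is invented for the example, and per the comment above the named class must exist:

static void listDerived(const llvm::RecordKeeper &RK, llvm::StringRef Cls,
                        llvm::raw_ostream &OS) {
  for (llvm::Record *R : RK.getAllDerivedDefinitions(Cls))
    OS << R->getName() << "\n";   // getName now returns a StringRef
}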
@@ -1662,13 +1712,8 @@ raw_ostream &operator<<(raw_ostream &OS, const RecordKeeper &RK);
/// Return an Init with a qualifier prefix referring
/// to CurRec's name.
Init *QualifyName(Record &CurRec, MultiClass *CurMultiClass,
- Init *Name, const std::string &Scoper);
-
-/// Return an Init with a qualifier prefix referring
-/// to CurRec's name.
-Init *QualifyName(Record &CurRec, MultiClass *CurMultiClass,
- const std::string &Name, const std::string &Scoper);
+ Init *Name, StringRef Scoper);
-} // end llvm namespace
+} // end namespace llvm
#endif // LLVM_TABLEGEN_RECORD_H
diff --git a/include/llvm/TableGen/SetTheory.h b/include/llvm/TableGen/SetTheory.h
index d4e0f53d3efa..818b0549b66a 100644
--- a/include/llvm/TableGen/SetTheory.h
+++ b/include/llvm/TableGen/SetTheory.h
@@ -50,8 +50,10 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SMLoc.h"
#include <map>
+#include <memory>
#include <vector>
namespace llvm {
@@ -68,13 +70,14 @@ public:
/// Operator - A callback representing a DAG operator.
class Operator {
virtual void anchor();
+
public:
- virtual ~Operator() {}
+ virtual ~Operator() = default;
/// apply - Apply this operator to Expr's arguments and insert the result
/// in Elts.
virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts,
- ArrayRef<SMLoc> Loc) =0;
+ ArrayRef<SMLoc> Loc) = 0;
};
/// Expander - A callback function that can transform a Record representing a
@@ -82,10 +85,11 @@ public:
/// users to define named sets that can be used in DAG expressions.
class Expander {
virtual void anchor();
+
public:
- virtual ~Expander() {}
+ virtual ~Expander() = default;
- virtual void expand(SetTheory&, Record*, RecSet &Elts) =0;
+ virtual void expand(SetTheory&, Record*, RecSet &Elts) = 0;
};
private:
@@ -138,5 +142,4 @@ public:
} // end namespace llvm
-#endif
-
+#endif // LLVM_TABLEGEN_SETTHEORY_H
diff --git a/include/llvm/Target/CostTable.h b/include/llvm/Target/CostTable.h
index 2499f5c3189c..b7d9240a91f5 100644
--- a/include/llvm/Target/CostTable.h
+++ b/include/llvm/Target/CostTable.h
@@ -16,6 +16,7 @@
#define LLVM_TARGET_COSTTABLE_H_
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineValueType.h"
namespace llvm {
@@ -30,9 +31,9 @@ struct CostTblEntry {
/// Find in cost table, TypeTy must be comparable to CompareTy by ==
inline const CostTblEntry *CostTableLookup(ArrayRef<CostTblEntry> Tbl,
int ISD, MVT Ty) {
- auto I = std::find_if(Tbl.begin(), Tbl.end(),
- [=](const CostTblEntry &Entry) {
- return ISD == Entry.ISD && Ty == Entry.Type; });
+ auto I = find_if(Tbl, [=](const CostTblEntry &Entry) {
+ return ISD == Entry.ISD && Ty == Entry.Type;
+ });
if (I != Tbl.end())
return I;
@@ -53,11 +54,9 @@ struct TypeConversionCostTblEntry {
inline const TypeConversionCostTblEntry *
ConvertCostTableLookup(ArrayRef<TypeConversionCostTblEntry> Tbl,
int ISD, MVT Dst, MVT Src) {
- auto I = std::find_if(Tbl.begin(), Tbl.end(),
- [=](const TypeConversionCostTblEntry &Entry) {
- return ISD == Entry.ISD && Src == Entry.Src &&
- Dst == Entry.Dst;
- });
+ auto I = find_if(Tbl, [=](const TypeConversionCostTblEntry &Entry) {
+ return ISD == Entry.ISD && Src == Entry.Src && Dst == Entry.Dst;
+ });
if (I != Tbl.end())
return I;
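A hedged sketch of the caller side, mirroring how targets keep static cost tables for these lookup helpers; the entries and costs are invented for the example:

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/Target/CostTable.h"

static const llvm::CostTblEntry MulCostTbl[] = {
  { llvm::ISD::MUL, llvm::MVT::v4i32, 2 },   // illustrative costs only
  { llvm::ISD::MUL, llvm::MVT::v8i16, 1 },
};

static unsigned getMulCost(llvm::MVT VT) {
  if (const auto *Entry = llvm::CostTableLookup(MulCostTbl, llvm::ISD::MUL, VT))
    return Entry->Cost;
  return 4;   // hypothetical fallback for types not covered by the table
}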
diff --git a/include/llvm/Target/GenericOpcodes.td b/include/llvm/Target/GenericOpcodes.td
index b4d95508f0a5..8694eb5797d0 100644
--- a/include/llvm/Target/GenericOpcodes.td
+++ b/include/llvm/Target/GenericOpcodes.td
@@ -13,27 +13,419 @@
//===----------------------------------------------------------------------===//
//------------------------------------------------------------------------------
+// Unary ops.
+//------------------------------------------------------------------------------
+
+// Extend the underlying scalar type of an operation, leaving the high bits
+// unspecified.
+def G_ANYEXT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+// Sign extend the underlying scalar type of an operation, copying the sign bit
+// into the newly-created space.
+def G_SEXT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+// Zero extend the underlying scalar type of an operation, putting zero bits
+// into the newly-created space.
+def G_ZEXT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+
+// Truncate the underlying scalar type of an operation. This is equivalent to
+// G_EXTRACT for scalar types, but acts elementwise on vectors.
+def G_TRUNC : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_FRAME_INDEX : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$src2);
+ let hasSideEffects = 0;
+}
+
+def G_GLOBAL_VALUE : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$src);
+ let hasSideEffects = 0;
+}
+
+def G_INTTOPTR : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_PTRTOINT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_BITCAST : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_CONSTANT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$imm);
+ let hasSideEffects = 0;
+}
+
+def G_FCONSTANT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$imm);
+ let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
// Binary ops.
//------------------------------------------------------------------------------
+
// Generic addition.
def G_ADD : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+// Generic pointer offset.
+def G_GEP : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type1:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic subtraction.
+def G_SUB : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 0;
+}
+
+// Generic multiplication.
+def G_MUL : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+// Generic signed division.
+def G_SDIV : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 0;
+}
+
+// Generic unsigned division.
+def G_UDIV : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 0;
+}
+
+// Generic signed remainder.
+def G_SREM : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 0;
+}
+
+// Generic unsigned remainder.
+def G_UREM : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 0;
+}
+
+// Generic bitwise and.
+def G_AND : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic bitwise or.
def G_OR : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+// Generic bitwise xor.
+def G_XOR : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+// Generic left-shift.
+def G_SHL : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic logical right-shift.
+def G_LSHR : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic arithmetic right-shift.
+def G_ASHR : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic integer comparison.
+def G_ICMP : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic floating-point comparison.
+def G_FCMP : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic select.
+def G_SELECT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$tst, type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Overflow ops
+//------------------------------------------------------------------------------
+
+// Generic unsigned addition consuming and producing a carry flag.
+def G_UADDE : Instruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
+ let hasSideEffects = 0;
+}
+
+// Generic signed addition producing a carry flag.
+def G_SADDO : Instruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+// Generic unsigned subtraction consuming and producing a carry flag.
+def G_USUBE : Instruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
+ let hasSideEffects = 0;
+}
+
+// Generic signed subtraction producing a carry flag.
+def G_SSUBO : Instruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic unsigned multiplication producing a carry flag.
+def G_UMULO : Instruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+// Generic signed multiplication producing a carry flag.
+def G_SMULO : Instruction {
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+//------------------------------------------------------------------------------
+// Floating Point Unary Ops.
+//------------------------------------------------------------------------------
+
+def G_FPEXT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_FPTRUNC : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_FPTOSI : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_FPTOUI : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_SITOFP : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+def G_UITOFP : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
+ let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Floating Point Binary ops.
+//------------------------------------------------------------------------------
+
+// Generic FP addition.
+def G_FADD : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 1;
+}
+
+// Generic FP subtraction.
+def G_FSUB : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+ let isCommutable = 0;
+}
+
+// Generic FP multiplication.
+def G_FMUL : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
+// Generic FP division.
+def G_FDIV : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
+// Generic FP remainder.
+def G_FREM : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
+//------------------------------------------------------------------------------
+// Memory ops
+//------------------------------------------------------------------------------
+
+// Generic load. Expects a MachineMemOperand in addition to explicit operands.
+def G_LOAD : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$addr);
+ let hasSideEffects = 0;
+ let mayLoad = 1;
+}
+
+// Generic store. Expects a MachineMemOperand in addition to explicit operands.
+def G_STORE : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins type0:$src, type1:$addr);
+ let hasSideEffects = 0;
+ let mayStore = 1;
+}
+
+//------------------------------------------------------------------------------
+// Variadic ops
+//------------------------------------------------------------------------------
+
+// Extract multiple registers of the specified size, starting from blocks given
+// by indices. This will almost certainly be mapped to sub-register COPYs after
+// register banks have been selected.
+def G_EXTRACT : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let hasSideEffects = 0;
+}
+
+// Insert a sequence of smaller registers into a larger one at the specified
+// indices (interleaved with the values in the operand list "op0, bit0, op1,
+// bit1, ...").
+def G_INSERT : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src, variable_ops);
+ let hasSideEffects = 0;
+}
+
+// Combine a sequence of generic vregs into a single larger value (starting at
+// bit 0). Essentially a G_INSERT where $src is an IMPLICIT_DEF, but it's so
+// important to legalization it probably deserves its own instruction.
+def G_SEQUENCE : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins variable_ops);
+ let hasSideEffects = 0;
+}
+
+// Intrinsic without side effects.
+def G_INTRINSIC : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins unknown:$intrin, variable_ops);
+ let hasSideEffects = 0;
+}
+
+// Intrinsic with side effects.
+def G_INTRINSIC_W_SIDE_EFFECTS : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins unknown:$intrin, variable_ops);
+ let hasSideEffects = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
//------------------------------------------------------------------------------
// Branches.
//------------------------------------------------------------------------------
+
// Generic unconditional branch.
def G_BR : Instruction {
let OutOperandList = (outs);
@@ -41,6 +433,16 @@ def G_BR : Instruction {
let hasSideEffects = 0;
let isBranch = 1;
let isTerminator = 1;
+ let isBarrier = 1;
+}
+
+// Generic conditional branch.
+def G_BRCOND : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins type0:$tst, unknown:$truebb);
+ let hasSideEffects = 0;
+ let isBranch = 1;
+ let isTerminator = 1;
}
// TODO: Add the other generic opcodes.
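A minimal sketch of how these generic opcodes might be created at the MachineInstr level once the definitions above are pulled into TargetOpcode; the helper and register names are hypothetical, and the caller is assumed to have already created generic virtual registers with LLTs attached:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/IR/DebugLoc.h"
    #include "llvm/Target/TargetInstrInfo.h"
    #include "llvm/Target/TargetOpcodes.h"

    using namespace llvm;

    // Hypothetical helper: append a generic add and a conditional branch to MBB.
    // DstReg, LHS, RHS and CondReg are generic vregs created by the caller.
    static void emitGenericAddAndBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock &TrueBB,
                                        const TargetInstrInfo &TII,
                                        const DebugLoc &DL, unsigned DstReg,
                                        unsigned LHS, unsigned RHS,
                                        unsigned CondReg) {
      // G_ADD: one type0 def and two type0 uses, as declared above.
      BuildMI(&MBB, DL, TII.get(TargetOpcode::G_ADD), DstReg)
          .addReg(LHS)
          .addReg(RHS);
      // G_BRCOND: a type0 test register plus an unknown (basic-block) operand.
      BuildMI(&MBB, DL, TII.get(TargetOpcode::G_BRCOND))
          .addReg(CondReg)
          .addMBB(&TrueBB);
    }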
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index c71435a2564c..729d7669e0fa 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -141,7 +141,10 @@ class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
// of Operand's that are legal as type qualifiers in DAG patterns. This should
// only ever be used for defining multiclasses that are polymorphic over both
// RegisterClass's and other Operand's.
-class DAGOperand { }
+class DAGOperand {
+ string OperandNamespace = "MCOI";
+ string DecoderMethod = "";
+}
// RegisterClass - Now that all of the registers are defined, and aliases
// between registers are defined, specify which registers belong to which
@@ -368,6 +371,7 @@ class Instruction {
bit isSelect = 0; // Is this instruction a select instruction?
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
+ bit isAdd = 0; // Is this instruction an add instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
bit mayLoad = ?; // Is it possible for this inst to read memory?
bit mayStore = ?; // Is it possible for this inst to write memory?
@@ -485,6 +489,12 @@ class Instruction {
/// constraint. For example, "$Rn = $Rd".
string TwoOperandAliasConstraint = "";
+ /// Assembler variant name to use for this instruction. If specified, the
+ /// instruction will be presented only in the MatchTable for this variant. If
+ /// not specified, assembler variants will be determined based on the
+ /// AsmString.
+ string AsmVariantName = "";
+
///@}
/// UseNamedOperandTable - If set, the operand indices of this instruction
@@ -639,9 +649,7 @@ class Operand<ValueType ty> : DAGOperand {
ValueType Type = ty;
string PrintMethod = "printOperand";
string EncoderMethod = "";
- string DecoderMethod = "";
bit hasCompleteDecoder = 1;
- string OperandNamespace = "MCOI";
string OperandType = "OPERAND_UNKNOWN";
dag MIOperandInfo = (ops);
@@ -679,7 +687,6 @@ class RegisterOperand<RegisterClass regclass, string pm = "printOperand">
// should declare the other operand as one of its super classes.
AsmOperandClass ParserMatchClass;
- string OperandNamespace = "MCOI";
string OperandType = "OPERAND_REGISTER";
}
@@ -694,6 +701,20 @@ def f32imm : Operand<f32>;
def f64imm : Operand<f64>;
}
+// Register operands for generic instructions don't have an MVT, but do have
+// constraints linking the operands (e.g. all operands of a G_ADD must
+// have the same LLT).
+class TypedOperand<string Ty> : Operand<untyped> {
+ let OperandType = Ty;
+}
+
+def type0 : TypedOperand<"OPERAND_GENERIC_0">;
+def type1 : TypedOperand<"OPERAND_GENERIC_1">;
+def type2 : TypedOperand<"OPERAND_GENERIC_2">;
+def type3 : TypedOperand<"OPERAND_GENERIC_3">;
+def type4 : TypedOperand<"OPERAND_GENERIC_4">;
+def type5 : TypedOperand<"OPERAND_GENERIC_5">;
+
/// zero_reg definition - Special node to stand for the zero register.
///
def zero_reg;
@@ -956,8 +977,24 @@ def PATCHABLE_FUNCTION_ENTER : Instruction {
def PATCHABLE_RET : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins variable_ops);
+ let AsmString = "# XRay Function Patchable RET.";
+ let usesCustomInserter = 1;
+ let hasSideEffects = 1;
+ let isReturn = 1;
+}
+def PATCHABLE_FUNCTION_EXIT : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins);
let AsmString = "# XRay Function Exit.";
let usesCustomInserter = 1;
+ let hasSideEffects = 0; // FIXME: is this correct?
+ let isReturn = 0; // Original return instruction will follow
+}
+def PATCHABLE_TAIL_CALL : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "# XRay Tail Call Exit.";
+ let usesCustomInserter = 1;
let hasSideEffects = 1;
let isReturn = 1;
}
@@ -1118,6 +1155,10 @@ class InstAlias<string Asm, dag Result, int Emit = 1> {
// defined AsmMatchConverter and instead use the function generated by the
// dag Result.
bit UseInstAsmMatchConverter = 1;
+
+ // Assembler variant name to use for this alias. If not specified, assembler
+ // variants will be determined based on the AsmString.
+ string AsmVariantName = "";
}
//===----------------------------------------------------------------------===//
@@ -1299,3 +1340,8 @@ include "llvm/Target/TargetCallingConv.td"
// Pull in the common support for DAG isel generation.
//
include "llvm/Target/TargetSelectionDAG.td"
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for Global ISel generation.
+//
+include "llvm/Target/TargetGlobalISel.td"
diff --git a/include/llvm/Target/TargetCallingConv.h b/include/llvm/Target/TargetCallingConv.h
index 19d8917f17d3..be09236cdab0 100644
--- a/include/llvm/Target/TargetCallingConv.h
+++ b/include/llvm/Target/TargetCallingConv.h
@@ -51,6 +51,15 @@ namespace ISD {
static const uint64_t SwiftSelfOffs = 14;
static const uint64_t SwiftError = 1ULL<<15; ///< Swift error parameter
static const uint64_t SwiftErrorOffs = 15;
+ static const uint64_t Hva = 1ULL << 16; ///< HVA field for
+ ///< vectorcall
+ static const uint64_t HvaOffs = 16;
+ static const uint64_t HvaStart = 1ULL << 17; ///< HVA structure start
+ ///< for vectorcall
+ static const uint64_t HvaStartOffs = 17;
+ static const uint64_t SecArgPass = 1ULL << 18; ///< Second argument
+ ///< pass for vectorcall
+ static const uint64_t SecArgPassOffs = 18;
static const uint64_t OrigAlign = 0x1FULL<<27;
static const uint64_t OrigAlignOffs = 27;
static const uint64_t ByValSize = 0x3fffffffULL<<32; ///< Struct size
@@ -91,6 +100,15 @@ namespace ISD {
bool isSwiftError() const { return Flags & SwiftError; }
void setSwiftError() { Flags |= One << SwiftErrorOffs; }
+ bool isHva() const { return Flags & Hva; }
+ void setHva() { Flags |= One << HvaOffs; }
+
+ bool isHvaStart() const { return Flags & HvaStart; }
+ void setHvaStart() { Flags |= One << HvaStartOffs; }
+
+ bool isSecArgPass() const { return Flags & SecArgPass; }
+ void setSecArgPass() { Flags |= One << SecArgPassOffs; }
+
bool isNest() const { return Flags & Nest; }
void setNest() { Flags |= One << NestOffs; }
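The new HVA bits follow the existing ArgFlagsTy convention: each property reserves one bit, tests mask against the bit, and setters shift a one into the recorded offset. A standalone sketch of that pattern (illustrative only, not the LLVM class itself):

    #include <cstdint>

    // Illustrative reimplementation of the mask/offset flag pattern; the field
    // names mirror the diff, but this class is a stand-in, not ISD::ArgFlagsTy.
    class ArgFlagsSketch {
      static const uint64_t One = 1;
      static const uint64_t Hva = 1ULL << 16;      // HVA field for vectorcall
      static const uint64_t HvaOffs = 16;
      static const uint64_t HvaStart = 1ULL << 17; // HVA structure start
      static const uint64_t HvaStartOffs = 17;
      uint64_t Flags = 0;

    public:
      bool isHva() const { return Flags & Hva; }
      void setHva() { Flags |= One << HvaOffs; }
      bool isHvaStart() const { return Flags & HvaStart; }
      void setHvaStart() { Flags |= One << HvaStartOffs; }
    };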
diff --git a/include/llvm/Target/TargetFrameLowering.h b/include/llvm/Target/TargetFrameLowering.h
index 98065aca16f3..4576f8c7582b 100644
--- a/include/llvm/Target/TargetFrameLowering.h
+++ b/include/llvm/Target/TargetFrameLowering.h
@@ -179,12 +179,6 @@ public:
virtual void adjustForHiPEPrologue(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const {}
- /// Adjust the prologue to add an allocation at a fixed offset from the frame
- /// pointer.
- virtual void
- adjustForFrameAllocatePrologue(MachineFunction &MF,
- MachineBasicBlock &PrologueMBB) const {}
-
/// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
/// saved registers and returns true if it isn't possible / profitable to do
/// so by issuing a series of store instructions via
diff --git a/include/llvm/Target/TargetGlobalISel.td b/include/llvm/Target/TargetGlobalISel.td
new file mode 100644
index 000000000000..0727c9802e5e
--- /dev/null
+++ b/include/llvm/Target/TargetGlobalISel.td
@@ -0,0 +1,29 @@
+//===- TargetGlobalISel.td - Common code for GlobalISel ----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces used to support
+// SelectionDAG instruction selection patterns (specified in
+// TargetSelectionDAG.td) when generating GlobalISel instruction selectors.
+//
+// This is intended as a compatibility layer, to enable reuse of target
+// descriptions written for SelectionDAG without requiring explicit GlobalISel
+// support. It will eventually supersede SelectionDAG patterns.
+//
+//===----------------------------------------------------------------------===//
+
+// Declare that a generic Instruction is 'equivalent' to an SDNode, that is,
+// SelectionDAG patterns involving the SDNode can be transformed to match the
+// Instruction instead.
+class GINodeEquiv<Instruction i, SDNode node> {
+ Instruction I = i;
+ SDNode Node = node;
+}
+
+def : GINodeEquiv<G_ADD, add>;
+def : GINodeEquiv<G_BR, br>;
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index e0b9a22ed5d0..83515bc91841 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -250,6 +251,12 @@ public:
unsigned &Size, unsigned &Offset,
const MachineFunction &MF) const;
+ /// Returns the size in bytes of the specified MachineInstr, or ~0U
+ /// when this function is not implemented by a target.
+ virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
+ return ~0U;
+ }
+
/// Return true if the instruction is as cheap as a move instruction.
///
/// Targets for different archs need to override this, and different
@@ -263,11 +270,8 @@ public:
/// MachineSink determines on its own whether the instruction is safe to sink;
/// this gives the target a hook to override the default behavior with regards
/// to which instructions should be sunk.
- /// The default behavior is to not sink insert_subreg, subreg_to_reg, and
- /// reg_sequence. These are meant to be close to the source to make it easier
- /// to coalesce.
virtual bool shouldSink(const MachineInstr &MI) const {
- return !MI.isInsertSubreg() && !MI.isSubregToReg() && !MI.isRegSequence();
+ return true;
}
/// Re-issue the specified 'original' instruction at the
@@ -439,6 +443,31 @@ public:
const MachineInstr &MI1,
const MachineRegisterInfo *MRI = nullptr) const;
+ /// \returns true if a branch from an instruction with opcode \p BranchOpc
+ /// is capable of jumping to a position \p BrOffset bytes away.
+ virtual bool isBranchOffsetInRange(unsigned BranchOpc,
+ int64_t BrOffset) const {
+ llvm_unreachable("target did not implement");
+ }
+
+ /// \returns The block that branch instruction \p MI jumps to.
+ virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
+ llvm_unreachable("target did not implement");
+ }
+
+ /// Insert an unconditional indirect branch at the end of \p MBB to \p
+ /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to
+ /// the offset of the position to insert the new branch.
+ ///
+ /// \returns The number of bytes added to the block.
+ virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock &NewDestBB,
+ const DebugLoc &DL,
+ int64_t BrOffset = 0,
+ RegScavenger *RS = nullptr) const {
+ llvm_unreachable("target did not implement");
+ }
+
/// Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
/// implemented for a target). Upon success, this returns false and returns
@@ -458,7 +487,7 @@ public:
/// condition. These operands can be passed to other TargetInstrInfo
/// methods to create new branches.
///
- /// Note that RemoveBranch and InsertBranch must be implemented to support
+ /// Note that removeBranch and insertBranch must be implemented to support
/// cases where this method returns success.
///
/// If AllowModify is true, then this routine is allowed to modify the basic
@@ -521,15 +550,18 @@ public:
/// Remove the branching code at the end of the specific MBB.
/// This is only invoked in cases where AnalyzeBranch returns success. It
/// returns the number of instructions that were removed.
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
- llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!");
+ /// If \p BytesRemoved is non-null, report the change in code size from the
+ /// removed instructions.
+ virtual unsigned removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved = nullptr) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
}
- /// Insert branch code into the end of the specified MachineBasicBlock.
- /// The operands to this method are the same as those
- /// returned by AnalyzeBranch. This is only invoked in cases where
- /// AnalyzeBranch returns success. It returns the number of instructions
- /// inserted.
+ /// Insert branch code into the end of the specified MachineBasicBlock. The
+ /// operands to this method are the same as those returned by AnalyzeBranch.
+ /// This is only invoked in cases where AnalyzeBranch returns success. It
+ /// returns the number of instructions inserted. If \p BytesAdded is non-null,
+ /// report the change in code size from the added instructions.
///
/// It is also invoked by tail merging to add unconditional branches in
/// cases where AnalyzeBranch doesn't apply because there was no original
@@ -538,11 +570,40 @@ public:
///
/// The CFG information in MBB.Predecessors and MBB.Successors must be valid
/// before calling this function.
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
- const DebugLoc &DL) const {
- llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
+ const DebugLoc &DL,
+ int *BytesAdded = nullptr) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
+ }
+
+ unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *DestBB,
+ const DebugLoc &DL,
+ int *BytesAdded = nullptr) const {
+ return insertBranch(MBB, DestBB, nullptr,
+ ArrayRef<MachineOperand>(), DL, BytesAdded);
+ }
+
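A hedged sketch of the byte-accounting contract from a target's side; XXXInstrInfo is a hypothetical backend, every instruction is assumed to be a fixed 4 bytes, and insertBranch would report *BytesAdded symmetrically:

    // Hypothetical override: remove trailing branches and report the size delta
    // through the new BytesRemoved out-parameter (fixed-width encoding assumed).
    unsigned XXXInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
      unsigned Removed = 0;
      MachineBasicBlock::iterator I = MBB.end();
      while (I != MBB.begin()) {
        --I;
        if (I->isDebugValue())
          continue;
        if (!I->isBranch())
          break;                  // stop at the first non-branch instruction
        MBB.erase(I);
        ++Removed;
        I = MBB.end();            // restart scanning from the (new) end
      }
      if (BytesRemoved)
        *BytesRemoved = 4 * Removed;
      return Removed;
    }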
+ /// Analyze the loop code, return true if it cannot be understood. Upon
+ /// success, this function returns false and returns information about the
+ /// induction variable and compare instruction used at the end.
+ virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
+ MachineInstr *&CmpInst) const {
+ return true;
+ }
+
+ /// Generate code to reduce the loop iteration by one and check if the loop is
+ /// finished. Return the value/register of the new loop count. We need
+ /// this function when peeling off one or more iterations of a loop. This
+ /// function assumes the nth iteration is peeled first.
+ virtual unsigned reduceLoopCount(MachineBasicBlock &MBB,
+ MachineInstr *IndVar, MachineInstr &Cmp,
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallVectorImpl<MachineInstr *> &PrevInsts,
+ unsigned Iter, unsigned MaxIter) const {
+ llvm_unreachable("Target didn't implement ReduceLoopCount");
}
/// Delete the instruction OldInst and everything after it, replacing it with
@@ -550,40 +611,6 @@ public:
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MachineBasicBlock *NewDest) const;
- /// Get an instruction that performs an unconditional branch to the given
- /// symbol.
- virtual void
- getUnconditionalBranch(MCInst &MI,
- const MCSymbolRefExpr *BranchTarget) const {
- llvm_unreachable("Target didn't implement "
- "TargetInstrInfo::getUnconditionalBranch!");
- }
-
- /// Get a machine trap instruction.
- virtual void getTrap(MCInst &MI) const {
- llvm_unreachable("Target didn't implement TargetInstrInfo::getTrap!");
- }
-
- /// Get a number of bytes that suffices to hold
- /// either the instruction returned by getUnconditionalBranch or the
- /// instruction returned by getTrap. This only makes sense because
- /// getUnconditionalBranch returns a single, specific instruction. This
- /// information is needed by the jumptable construction code, since it must
- /// decide how many bytes to use for a jumptable entry so it can generate the
- /// right mask.
- ///
- /// Note that if the jumptable instruction requires alignment, then that
- /// alignment should be factored into this required bound so that the
- /// resulting bound gives the right alignment for the instruction.
- virtual unsigned getJumpInstrTableEntryBound() const {
- // This method gets called by LLVMTargetMachine always, so it can't fail
- // just because there happens to be no implementation for this target.
- // Any code that tries to use a jumptable annotation without defining
- // getUnconditionalBranch on the appropriate Target will fail anyway, and
- // the value returned here won't matter in that case.
- return 0;
- }
-
/// Return true if it's legal to split the given basic
/// block at the specified instruction (i.e. instruction would be the start
/// of a new basic block).
@@ -790,6 +817,20 @@ public:
/// anything was changed.
virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
+ /// Check whether the target can fold a load that feeds a subreg operand
+ /// (or a subreg operand that feeds a store).
+ /// For example, X86 may want to return true if it can fold
+ /// movl (%esp), %eax
+ /// subb, %al, ...
+ /// Into:
+ /// subb (%esp), ...
+ ///
+ /// Ideally, we'd like the target implementation of foldMemoryOperand() to
+ /// reject subregs - but since this behavior used to be enforced in the
+ /// target-independent code, moving this responsibility to the targets
+ /// has the potential of causing nasty silent breakage in out-of-tree targets.
+ virtual bool isSubregFoldable() const { return false; }
+
/// Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
@@ -1003,27 +1044,45 @@ public:
return false;
}
- virtual bool enableClusterLoads() const { return false; }
+ /// Return true if the instruction contains a base register and offset. If
+ /// true, the function also sets the operand position in the instruction
+ /// for the base register and offset.
+ virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
+ unsigned &BasePos,
+ unsigned &OffsetPos) const {
+ return false;
+ }
- virtual bool enableClusterStores() const { return false; }
+ /// If the instruction is an increment of a constant value, return the amount.
+ virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
+ return false;
+ }
+ /// Returns true if the two given memory operations should be scheduled
+ /// adjacent. Note that you have to add:
+ /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+ /// or
+ /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
+ /// to TargetPassConfig::createMachineScheduler() to have an effect.
virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt,
MachineInstr &SecondLdSt,
unsigned NumLoads) const {
- return false;
+ llvm_unreachable("target did not implement shouldClusterMemOps()");
}
/// Can this target fuse the given instructions if they are scheduled
- /// adjacent.
- virtual bool shouldScheduleAdjacent(MachineInstr &First,
- MachineInstr &Second) const {
- return false;
+ /// adjacent. Note that you have to add:
+ /// DAG.addMutation(createMacroFusionDAGMutation());
+ /// to TargetPassConfig::createMachineScheduler() to have an effect.
+ virtual bool shouldScheduleAdjacent(const MachineInstr &First,
+ const MachineInstr &Second) const {
+ llvm_unreachable("target did not implement shouldScheduleAdjacent()");
}
/// Reverses the branch condition of the specified condition list,
/// returning false on success and true if it cannot be reversed.
virtual
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ bool reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return true;
}
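As the comments above state, these hooks only take effect once the corresponding mutations are registered with the scheduler. A hedged sketch of that wiring, where XXXPassConfig is a hypothetical target pass config and the mutation calls are the ones named in the comments above (shouldScheduleAdjacent likewise needs the macro-fusion mutation registered):

    // Hypothetical TargetPassConfig override installing the load/store
    // clustering mutations so shouldClusterMemOps() is actually consulted.
    ScheduleDAGInstrs *
    XXXPassConfig::createMachineScheduler(MachineSchedContext *C) const {
      ScheduleDAGMILive *DAG = createGenericSchedLive(C);
      DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
      return DAG;
    }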
@@ -1035,6 +1094,10 @@ public:
/// Return the noop instruction to use for a noop.
virtual void getNoopForMachoTarget(MCInst &NopInst) const;
+ /// Return true for post-incremented instructions.
+ virtual bool isPostIncrement(const MachineInstr &MI) const {
+ return false;
+ }
/// Returns true if the instruction is already predicated.
virtual bool isPredicated(const MachineInstr &MI) const {
@@ -1045,6 +1108,25 @@ public:
/// terminator instruction that has not been predicated.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
+ /// Returns true if MI is an unconditional tail call.
+ virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
+ return false;
+ }
+
+ /// Returns true if the tail call can be made conditional on BranchCond.
+ virtual bool
+ canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
+ const MachineInstr &TailCall) const {
+ return false;
+ }
+
+ /// Replace the conditional branch in MBB with a conditional tail call.
+ virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ const MachineInstr &TailCall) const {
+ llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
+ }
+
/// Convert the instruction into a predicated instruction.
/// It returns true if the operation was successful.
virtual bool PredicateInstruction(MachineInstr &MI,
@@ -1195,22 +1277,6 @@ public:
const MachineInstr &UseMI,
unsigned UseIdx) const;
- /// Compute and return the latency of the given data dependent def and use
- /// when the operand indices are already known. UseMI may be \c nullptr for
- /// an unknown use.
- ///
- /// FindMin may be set to get the minimum vs. expected latency. Minimum
- /// latency is used for scheduling groups, while expected latency is for
- /// instruction cost and critical path.
- ///
- /// Depending on the subtarget's itinerary properties, this may or may not
- /// need to call getOperandLatency(). For most subtargets, we don't need
- /// DefIdx or UseIdx to compute min latency.
- unsigned computeOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr &DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
- unsigned UseIdx) const;
-
/// Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
@@ -1439,6 +1505,11 @@ public:
return None;
}
+ /// Determines whether |Inst| is a tail call instruction.
+ virtual bool isTailCall(const MachineInstr &Inst) const {
+ return false;
+ }
+
private:
unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
unsigned CatchRetOpcode;
diff --git a/include/llvm/Target/TargetIntrinsicInfo.h b/include/llvm/Target/TargetIntrinsicInfo.h
index c630f5b12a15..6a92bdee747e 100644
--- a/include/llvm/Target/TargetIntrinsicInfo.h
+++ b/include/llvm/Target/TargetIntrinsicInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
#define LLVM_TARGET_TARGETINTRINSICINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include <string>
@@ -47,8 +48,12 @@ public:
/// names.
virtual unsigned lookupName(const char *Name, unsigned Len) const =0;
+ unsigned lookupName(StringRef Name) const {
+ return lookupName(Name.data(), Name.size());
+ }
+
/// Return the target intrinsic ID of a function, or 0.
- virtual unsigned getIntrinsicID(Function *F) const;
+ virtual unsigned getIntrinsicID(const Function *F) const;
/// Returns true if the intrinsic can be overloaded.
virtual bool isOverloaded(unsigned IID) const = 0;
diff --git a/include/llvm/Target/TargetItinerary.td b/include/llvm/Target/TargetItinerary.td
index a37bbf2474c5..3b1998dfb1ff 100644
--- a/include/llvm/Target/TargetItinerary.td
+++ b/include/llvm/Target/TargetItinerary.td
@@ -92,7 +92,7 @@ def NoItinerary : InstrItinClass;
//
// OperandCycles are optional "cycle counts". They specify the cycle after
// instruction issue the values which correspond to specific operand indices
-// are defined or read. Bypasses are optional "pipeline forwarding pathes", if
+// are defined or read. Bypasses are optional "pipeline forwarding paths", if
// a def by an instruction is available on a specific bypass and the use can
// read from the same bypass, then the operand use latency is reduced by one.
//
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 4586a172e76b..fb43ef19a645 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -142,6 +142,13 @@ public:
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
};
+ /// Enum that specifies when a multiplication should be expanded.
+ enum class MulExpansionKind {
+ Always, // Always expand the instruction.
+ OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
+ // or custom.
+ };
+
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
case UndefinedBooleanContent:
@@ -190,9 +197,6 @@ public:
return getPointerTy(DL);
}
- /// Return true if the select operation is expensive for this target.
- bool isSelectExpensive() const { return SelectIsExpensive; }
-
virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
return true;
}
@@ -243,11 +247,43 @@ public:
return true;
}
- /// Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x)
- bool isFsqrtCheap() const {
- return FsqrtIsCheap;
+ /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
+ virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
+ // Default behavior is to replace SQRT(X) with X*RSQRT(X).
+ return false;
}
+ /// Reciprocal estimate status values used by the functions below.
+ enum ReciprocalEstimate : int {
+ Unspecified = -1,
+ Disabled = 0,
+ Enabled = 1
+ };
+
+ /// Return a ReciprocalEstimate enum value for a square root of the given type
+ /// based on the function's attributes. If the operation is not overridden by
+ /// the function's attributes, "Unspecified" is returned and target defaults
+ /// are expected to be used for instruction selection.
+ int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
+
+ /// Return a ReciprocalEstimate enum value for a division of the given type
+ /// based on the function's attributes. If the operation is not overridden by
+ /// the function's attributes, "Unspecified" is returned and target defaults
+ /// are expected to be used for instruction selection.
+ int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
+
+ /// Return the refinement step count for a square root of the given type based
+ /// on the function's attributes. If the operation is not overridden by
+ /// the function's attributes, "Unspecified" is returned and target defaults
+ /// are expected to be used for instruction selection.
+ int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
+
+ /// Return the refinement step count for a division of the given type based
+ /// on the function's attributes. If the operation is not overridden by
+ /// the function's attributes, "Unspecified" is returned and target defaults
+ /// are expected to be used for instruction selection.
+ int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
+
/// Returns true if target has indicated at least one type should be bypassed.
bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
@@ -321,6 +357,11 @@ public:
return false;
}
+ /// \brief Return true if ctlz instruction is fast.
+ virtual bool isCtlzFast() const {
+ return false;
+ }
+
/// Return true if it is safe to transform an integer-domain bitwise operation
/// into the equivalent floating-point operation. This should be set to true
/// if the target has IEEE-754-compliant fabs/fneg operations for the input
@@ -329,6 +370,12 @@ public:
return false;
}
+ /// \brief Return true if it is cheaper to split the store of a merged int val
+ /// from a pair of smaller values into multiple stores.
+ virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
+ return false;
+ }
+
/// \brief Return if the target supports combining a
/// chain like:
/// \code
@@ -360,6 +407,15 @@ public:
return false;
}
+ /// Return true if the target has a bitwise and-not operation:
+ /// X = ~A & B
+ /// This can be used to simplify select or other instructions.
+ virtual bool hasAndNot(SDValue X) const {
+ // If the target has the more complex version of this operation, assume that
+ // it has this operation too.
+ return hasAndNotCompare(X);
+ }
+
/// \brief Return true if the target wants to use the optimization that
/// turns ext(promotableInst1(...(promotableInstN(load)))) into
/// promotedInst1(...(promotedInstN(ext(load)))).
@@ -586,7 +642,7 @@ public:
/// Returns true if the operation can trap for the value type.
///
/// VT must be a legal type. By default, we optimistically assume most
- /// operations don't trap except for divide and remainder.
+ /// operations don't trap except for integer divide and remainder.
virtual bool canOpTrap(unsigned Op, EVT VT) const;
/// Similar to isShuffleMaskLegal. Targets can use this to
@@ -1010,10 +1066,15 @@ public:
return UseUnderscoreLongJmp;
}
- /// Return integer threshold on number of blocks to use jump tables rather
- /// than if sequence.
- int getMinimumJumpTableEntries() const {
- return MinimumJumpTableEntries;
+ /// Return lower limit for number of blocks in a jump table.
+ unsigned getMinimumJumpTableEntries() const;
+
+ /// Return upper limit for number of entries in a jump table.
+ /// Zero if no limit.
+ unsigned getMaximumJumpTableSize() const;
+
+ virtual bool isJumpTableRelative() const {
+ return TM.isPositionIndependent();
}
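A brief sketch of how a backend might use the new knobs from its TargetLowering constructor; the class name and thresholds are placeholders:

    // Hypothetical constructor fragment exercising the new jump-table limits.
    XXXTargetLowering::XXXTargetLowering(const TargetMachine &TM)
        : TargetLowering(TM) {
      // Only form a jump table when a switch has at least four case blocks.
      setMinimumJumpTableEntries(4);
      // Never emit a table with more than 1024 entries (0 would mean no limit).
      setMaximumJumpTableSize(1024);
    }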
/// If a physical register, this specifies the register that
@@ -1095,8 +1156,12 @@ public:
/// Should be used only when getIRStackGuard returns nullptr.
virtual Value *getSSPStackGuardCheck(const Module &M) const;
- /// If the target has a standard location for the unsafe stack pointer,
- /// returns the address of that location. Otherwise, returns nullptr.
+protected:
+ Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
+ bool UseTLS) const;
+
+public:
+ /// Returns the target-specific address of the unsafe stack pointer.
virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
/// Returns true if a cast between SrcAS and DestAS is a noop.
@@ -1104,6 +1169,12 @@ public:
return false;
}
+ /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
+ /// are happy to sink it into basic blocks.
+ virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+ return isNoopAddrSpaceCast(SrcAS, DestAS);
+ }
+
/// Return true if the pointer arguments to CI should be aligned by aligning
/// the object whose address is being passed. If so then MinSize is set to the
/// minimum size the object must be to be aligned and PrefAlign is set to the
@@ -1341,11 +1412,12 @@ protected:
UseUnderscoreLongJmp = Val;
}
- /// Indicate the number of blocks to generate jump tables rather than if
- /// sequence.
- void setMinimumJumpTableEntries(int Val) {
- MinimumJumpTableEntries = Val;
- }
+ /// Indicate the minimum number of blocks to generate jump tables.
+ void setMinimumJumpTableEntries(unsigned Val);
+
+ /// Indicate the maximum number of entries in jump tables.
+ /// Set to zero to generate unlimited jump tables.
+ void setMaximumJumpTableSize(unsigned);
/// If set to a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
@@ -1353,12 +1425,6 @@ protected:
StackPointerRegisterToSaveRestore = R;
}
- /// Tells the code generator not to expand operations into sequences that use
- /// the select operations if possible.
- void setSelectIsExpensive(bool isExpensive = true) {
- SelectIsExpensive = isExpensive;
- }
-
/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
@@ -1381,10 +1447,6 @@ protected:
/// control.
void setJumpIsExpensive(bool isExpensive = true);
- /// Tells the code generator that fsqrt is cheap, and should not be replaced
- /// with an alternative sequence of instructions.
- void setFsqrtIsCheap(bool isCheap = true) { FsqrtIsCheap = isCheap; }
-
/// Tells the code generator that this target supports floating point
/// exceptions and cares about preserving floating point exception behavior.
void setHasFloatingPointExceptions(bool FPExceptions = true) {
@@ -1401,21 +1463,9 @@ protected:
/// that class natively.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
- AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.SimpleTy] = RC;
}
- /// Remove all register classes.
- void clearRegisterClasses() {
- std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
-
- AvailableRegClasses.clear();
- }
-
- /// \brief Remove all operation actions.
- void clearOperationActions() {
- }
-
/// Return the largest legal super-reg register class of the register class
/// for the specified type and its associated "cost".
virtual std::pair<const TargetRegisterClass *, uint8_t>
@@ -1628,6 +1678,10 @@ public:
return -1;
}
+ virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const {
+ return true;
+ }
+
/// Return true if the specified immediate is legal icmp immediate, that is
/// the target has icmp instructions which can compare a register against the
/// immediate without having to materialize the immediate into a register.
@@ -1739,11 +1793,6 @@ public:
/// In other words, unless the target performs a post-isel load combining,
/// this information should not be provided because it will generate more
/// loads.
- virtual bool hasPairedLoad(Type * /*LoadedType*/,
- unsigned & /*RequiredAligment*/) const {
- return false;
- }
-
virtual bool hasPairedLoad(EVT /*LoadedType*/,
unsigned & /*RequiredAligment*/) const {
return false;
@@ -1893,10 +1942,6 @@ public:
private:
const TargetMachine &TM;
- /// Tells the code generator not to expand operations into sequences that use
- /// the select operations if possible.
- bool SelectIsExpensive;
-
/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
@@ -1910,9 +1955,6 @@ private:
/// combined with "shift" to BitExtract instructions.
bool HasExtractBitsInsn;
- // Don't expand fsqrt with an approximation based on the inverse sqrt.
- bool FsqrtIsCheap;
-
/// Tells the code generator to bypass slow divide or remainder
/// instructions. For example, BypassSlowDivWidths[32,8] tells the code
/// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
@@ -1938,9 +1980,6 @@ private:
/// Defaults to false.
bool UseUnderscoreLongJmp;
- /// Number of blocks threshold to use jump tables.
- int MinimumJumpTableEntries;
-
/// Information about the contents of the high-bits in boolean values held in
/// a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
@@ -2055,7 +2094,6 @@ private:
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
private:
- std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
/// Targets can specify ISD nodes that they would like PerformDAGCombine
/// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
@@ -2089,7 +2127,7 @@ protected:
virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
/// Depth that GatherAllAliases should continue looking for chain
- /// dependencies when trying to find a more preferrable chain. As an
+ /// dependencies when trying to find a more preferable chain. As an
/// approximation, this should be more than the number of consecutive stores
/// expected to be merged.
unsigned GatherAllAliasesMaxDepth;
@@ -2175,6 +2213,8 @@ class TargetLowering : public TargetLoweringBase {
void operator=(const TargetLowering&) = delete;
public:
+ struct DAGCombinerInfo;
+
/// NOTE: The TargetMachine owns TLOF.
explicit TargetLowering(const TargetMachine &TM);
@@ -2287,6 +2327,16 @@ public:
/// generalized for targets with other types of implicit widening casts.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
const SDLoc &dl);
+
+ /// Helper for SimplifyDemandedBits that can simplify an operation with
+ /// multiple uses. This function uses TLI.SimplifyDemandedBits to
+ /// simplify Operand \p OpIdx of \p User and then updates \p User with
+ /// the simplified version. No other uses of \p OpIdx are updated.
+ /// If \p User is the only user of \p OpIdx, this function behaves exactly
+ /// like TLI.SimplifyDemandedBits except that it also updates the DAG by
+ /// calling DCI.CommitTargetLoweringOpt.
+ bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx,
+ const APInt &Demanded, DAGCombinerInfo &DCI);
};
/// Look at Op. At this point, we know that only the DemandedMask bits of the
@@ -2296,9 +2346,17 @@ public:
/// expression and return a mask of KnownOne and KnownZero bits for the
/// expression (used to simplify the caller). The KnownZero/One bits may only
/// be accurate for those bits in the DemandedMask.
+ /// \p AssumeSingleUse When this parameter is true, this function will
+ /// attempt to simplify \p Op even if there are multiple uses.
+ /// Callers are responsible for correctly updating the DAG based on the
+ /// results of this function, because simply replacing TLO.Old
+ /// with TLO.New will be incorrect when this parameter is true and TLO.Old
+ /// has multiple uses.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
APInt &KnownZero, APInt &KnownOne,
- TargetLoweringOpt &TLO, unsigned Depth = 0) const;
+ TargetLoweringOpt &TLO,
+ unsigned Depth = 0,
+ bool AssumeSingleUse = false) const;
/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in the KnownZero/KnownOne bitsets.
@@ -2333,7 +2391,6 @@ public:
bool isCalledByLegalizer() const { return CalledByLegalizer; }
void AddToWorklist(SDNode *N);
- void RemoveFromWorklist(SDNode *N);
SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
@@ -2955,32 +3012,39 @@ public:
/// Hooks for building estimates in place of slower divisions and square
/// roots.
- /// Return a reciprocal square root estimate value for the input operand.
- /// The RefinementSteps output is the number of Newton-Raphson refinement
- /// iterations required to generate a sufficient (though not necessarily
- /// IEEE-754 compliant) estimate for the value type.
+ /// Return either a square root or its reciprocal estimate value for the input
+ /// operand.
+ /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
+ /// 'Enabled' as set by a potential default override attribute.
+ /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
+ /// refinement iterations required to generate a sufficient (though not
+ /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
/// The boolean UseOneConstNR output is used to select a Newton-Raphson
- /// algorithm implementation that uses one constant or two constants.
+ /// algorithm implementation that uses either one or two constants.
+ /// The boolean Reciprocal is used to select whether the estimate is for the
+ /// square root of the input operand or the reciprocal of its square root.
/// A target may choose to implement its own refinement within this function.
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
- virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
- unsigned &RefinementSteps,
- bool &UseOneConstNR) const {
+ virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
+ int Enabled, int &RefinementSteps,
+ bool &UseOneConstNR, bool Reciprocal) const {
return SDValue();
}
/// Return a reciprocal estimate value for the input operand.
- /// The RefinementSteps output is the number of Newton-Raphson refinement
- /// iterations required to generate a sufficient (though not necessarily
- /// IEEE-754 compliant) estimate for the value type.
+ /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
+ /// 'Enabled' as set by a potential default override attribute.
+ /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
+ /// refinement iterations required to generate a sufficient (though not
+ /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
/// A target may choose to implement its own refinement within this function.
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
- virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
- unsigned &RefinementSteps) const {
+ virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
+ int Enabled, int &RefinementSteps) const {
return SDValue();
}
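A hedged sketch of a target override of the reworked square-root hook; XXXTargetLowering and XXXISD::FRSQRTE are hypothetical, and a real implementation would choose types and nodes to match its ISA:

    // Hypothetical override: supply an f32 reciprocal-square-root estimate and
    // honour the Enabled/RefinementSteps protocol described above.
    SDValue XXXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                               int Enabled, int &RefinementSteps,
                                               bool &UseOneConstNR,
                                               bool Reciprocal) const {
      EVT VT = Operand.getValueType();
      // This sketch only provides the reciprocal form for f32 and backs off
      // when the caller (or a function attribute) has disabled estimates.
      if (!Reciprocal || VT != MVT::f32 ||
          Enabled == ReciprocalEstimate::Disabled)
        return SDValue();
      if (RefinementSteps == ReciprocalEstimate::Unspecified)
        RefinementSteps = 1;   // request one Newton-Raphson iteration
      UseOneConstNR = true;    // use the one-constant NR formulation
      // XXXISD::FRSQRTE stands in for a target-specific estimate node.
      return DAG.getNode(XXXISD::FRSQRTE, SDLoc(Operand), VT, Operand);
    }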
@@ -2988,6 +3052,22 @@ public:
// Legalization utility functions
//
+ /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
+ /// respectively, each computing an n/2-bit part of the result.
+ /// \param Result A vector that will be filled with the parts of the result
+ /// in little-endian order.
+ /// \param LL Low bits of the LHS of the MUL. You can use this parameter
+ /// if you want to control how low bits are extracted from the LHS.
+ /// \param LH High bits of the LHS of the MUL. See LL for meaning.
+ /// \param RL Low bits of the RHS of the MUL. See LL for meaning
+ /// \param RH High bits of the RHS of the MUL. See LL for meaning.
+ /// \returns true if the node has been expanded, false if it has not
+ bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
+ SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
+ SelectionDAG &DAG, MulExpansionKind Kind,
+ SDValue LL = SDValue(), SDValue LH = SDValue(),
+ SDValue RL = SDValue(), SDValue RH = SDValue()) const;
+
/// Expand a MUL into two nodes. One that computes the high bits of
/// the result and one that computes the low bits.
/// \param HiLoVT The value type to use for the Lo and Hi nodes.
@@ -2998,9 +3078,9 @@ public:
/// \param RH High bits of the RHS of the MUL. See LL for meaning.
/// \returns true if the node has been expanded. false if it has not
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
- SelectionDAG &DAG, SDValue LL = SDValue(),
- SDValue LH = SDValue(), SDValue RL = SDValue(),
- SDValue RH = SDValue()) const;
+ SelectionDAG &DAG, MulExpansionKind Kind,
+ SDValue LL = SDValue(), SDValue LH = SDValue(),
+ SDValue RL = SDValue(), SDValue RH = SDValue()) const;
/// Expand float(f32) to SINT(i64) conversion
/// \param N Node to expand
@@ -3027,6 +3107,17 @@ public:
/// possibly more for vectors.
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
+ /// Increments memory address \p Addr according to the type of the value
+ /// \p DataVT that should be stored. If the data is stored in compressed
+ /// form, the memory address should be incremented according to the number of
+ /// the stored elements. This number is equal to the number of '1' bits
+ /// in the \p Mask.
+ /// \p DataVT is a vector type. \p Mask is a vector value.
+ /// \p DataVT and \p Mask have the same number of vector elements.
+ SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
+ EVT DataVT, SelectionDAG &DAG,
+ bool IsCompressedMemory) const;
+
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
@@ -3059,6 +3150,12 @@ public:
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
SelectionDAG &DAG) const;
+ // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
+ // If we're comparing for equality to zero and isCtlzFast is true, expose the
+ // fact that this can be implemented as a ctlz/srl pair, so that the dag
+ // combiner can fold the new nodes.
+ SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
+
private:
SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, DAGCombinerInfo &DCI,
diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h
index 189eff4f3953..72bae0a38e65 100644
--- a/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/include/llvm/Target/TargetLoweringObjectFile.h
@@ -37,6 +37,9 @@ namespace llvm {
class TargetLoweringObjectFile : public MCObjectFileInfo {
MCContext *Ctx;
+ /// Name-mangler for global names.
+ Mangler *Mang = nullptr;
+
TargetLoweringObjectFile(
const TargetLoweringObjectFile&) = delete;
void operator=(const TargetLoweringObjectFile&) = delete;
@@ -45,12 +48,19 @@ protected:
bool SupportIndirectSymViaGOTPCRel;
bool SupportGOTPCRelWithOffset;
+ /// This section contains the static constructor pointer list.
+ MCSection *StaticCtorSection;
+
+ /// This section contains the static destructor pointer list.
+ MCSection *StaticDtorSection;
+
public:
MCContext &getContext() const { return *Ctx; }
+ Mangler &getMangler() const { return *Mang; }
TargetLoweringObjectFile()
- : MCObjectFileInfo(), Ctx(nullptr), SupportIndirectSymViaGOTPCRel(false),
- SupportGOTPCRelWithOffset(true) {}
+ : MCObjectFileInfo(), Ctx(nullptr), Mang(nullptr),
+ SupportIndirectSymViaGOTPCRel(false), SupportGOTPCRelWithOffset(true) {}
virtual ~TargetLoweringObjectFile();
@@ -65,7 +75,7 @@ public:
/// Emit the module flags that the platform cares about.
virtual void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> Flags,
- Mangler &Mang, const TargetMachine &TM) const {}
+ const TargetMachine &TM) const {}
/// Given a constant with the SectionKind, return a section that it should be
/// placed in.
@@ -76,28 +86,28 @@ public:
/// Classify the specified global variable into a set of target independent
/// categories embodied in SectionKind.
- static SectionKind getKindForGlobal(const GlobalValue *GV,
+ static SectionKind getKindForGlobal(const GlobalObject *GO,
const TargetMachine &TM);
/// This method computes the appropriate section to emit the specified global
/// variable or function definition. This should not be passed external (or
/// available externally) globals.
- MCSection *SectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang, const TargetMachine &TM) const;
+ MCSection *SectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+ const TargetMachine &TM) const;
/// This method computes the appropriate section to emit the specified global
/// variable or function definition. This should not be passed external (or
/// available externally) globals.
- MCSection *SectionForGlobal(const GlobalValue *GV, Mangler &Mang,
+ MCSection *SectionForGlobal(const GlobalObject *GO,
const TargetMachine &TM) const {
- return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM);
+ return SectionForGlobal(GO, getKindForGlobal(GO, TM), TM);
}
virtual void getNameWithPrefix(SmallVectorImpl<char> &OutName,
- const GlobalValue *GV, Mangler &Mang,
+ const GlobalValue *GV,
const TargetMachine &TM) const;
- virtual MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
+ virtual MCSection *getSectionForJumpTable(const Function &F,
const TargetMachine &TM) const;
virtual bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
@@ -105,40 +115,32 @@ public:
/// Targets should implement this method to assign a section to globals with
/// an explicit section specified. The implementation of this method can
- /// assume that GV->hasSection() is true.
+ /// assume that GO->hasSection() is true.
virtual MCSection *
- getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang, const TargetMachine &TM) const = 0;
-
- /// Allow the target to completely override section assignment of a global.
- virtual const MCSection *getSpecialCasedSectionGlobals(const GlobalValue *GV,
- SectionKind Kind,
- Mangler &Mang) const {
- return nullptr;
- }
+ getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+ const TargetMachine &TM) const = 0;
/// Return an MCExpr to use for a reference to the specified global variable
/// from exception handling information.
- virtual const MCExpr *
- getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
- Mangler &Mang, const TargetMachine &TM,
- MachineModuleInfo *MMI, MCStreamer &Streamer) const;
+ virtual const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+ unsigned Encoding,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const;
/// Return the MCSymbol for a private symbol with global value name as its
/// base, with the specified suffix.
MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
- StringRef Suffix, Mangler &Mang,
+ StringRef Suffix,
const TargetMachine &TM) const;
// The symbol that gets passed to .cfi_personality.
virtual MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
- Mangler &Mang,
const TargetMachine &TM,
MachineModuleInfo *MMI) const;
- const MCExpr *
- getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
- MCStreamer &Streamer) const;
+ const MCExpr *getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const;
virtual MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const {
@@ -154,9 +156,9 @@ public:
/// emitting the address in debug info.
virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
- virtual const MCExpr *
- lowerRelativeReference(const GlobalValue *LHS, const GlobalValue *RHS,
- Mangler &Mang, const TargetMachine &TM) const {
+ virtual const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
+ const GlobalValue *RHS,
+ const TargetMachine &TM) const {
return nullptr;
}
@@ -181,12 +183,12 @@ public:
return nullptr;
}
- virtual void emitLinkerFlagsForGlobal(raw_ostream &OS, const GlobalValue *GV,
- const Mangler &Mang) const {}
+ virtual void emitLinkerFlagsForGlobal(raw_ostream &OS,
+ const GlobalValue *GV) const {}
protected:
- virtual MCSection *SelectSectionForGlobal(const GlobalValue *GV,
- SectionKind Kind, Mangler &Mang,
+ virtual MCSection *SelectSectionForGlobal(const GlobalObject *GO,
+ SectionKind Kind,
const TargetMachine &TM) const = 0;
};
diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h
index 563fef96acfc..f5493283eee6 100644
--- a/include/llvm/Target/TargetMachine.h
+++ b/include/llvm/Target/TargetMachine.h
@@ -193,12 +193,6 @@ public:
bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }
- /// Returns the default value of asm verbosity.
- ///
- bool getAsmVerbosityDefault() const {
- return Options.MCOptions.AsmVerbose;
- }
-
bool getUniqueSectionNames() const { return Options.UniqueSectionNames; }
/// Return true if data objects should be emitted into their own section,
@@ -241,7 +235,8 @@ public:
virtual bool addPassesToEmitFile(
PassManagerBase &, raw_pwrite_stream &, CodeGenFileType,
bool /*DisableVerify*/ = true, AnalysisID /*StartBefore*/ = nullptr,
- AnalysisID /*StartAfter*/ = nullptr, AnalysisID /*StopAfter*/ = nullptr,
+ AnalysisID /*StartAfter*/ = nullptr, AnalysisID /*StopBefore*/ = nullptr,
+ AnalysisID /*StopAfter*/ = nullptr,
MachineFunctionInitializer * /*MFInitializer*/ = nullptr) {
return true;
}
@@ -266,7 +261,7 @@ public:
void getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV,
Mangler &Mang, bool MayAlwaysUsePrivate = false) const;
- MCSymbol *getSymbol(const GlobalValue *GV, Mangler &Mang) const;
+ MCSymbol *getSymbol(const GlobalValue *GV) const;
/// True if the target uses physical regs at Prolog/Epilog insertion
/// time. If true (most machines), all vregs must be allocated before
@@ -302,7 +297,8 @@ public:
bool addPassesToEmitFile(
PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
bool DisableVerify = true, AnalysisID StartBefore = nullptr,
- AnalysisID StartAfter = nullptr, AnalysisID StopAfter = nullptr,
+ AnalysisID StartAfter = nullptr, AnalysisID StopBefore = nullptr,
+ AnalysisID StopAfter = nullptr,
MachineFunctionInitializer *MFInitializer = nullptr) override;
/// Add passes to the specified pass manager to get machine code emitted with
@@ -312,13 +308,6 @@ public:
bool addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
raw_pwrite_stream &OS,
bool DisableVerify = true) override;
-
- /// Add MachineModuleInfo pass to pass manager.
- MachineModuleInfo &addMachineModuleInfo(PassManagerBase &PM) const;
-
- /// Add MachineFunctionAnalysis pass to pass manager.
- void addMachineFunctionAnalysis(PassManagerBase &PM,
- MachineFunctionInitializer *MFInitializer) const;
};
} // End llvm namespace
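The TargetMachine hunks add a StopBefore pass ID between StartAfter and StopAfter. A minimal sketch of a caller emitting an object file through the widened interface, assuming TM, M and Dest are set up elsewhere; only the argument order is taken from the declaration above.

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    static bool emitObjectFile(TargetMachine &TM, Module &M, raw_pwrite_stream &Dest) {
      legacy::PassManager PM;
      // Explicit argument list with the new StopBefore slot:
      // DisableVerify, StartBefore, StartAfter, StopBefore, StopAfter.
      if (TM.addPassesToEmitFile(PM, Dest, TargetMachine::CGFT_ObjectFile,
                                 /*DisableVerify=*/true,
                                 /*StartBefore=*/nullptr, /*StartAfter=*/nullptr,
                                 /*StopBefore=*/nullptr, /*StopAfter=*/nullptr))
        return false; // the target does not support object file emission
      PM.run(M);
      return true;
    }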
diff --git a/include/llvm/Target/TargetOpcodes.def b/include/llvm/Target/TargetOpcodes.def
index abab6c7a2a7c..edb9b7350ca7 100644
--- a/include/llvm/Target/TargetOpcodes.def
+++ b/include/llvm/Target/TargetOpcodes.def
@@ -27,22 +27,22 @@
/// Every instruction defined here must also appear in Target.td.
///
-HANDLE_TARGET_OPCODE(PHI, 0)
-HANDLE_TARGET_OPCODE(INLINEASM, 1)
-HANDLE_TARGET_OPCODE(CFI_INSTRUCTION, 2)
-HANDLE_TARGET_OPCODE(EH_LABEL, 3)
-HANDLE_TARGET_OPCODE(GC_LABEL, 4)
+HANDLE_TARGET_OPCODE(PHI)
+HANDLE_TARGET_OPCODE(INLINEASM)
+HANDLE_TARGET_OPCODE(CFI_INSTRUCTION)
+HANDLE_TARGET_OPCODE(EH_LABEL)
+HANDLE_TARGET_OPCODE(GC_LABEL)
/// KILL - This instruction is a noop that is used only to adjust the
/// liveness of registers. This can be useful when dealing with
/// sub-registers.
-HANDLE_TARGET_OPCODE(KILL, 5)
+HANDLE_TARGET_OPCODE(KILL)
/// EXTRACT_SUBREG - This instruction takes two operands: a register
/// that has subregisters, and a subregister index. It returns the
/// extracted subregister value. This is commonly used to implement
/// truncation operations on target architectures which support it.
-HANDLE_TARGET_OPCODE(EXTRACT_SUBREG, 6)
+HANDLE_TARGET_OPCODE(EXTRACT_SUBREG)
/// INSERT_SUBREG - This instruction takes three operands: a register that
/// has subregisters, a register providing an insert value, and a
@@ -50,16 +50,20 @@ HANDLE_TARGET_OPCODE(EXTRACT_SUBREG, 6)
/// value of the second register inserted. The first register is often
/// defined by an IMPLICIT_DEF, because it is commonly used to implement
/// anyext operations on target architectures which support it.
-HANDLE_TARGET_OPCODE(INSERT_SUBREG, 7)
+HANDLE_TARGET_OPCODE(INSERT_SUBREG)
/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
-HANDLE_TARGET_OPCODE(IMPLICIT_DEF, 8)
+HANDLE_TARGET_OPCODE(IMPLICIT_DEF)
-/// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
-/// the first operand is an immediate integer constant. This constant is
-/// often zero, because it is commonly used to assert that the instruction
-/// defining the register implicitly clears the high bits.
-HANDLE_TARGET_OPCODE(SUBREG_TO_REG, 9)
+/// SUBREG_TO_REG - Assert the value of bits in a super register.
+/// The result of this instruction is the value of the second operand inserted
+/// into the subregister specified by the third operand. All other bits are
+/// assumed to be equal to the bits in the immediate integer constant in the
+/// first operand. This instruction just communicates information; no code
+/// should be generated.
+/// This is typically used after an instruction where the write to a subregister
+/// implicitly cleared the bits in the super registers.
+HANDLE_TARGET_OPCODE(SUBREG_TO_REG)
/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
/// register-to-register copy into a specific register class. This is only
@@ -67,10 +71,10 @@ HANDLE_TARGET_OPCODE(SUBREG_TO_REG, 9)
/// virtual registers have been created for all the instructions, and it's
/// only needed in cases where the register classes implied by the
/// instructions are insufficient. It is emitted as a COPY MachineInstr.
-HANDLE_TARGET_OPCODE(COPY_TO_REGCLASS, 10)
+HANDLE_TARGET_OPCODE(COPY_TO_REGCLASS)
/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
-HANDLE_TARGET_OPCODE(DBG_VALUE, 11)
+HANDLE_TARGET_OPCODE(DBG_VALUE)
/// REG_SEQUENCE - This variadic instruction is used to form a register that
/// represents a consecutive sequence of sub-registers. It's used as a
@@ -83,55 +87,55 @@ HANDLE_TARGET_OPCODE(DBG_VALUE, 11)
/// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
/// After register coalescing references of v1024 should be replace with
/// v1027:3, v1025 with v1027:4, etc.
-HANDLE_TARGET_OPCODE(REG_SEQUENCE, 12)
+HANDLE_TARGET_OPCODE(REG_SEQUENCE)
/// COPY - Target-independent register copy. This instruction can also be
/// used to copy between subregisters of virtual registers.
-HANDLE_TARGET_OPCODE(COPY, 13)
+HANDLE_TARGET_OPCODE(COPY)
/// BUNDLE - This instruction represents an instruction bundle. Instructions
/// which immediately follow a BUNDLE instruction which are marked with
/// 'InsideBundle' flag are inside the bundle.
-HANDLE_TARGET_OPCODE(BUNDLE, 14)
+HANDLE_TARGET_OPCODE(BUNDLE)
/// Lifetime markers.
-HANDLE_TARGET_OPCODE(LIFETIME_START, 15)
-HANDLE_TARGET_OPCODE(LIFETIME_END, 16)
+HANDLE_TARGET_OPCODE(LIFETIME_START)
+HANDLE_TARGET_OPCODE(LIFETIME_END)
/// A Stackmap instruction captures the location of live variables at its
/// position in the instruction stream. It is followed by a shadow of bytes
/// that must lie within the function and not contain another stackmap.
-HANDLE_TARGET_OPCODE(STACKMAP, 17)
+HANDLE_TARGET_OPCODE(STACKMAP)
/// Patchable call instruction - this instruction represents a call to a
/// constant address, followed by a series of NOPs. It is intended to
/// support optimizations for dynamic languages (such as javascript) that
/// rewrite calls to runtimes with more efficient code sequences.
/// This also implies a stack map.
-HANDLE_TARGET_OPCODE(PATCHPOINT, 18)
+HANDLE_TARGET_OPCODE(PATCHPOINT)
/// This pseudo-instruction loads the stack guard value. Targets which need
/// to prevent the stack guard value or address from being spilled to the
/// stack should override TargetLowering::emitLoadStackGuardNode and
/// additionally expand this pseudo after register allocation.
-HANDLE_TARGET_OPCODE(LOAD_STACK_GUARD, 19)
+HANDLE_TARGET_OPCODE(LOAD_STACK_GUARD)
/// Call instruction with associated vm state for deoptimization and list
/// of live pointers for relocation by the garbage collector. It is
/// intended to support garbage collection with fully precise relocating
/// collectors and deoptimizations in either the callee or caller.
-HANDLE_TARGET_OPCODE(STATEPOINT, 20)
+HANDLE_TARGET_OPCODE(STATEPOINT)
/// Instruction that records the offset of a local stack allocation passed to
/// llvm.localescape. It has two arguments: the symbol for the label and the
/// frame index of the local stack allocation.
-HANDLE_TARGET_OPCODE(LOCAL_ESCAPE, 21)
+HANDLE_TARGET_OPCODE(LOCAL_ESCAPE)
/// Loading instruction that may page fault, bundled with associated
/// information on how to handle such a page fault. It is intended to support
/// "zero cost" null checks in managed languages by allowing LLVM to fold
/// comparisons into existing memory operations.
-HANDLE_TARGET_OPCODE(FAULTING_LOAD_OP, 22)
+HANDLE_TARGET_OPCODE(FAULTING_LOAD_OP)
/// Wraps a machine instruction to add patchability constraints. An
/// instruction wrapped in PATCHABLE_OP has to either have a minimum
@@ -140,30 +144,220 @@ HANDLE_TARGET_OPCODE(FAULTING_LOAD_OP, 22)
/// second operand is an immediate denoting the opcode of the original
/// instruction. The rest of the operands are the operands of the
/// original instruction.
-HANDLE_TARGET_OPCODE(PATCHABLE_OP, 23)
+HANDLE_TARGET_OPCODE(PATCHABLE_OP)
/// This is a marker instruction which gets translated into a nop sled, useful
/// for inserting instrumentation instructions at runtime.
-HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_ENTER, 24)
+HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_ENTER)
/// Wraps a return instruction and its operands to enable adding nop sleds
/// either before or after the return. The nop sleds are useful for inserting
/// instrumentation instructions at runtime.
-HANDLE_TARGET_OPCODE(PATCHABLE_RET, 25)
+/// The patch here replaces the return instruction.
+HANDLE_TARGET_OPCODE(PATCHABLE_RET)
+
+/// This is a marker instruction which gets translated into a nop sled, useful
+/// for inserting instrumentation instructions at runtime.
+/// The patch here is prepended to the return instruction.
+/// The same thing as on x86_64 is not possible on ARM because it has multiple
+/// return instructions. Furthermore, the CPU allows parametrized and even
+/// conditional return instructions. The current ARM implementation relies on
+/// the fact that LLVM does not currently seem to generate conditional return
+/// instructions.
+/// On ARM, the same instruction can be used for popping multiple registers
+/// from the stack and returning (it just pops the pc register too), and LLVM
+/// sometimes generates it. So we can't insert the sled between this stack
+/// adjustment and the return without splitting the original instruction into
+/// two instructions. So on ARM, rather than jumping into the exit trampoline,
+/// we call it; it does the tracing, preserves the stack, and returns.
+HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_EXIT)
+
+/// Wraps a tail call instruction and its operands to enable adding nop sleds
+/// either before or after the tail exit. We use this as a disambiguation from
+/// PATCHABLE_RET which specifically only works for return instructions.
+HANDLE_TARGET_OPCODE(PATCHABLE_TAIL_CALL)
/// The following generic opcodes are not supposed to appear after ISel.
/// This is something we might want to relax, but for now, this is convenient
/// to produce diagnostics.
/// Generic ADD instruction. This is an integer add.
-HANDLE_TARGET_OPCODE(G_ADD, 26)
+HANDLE_TARGET_OPCODE(G_ADD)
HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPCODE_START, G_ADD)
-/// Generic Bitwise-OR instruction.
-HANDLE_TARGET_OPCODE(G_OR, 27)
+/// Generic SUB instruction. This is an integer sub.
+HANDLE_TARGET_OPCODE(G_SUB)
+
+// Generic multiply instruction.
+HANDLE_TARGET_OPCODE(G_MUL)
+
+// Generic signed division instruction.
+HANDLE_TARGET_OPCODE(G_SDIV)
+
+// Generic unsigned division instruction.
+HANDLE_TARGET_OPCODE(G_UDIV)
+
+// Generic signed remainder instruction.
+HANDLE_TARGET_OPCODE(G_SREM)
+
+// Generic unsigned remainder instruction.
+HANDLE_TARGET_OPCODE(G_UREM)
+
+/// Generic bitwise and instruction.
+HANDLE_TARGET_OPCODE(G_AND)
+
+/// Generic bitwise or instruction.
+HANDLE_TARGET_OPCODE(G_OR)
+
+/// Generic bitwise exclusive-or instruction.
+HANDLE_TARGET_OPCODE(G_XOR)
+
+
+/// Generic instruction to materialize the address of an alloca or other
+/// stack-based object.
+HANDLE_TARGET_OPCODE(G_FRAME_INDEX)
+
+/// Generic reference to global value.
+HANDLE_TARGET_OPCODE(G_GLOBAL_VALUE)
+
+/// Generic instruction to extract blocks of bits from the register given
+/// (typically a sub-register COPY after instruction selection).
+HANDLE_TARGET_OPCODE(G_EXTRACT)
+
+/// Generic instruction to insert blocks of bits from the registers given into
+/// the source.
+HANDLE_TARGET_OPCODE(G_INSERT)
+
+/// Generic instruction to paste a variable number of components together into a
+/// larger register.
+HANDLE_TARGET_OPCODE(G_SEQUENCE)
+
+/// Generic pointer to int conversion.
+HANDLE_TARGET_OPCODE(G_PTRTOINT)
+
+/// Generic int to pointer conversion.
+HANDLE_TARGET_OPCODE(G_INTTOPTR)
+
+/// Generic bitcast. The source and destination types must be different;
+/// otherwise a COPY is the appropriate instruction.
+HANDLE_TARGET_OPCODE(G_BITCAST)
+
+/// Generic load.
+HANDLE_TARGET_OPCODE(G_LOAD)
+
+/// Generic store.
+HANDLE_TARGET_OPCODE(G_STORE)
+
+/// Generic conditional branch instruction.
+HANDLE_TARGET_OPCODE(G_BRCOND)
+
+/// Generic intrinsic use (without side effects).
+HANDLE_TARGET_OPCODE(G_INTRINSIC)
+
+/// Generic intrinsic use (with side effects).
+HANDLE_TARGET_OPCODE(G_INTRINSIC_W_SIDE_EFFECTS)
+
+/// Generic extension allowing rubbish in high bits.
+HANDLE_TARGET_OPCODE(G_ANYEXT)
+
+/// Generic instruction to discard the high bits of a register. This differs
+/// from (G_EXTRACT val, 0) on its action on vectors: G_TRUNC will truncate
+/// each element individually, G_EXTRACT will typically discard the high
+/// elements of the vector.
+HANDLE_TARGET_OPCODE(G_TRUNC)
+
+/// Generic integer constant.
+HANDLE_TARGET_OPCODE(G_CONSTANT)
+
+/// Generic floating constant.
+HANDLE_TARGET_OPCODE(G_FCONSTANT)
+
+// Generic sign extend
+HANDLE_TARGET_OPCODE(G_SEXT)
+
+// Generic zero extend
+HANDLE_TARGET_OPCODE(G_ZEXT)
+
+// Generic left-shift
+HANDLE_TARGET_OPCODE(G_SHL)
+
+// Generic logical right-shift
+HANDLE_TARGET_OPCODE(G_LSHR)
+
+// Generic arithmetic right-shift
+HANDLE_TARGET_OPCODE(G_ASHR)
+
+/// Generic integer-based comparison, also applicable to vectors of integers.
+HANDLE_TARGET_OPCODE(G_ICMP)
+
+/// Generic floating-point comparison, also applicable to vectors.
+HANDLE_TARGET_OPCODE(G_FCMP)
+
+/// Generic select.
+HANDLE_TARGET_OPCODE(G_SELECT)
+
+/// Generic unsigned add instruction, consuming the normal operands plus a carry
+/// flag, and similarly producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_UADDE)
+
+/// Generic unsigned subtract instruction, consuming the normal operands plus a
+/// carry flag, and similarly producing the result and a carry flag.
+HANDLE_TARGET_OPCODE(G_USUBE)
+
+/// Generic signed add instruction, producing the result and a signed overflow
+/// flag.
+HANDLE_TARGET_OPCODE(G_SADDO)
+
+/// Generic signed subtract instruction, producing the result and a signed
+/// overflow flag.
+HANDLE_TARGET_OPCODE(G_SSUBO)
+
+/// Generic unsigned multiply instruction, producing the result and an
+/// unsigned overflow flag.
+HANDLE_TARGET_OPCODE(G_UMULO)
+
+/// Generic signed multiply instruction, producing the result and a signed
+/// overflow flag.
+HANDLE_TARGET_OPCODE(G_SMULO)
+
+/// Generic FP addition.
+HANDLE_TARGET_OPCODE(G_FADD)
+
+/// Generic FP subtraction.
+HANDLE_TARGET_OPCODE(G_FSUB)
+
+/// Generic FP multiplication.
+HANDLE_TARGET_OPCODE(G_FMUL)
+
+/// Generic FP division.
+HANDLE_TARGET_OPCODE(G_FDIV)
+
+/// Generic FP remainder.
+HANDLE_TARGET_OPCODE(G_FREM)
+
+/// Generic floating-point extension.
+HANDLE_TARGET_OPCODE(G_FPEXT)
+
+/// Generic floating-point truncation.
+HANDLE_TARGET_OPCODE(G_FPTRUNC)
+
+/// Generic float to signed-int conversion
+HANDLE_TARGET_OPCODE(G_FPTOSI)
+
+/// Generic float to unsigned-int conversion
+HANDLE_TARGET_OPCODE(G_FPTOUI)
+
+/// Generic signed-int to float conversion
+HANDLE_TARGET_OPCODE(G_SITOFP)
+
+/// Generic unsigned-int to float conversion
+HANDLE_TARGET_OPCODE(G_UITOFP)
+
+/// Generic pointer offset calculation (getelementptr).
+HANDLE_TARGET_OPCODE(G_GEP)
/// Generic BRANCH instruction. This is an unconditional branch.
-HANDLE_TARGET_OPCODE(G_BR, 28)
+HANDLE_TARGET_OPCODE(G_BR)
// TODO: Add more generic opcodes as we move along.
diff --git a/include/llvm/Target/TargetOpcodes.h b/include/llvm/Target/TargetOpcodes.h
index f851fc27527b..33df133a4d58 100644
--- a/include/llvm/Target/TargetOpcodes.h
+++ b/include/llvm/Target/TargetOpcodes.h
@@ -20,7 +20,7 @@ namespace llvm {
///
namespace TargetOpcode {
enum {
-#define HANDLE_TARGET_OPCODE(OPC, NUM) OPC = NUM,
+#define HANDLE_TARGET_OPCODE(OPC) OPC,
#define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
#include "llvm/Target/TargetOpcodes.def"
};
@@ -32,6 +32,11 @@ static inline bool isPreISelGenericOpcode(unsigned Opcode) {
return Opcode >= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START &&
Opcode <= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
}
+
+/// Check whether the given Opcode is a target-specific opcode.
+static inline bool isTargetSpecificOpcode(unsigned Opcode) {
+ return Opcode > TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+}
} // end namespace llvm
#endif
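Dropping the explicit numbers works because TargetOpcodes.def is an X-macro list: the enum in TargetOpcodes.h assigns the values implicitly, in list order. A small self-contained illustration of the same technique, with a made-up opcode list standing in for the .def file (this is the pattern, not LLVM code):

    #include <cstdio>

    // Stand-in for the .def file: every entry is expanded with whatever
    // HANDLE_OPCODE is defined to be at the point of use.
    #define MY_OPCODE_LIST(HANDLE_OPCODE)                                        \
      HANDLE_OPCODE(PHI)                                                         \
      HANDLE_OPCODE(INLINEASM)                                                   \
      HANDLE_OPCODE(KILL)

    // Expansion 1: the enum. Values are assigned implicitly, so inserting an
    // opcode in the middle renumbers everything after it automatically.
    enum Opcode {
    #define HANDLE_OPCODE(OPC) OPC,
      MY_OPCODE_LIST(HANDLE_OPCODE)
    #undef HANDLE_OPCODE
    };

    // Expansion 2: a parallel name table generated from the same list.
    static const char *const OpcodeNames[] = {
    #define HANDLE_OPCODE(OPC) #OPC,
      MY_OPCODE_LIST(HANDLE_OPCODE)
    #undef HANDLE_OPCODE
    };

    int main() {
      std::printf("%s = %d\n", OpcodeNames[KILL], KILL); // prints "KILL = 2"
      return 0;
    }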
diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h
index 57873b4bd0b4..f5134d99b039 100644
--- a/include/llvm/Target/TargetOptions.h
+++ b/include/llvm/Target/TargetOptions.h
@@ -15,9 +15,7 @@
#ifndef LLVM_TARGET_TARGETOPTIONS_H
#define LLVM_TARGET_TARGETOPTIONS_H
-#include "llvm/Target/TargetRecip.h"
#include "llvm/MC/MCTargetOptions.h"
-#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class MachineFunction;
@@ -57,6 +55,15 @@ namespace llvm {
};
}
+ namespace FPDenormal {
+ enum DenormalMode {
+ IEEE, // IEEE 754 denormal numbers
+ PreserveSign, // the sign of a flushed-to-zero number is preserved in
+ // the sign of 0
+ PositiveZero // denormals are flushed to positive zero
+ };
+ }
+
enum class EABI {
Unknown,
Default, // Default means not specified
@@ -94,6 +101,7 @@ namespace llvm {
TargetOptions()
: PrintMachineCode(false), LessPreciseFPMADOption(false),
UnsafeFPMath(false), NoInfsFPMath(false), NoNaNsFPMath(false),
+ NoTrappingFPMath(false),
HonorSignDependentRoundingFPMathOption(false), NoZerosInBSS(false),
GuaranteedTailCallOpt(false), StackAlignmentOverride(0),
StackSymbolOrdering(true), EnableFastISel(false), UseInitArray(false),
@@ -102,9 +110,10 @@ namespace llvm {
DataSections(false), UniqueSectionNames(true), TrapUnreachable(false),
EmulatedTLS(false), EnableIPRA(false),
FloatABIType(FloatABI::Default),
- AllowFPOpFusion(FPOpFusion::Standard), Reciprocals(TargetRecip()),
- JTType(JumpTable::Single), ThreadModel(ThreadModel::POSIX),
+ AllowFPOpFusion(FPOpFusion::Standard),
+ ThreadModel(ThreadModel::POSIX),
EABIVersion(EABI::Default), DebuggerTuning(DebuggerKind::Default),
+ FPDenormalMode(FPDenormal::IEEE),
ExceptionModel(ExceptionHandling::None) {}
/// PrintMachineCode - This flag is enabled when the -print-machineinstrs
@@ -144,6 +153,11 @@ namespace llvm {
/// assume the FP arithmetic arguments and results are never NaNs.
unsigned NoNaNsFPMath : 1;
+ /// NoTrappingFPMath - This flag is enabled when the
+ /// -enable-no-trapping-fp-math option is specified on the command line. It
+ /// specifies that there are no trap handlers to handle exceptions.
+ unsigned NoTrappingFPMath : 1;
+
/// HonorSignDependentRoundingFPMath - This returns true when the
/// -enable-sign-dependent-rounding-fp-math is specified. If this returns
/// false (the default), the code generator is allowed to assume that the
@@ -237,13 +251,6 @@ namespace llvm {
/// the value of this option.
FPOpFusion::FPOpFusionMode AllowFPOpFusion;
- /// This class encapsulates options for reciprocal-estimate code generation.
- TargetRecip Reciprocals;
-
- /// JTType - This flag specifies the type of jump-instruction table to
- /// create for functions that have the jumptable attribute.
- JumpTable::JumpTableType JTType;
-
/// ThreadModel - This flag specifies the type of threading model to assume
/// for things like atomics
ThreadModel::Model ThreadModel;
@@ -254,6 +261,10 @@ namespace llvm {
/// Which debugger to tune for.
DebuggerKind DebuggerTuning;
+ /// FPDenormalMode - This flag specifies which denormal numbers the code
+ /// is permitted to require.
+ FPDenormal::DenormalMode FPDenormalMode;
+
/// What exception model to use
ExceptionHandling ExceptionModel;
@@ -271,6 +282,7 @@ inline bool operator==(const TargetOptions &LHS,
ARE_EQUAL(UnsafeFPMath) &&
ARE_EQUAL(NoInfsFPMath) &&
ARE_EQUAL(NoNaNsFPMath) &&
+ ARE_EQUAL(NoTrappingFPMath) &&
ARE_EQUAL(HonorSignDependentRoundingFPMathOption) &&
ARE_EQUAL(NoZerosInBSS) &&
ARE_EQUAL(GuaranteedTailCallOpt) &&
@@ -281,11 +293,10 @@ inline bool operator==(const TargetOptions &LHS,
ARE_EQUAL(EmulatedTLS) &&
ARE_EQUAL(FloatABIType) &&
ARE_EQUAL(AllowFPOpFusion) &&
- ARE_EQUAL(Reciprocals) &&
- ARE_EQUAL(JTType) &&
ARE_EQUAL(ThreadModel) &&
ARE_EQUAL(EABIVersion) &&
ARE_EQUAL(DebuggerTuning) &&
+ ARE_EQUAL(FPDenormalMode) &&
ARE_EQUAL(ExceptionModel) &&
ARE_EQUAL(MCOptions) &&
ARE_EQUAL(EnableIPRA);
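The new NoTrappingFPMath bit and FPDenormalMode field are plain members of TargetOptions, so backend code reads them off the TargetMachine. A minimal sketch; the helper and the policy it encodes are made up, only the field names come from the hunks above.

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Target/TargetMachine.h"
    #include "llvm/Target/TargetOptions.h"
    using namespace llvm;

    // Hypothetical helper: may an FP transformation ignore traps and assume a
    // non-IEEE denormal mode?
    static bool mayRelaxFPBehavior(const MachineFunction &MF) {
      const TargetOptions &Opts = MF.getTarget().Options;
      bool NoTraps = Opts.NoTrappingFPMath;                             // new bit-field
      bool FlushesDenormals = Opts.FPDenormalMode != FPDenormal::IEEE;  // new field
      return NoTraps && FlushesDenormals;
    }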
diff --git a/include/llvm/Target/TargetRecip.h b/include/llvm/Target/TargetRecip.h
deleted file mode 100644
index 309b96079131..000000000000
--- a/include/llvm/Target/TargetRecip.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//===--------------------- llvm/Target/TargetRecip.h ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class is used to customize machine-specific reciprocal estimate code
-// generation in a target-independent way.
-// If a target does not support operations in this specification, then code
-// generation will default to using supported operations.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETRECIP_H
-#define LLVM_TARGET_TARGETRECIP_H
-
-#include "llvm/ADT/StringRef.h"
-#include <cstdint>
-#include <map>
-#include <string>
-#include <vector>
-
-namespace llvm {
-
-struct TargetRecip {
-public:
- TargetRecip();
-
- /// Initialize all or part of the operations from command-line options or
- /// a front end.
- TargetRecip(const std::vector<std::string> &Args);
-
- /// Set whether a particular reciprocal operation is enabled and how many
- /// refinement steps are needed when using it. Use "all" to set enablement
- /// and refinement steps for all operations.
- void setDefaults(StringRef Key, bool Enable, unsigned RefSteps);
-
- /// Return true if the reciprocal operation has been enabled by default or
- /// from the command-line. Return false if the operation has been disabled
- /// by default or from the command-line.
- bool isEnabled(StringRef Key) const;
-
- /// Return the number of iterations necessary to refine the
- /// the result of a machine instruction for the given reciprocal operation.
- unsigned getRefinementSteps(StringRef Key) const;
-
- bool operator==(const TargetRecip &Other) const;
-
-private:
- enum {
- Uninitialized = -1
- };
-
- struct RecipParams {
- int8_t Enabled;
- int8_t RefinementSteps;
-
- RecipParams() : Enabled(Uninitialized), RefinementSteps(Uninitialized) {}
- };
-
- std::map<StringRef, RecipParams> RecipMap;
- typedef std::map<StringRef, RecipParams>::iterator RecipIter;
- typedef std::map<StringRef, RecipParams>::const_iterator ConstRecipIter;
-
- bool parseGlobalParams(const std::string &Arg);
- void parseIndividualParams(const std::vector<std::string> &Args);
-};
-
-} // end namespace llvm
-
-#endif // LLVM_TARGET_TARGETRECIP_H
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index e5a6c8ed2f2d..3080e9a32c3a 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -17,6 +17,7 @@
#define LLVM_TARGET_TARGETREGISTERINFO_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/CallingConv.h"
@@ -35,23 +36,6 @@ class VirtRegMap;
class raw_ostream;
class LiveRegMatrix;
-/// A bitmask representing the covering of a register with sub-registers.
-///
-/// This is typically used to track liveness at sub-register granularity.
-/// Lane masks for sub-register indices are similar to register units for
-/// physical registers. The individual bits in a lane mask can't be assigned
-/// any specific meaning. They can be used to check if two sub-register
-/// indices overlap.
-///
-/// Iff the target has a register such that:
-///
-/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
-///
-/// then:
-///
-/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
-typedef unsigned LaneBitmask;
-
class TargetRegisterClass {
public:
typedef const MCPhysReg* iterator;
@@ -87,6 +71,11 @@ public:
/// Return the number of registers in this class.
unsigned getNumRegs() const { return MC->getNumRegs(); }
+ iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
+ getRegisters() const {
+ return make_range(MC->begin(), MC->end());
+ }
+
/// Return the specified register in the class.
unsigned getRegister(unsigned i) const {
return MC->getRegister(i);
@@ -263,7 +252,7 @@ private:
const LaneBitmask *SubRegIndexLaneMasks;
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
- unsigned CoveringLanes;
+ LaneBitmask CoveringLanes;
protected:
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
@@ -271,7 +260,7 @@ protected:
regclass_iterator RegClassEnd,
const char *const *SRINames,
const LaneBitmask *SRILaneMasks,
- unsigned CoveringLanes);
+ LaneBitmask CoveringLanes);
virtual ~TargetRegisterInfo();
public:
@@ -441,11 +430,6 @@ public:
virtual const MCPhysReg*
getCalleeSavedRegs(const MachineFunction *MF) const = 0;
- virtual const MCPhysReg*
- getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
- return nullptr;
- }
-
/// Return a mask of call-preserved registers for the given calling convention
/// on the current function. The mask should include all call-preserved
/// aliases. This is used by the register allocator to determine which
@@ -485,10 +469,20 @@ public:
/// Returns a bitset indexed by physical register number indicating if a
/// register is a special register that has particular uses and should be
- /// considered unavailable at all times, e.g. SP, RA. This is
- /// used by register scavenger to determine what registers are free.
+ /// considered unavailable at all times, e.g. stack pointer, return address.
+ /// A reserved register:
+ /// - is not allocatable
+ /// - is considered always live
+ /// - is ignored by liveness tracking
+ /// It is often necessary to reserve the super registers of a reserved
+ /// register as well, to avoid them getting allocated indirectly. You may use
+ /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
+ /// Returns true if PhysReg is unallocatable and constant throughout the
+ /// function. Used by MachineRegisterInfo::isConstantPhysReg().
+ virtual bool isConstantPhysReg(unsigned PhysReg) const { return false; }
+
/// Prior to adding the live-out mask to a stackmap or patchpoint
/// instruction, provide the target the opportunity to adjust it (mainly to
/// remove pseudo-registers that should be ignored).
@@ -512,7 +506,7 @@ public:
// For a copy-like instruction that defines a register of class DefRC with
// subreg index DefSubReg, reading from another source with class SrcRC and
- // subregister SrcSubReg return true if this is a preferrable copy
+ // subregister SrcSubReg return true if this is a preferable copy
// instruction or an earlier use should be used.
virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
unsigned DefSubReg,
@@ -808,6 +802,13 @@ public:
return false;
}
+ /// Returns true if the target requires using the RegScavenger directly for
+ /// frame elimination despite using requiresFrameIndexScavenging.
+ virtual bool requiresFrameIndexReplacementScavenging(
+ const MachineFunction &MF) const {
+ return false;
+ }
+
/// Returns true if the target wants the LocalStackAllocation pass to be run
/// and virtual base registers used for more efficient stack access.
virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
@@ -930,6 +931,14 @@ public:
/// getFrameRegister - This method should return the register used as a base
/// for values allocated in the current stack frame.
virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
+
+ /// Mark a register and all its aliases as reserved in the given set.
+ void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const;
+
+ /// Returns true if for every register in the set all super registers are part
+ /// of the set as well.
+ bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
+ ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
};
@@ -1115,9 +1124,6 @@ Printable PrintRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
/// registers on a \ref raw_ostream.
Printable PrintVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
-/// Create Printable object to print LaneBitmasks on a \ref raw_ostream.
-Printable PrintLaneMask(LaneBitmask LaneMask);
-
} // End llvm namespace
#endif
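markSuperRegs() and checkAllSuperRegsMarked() are intended to be used from a target's getReservedRegs() override, as the expanded comment above suggests. A minimal sketch of such an override; MyTargetRegisterInfo and MyTarget::SP are placeholders for a real backend's register-info class and register enum, whose declarations are omitted.

    #include "llvm/Target/TargetRegisterInfo.h"
    #include <cassert>
    using namespace llvm;

    BitVector MyTargetRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
      BitVector Reserved(getNumRegs());
      // Reserve the stack pointer and, with it, every super register, so the
      // aliases cannot be allocated behind our back.
      markSuperRegs(Reserved, MyTarget::SP); // hypothetical register
      assert(checkAllSuperRegsMarked(Reserved) &&
             "every super register of a reserved register must be reserved");
      return Reserved;
    }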
diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td
index 88375f77e230..4ddf7d77a23a 100644
--- a/include/llvm/Target/TargetSelectionDAG.td
+++ b/include/llvm/Target/TargetSelectionDAG.td
@@ -450,10 +450,10 @@ def ftrunc : SDNode<"ISD::FTRUNC" , SDTFPUnaryOp>;
def fceil : SDNode<"ISD::FCEIL" , SDTFPUnaryOp>;
def ffloor : SDNode<"ISD::FFLOOR" , SDTFPUnaryOp>;
def fnearbyint : SDNode<"ISD::FNEARBYINT" , SDTFPUnaryOp>;
-def frnd : SDNode<"ISD::FROUND" , SDTFPUnaryOp>;
+def fround : SDNode<"ISD::FROUND" , SDTFPUnaryOp>;
-def fround : SDNode<"ISD::FP_ROUND" , SDTFPRoundOp>;
-def fextend : SDNode<"ISD::FP_EXTEND" , SDTFPExtendOp>;
+def fpround : SDNode<"ISD::FP_ROUND" , SDTFPRoundOp>;
+def fpextend : SDNode<"ISD::FP_EXTEND" , SDTFPExtendOp>;
def fcopysign : SDNode<"ISD::FCOPYSIGN" , SDTFPSignOp>;
def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>;
@@ -1165,10 +1165,12 @@ class Pat<dag pattern, dag result> : Pattern<pattern, [result]>;
// e.g. X86 addressing mode - def addr : ComplexPattern<4, "SelectAddr", [add]>;
//
class ComplexPattern<ValueType ty, int numops, string fn,
- list<SDNode> roots = [], list<SDNodeProperty> props = []> {
+ list<SDNode> roots = [], list<SDNodeProperty> props = [],
+ int complexity = -1> {
ValueType Ty = ty;
int NumOperands = numops;
string SelectFunc = fn;
list<SDNode> RootNodes = roots;
list<SDNodeProperty> Properties = props;
+ int Complexity = complexity;
}
diff --git a/include/llvm/Target/TargetSubtargetInfo.h b/include/llvm/Target/TargetSubtargetInfo.h
index b929070484f9..bf4331383cb0 100644
--- a/include/llvm/Target/TargetSubtargetInfo.h
+++ b/include/llvm/Target/TargetSubtargetInfo.h
@@ -25,6 +25,8 @@ namespace llvm {
class CallLowering;
class DataLayout;
+class InstructionSelector;
+class LegalizerInfo;
class MachineFunction;
class MachineInstr;
class RegisterBankInfo;
@@ -69,6 +71,8 @@ public:
virtual ~TargetSubtargetInfo();
+ virtual bool isXRaySupported() const { return false; }
+
// Interfaces to the major aspects of target machine information:
//
// -- Instruction opcode and operand information
@@ -88,12 +92,23 @@ public:
return nullptr;
}
virtual const CallLowering *getCallLowering() const { return nullptr; }
+
+ // FIXME: This lets targets specialize the selector by subtarget (which lets
+ // us do things like a dedicated avx512 selector). However, we might want
+ // to also specialize selectors by MachineFunction, which would let us be
+ // aware of optsize/optnone and such.
+ virtual const InstructionSelector *getInstructionSelector() const {
+ return nullptr;
+ }
+
/// Target can subclass this hook to select a different DAG scheduler.
virtual RegisterScheduler::FunctionPassCtor
getDAGScheduler(CodeGenOpt::Level) const {
return nullptr;
}
+ virtual const LegalizerInfo *getLegalizerInfo() const { return nullptr; }
+
/// getRegisterInfo - If register information is available, return it. If
/// not, return null.
///
@@ -176,6 +191,12 @@ public:
std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
}
+ // \brief Provide an ordered list of schedule DAG mutations for the machine
+ // pipeliner.
+ virtual void getSMSMutations(
+ std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
+ }
+
// For use with PostRAScheduling: get the minimum optimization level needed
// to enable post-RA scheduling.
virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const {
@@ -203,6 +224,8 @@ public:
}
/// Enable tracking of subregister liveness in register allocator.
+ /// Please use MachineRegisterInfo::subRegLivenessEnabled() instead where
+ /// possible.
virtual bool enableSubRegLiveness() const { return false; }
};
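The subtarget is now where the GlobalISel components and XRay support are exposed. A minimal sketch of the overrides a backend might add; MyTargetSubtarget and its members are placeholders, only the virtual signatures come from the header above.

    #include "llvm/Target/TargetSubtargetInfo.h"
    using namespace llvm;

    class MyTargetSubtarget : public TargetSubtargetInfo {
    public:
      // XRay sleds can be emitted for this (hypothetical) target.
      bool isXRaySupported() const override { return true; }

      // GlobalISel hooks; real backends own these objects, here they are bare
      // pointers to keep the sketch short.
      const CallLowering *getCallLowering() const override { return CLI; }
      const InstructionSelector *getInstructionSelector() const override {
        return ISel;
      }
      const LegalizerInfo *getLegalizerInfo() const override { return Legalizer; }

    private:
      const CallLowering *CLI = nullptr;
      const InstructionSelector *ISel = nullptr;
      const LegalizerInfo *Legalizer = nullptr;
    };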
diff --git a/include/llvm/Transforms/Coroutines.h b/include/llvm/Transforms/Coroutines.h
new file mode 100644
index 000000000000..51beb44fdc56
--- /dev/null
+++ b/include/llvm/Transforms/Coroutines.h
@@ -0,0 +1,38 @@
+//===-- Coroutines.h - Coroutine Transformations ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Declare accessor functions for coroutine lowering passes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_COROUTINES_H
+#define LLVM_TRANSFORMS_COROUTINES_H
+
+namespace llvm {
+
+class Pass;
+class PassManagerBuilder;
+
+/// Add all coroutine passes to appropriate extension points.
+void addCoroutinePassesToExtensionPoints(PassManagerBuilder &Builder);
+
+/// Lower coroutine intrinsics that are not needed by later passes.
+Pass *createCoroEarlyPass();
+
+/// Split up coroutines into multiple functions driving their state machines.
+Pass *createCoroSplitPass();
+
+/// Analyze coroutine use sites, devirtualize resume/destroy calls, and elide
+/// heap allocation for the coroutine frame where possible.
+Pass *createCoroElidePass();
+
+/// Lower all remaining coroutine intrinsics.
+Pass *createCoroCleanupPass();
+
+}
+
+#endif
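These factory functions hook into existing pipelines; for PassManagerBuilder-based setups the intended entry point is addCoroutinePassesToExtensionPoints. A minimal sketch of wiring it into a legacy pipeline, assuming the module and the rest of the setup exist elsewhere:

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Coroutines.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    using namespace llvm;

    static void optimizeWithCoroutines(Module &M) {
      PassManagerBuilder Builder;
      Builder.OptLevel = 2;
      // Registers CoroEarly/CoroSplit/CoroElide/CoroCleanup at the extension
      // points where coroutine lowering has to happen.
      addCoroutinePassesToExtensionPoints(Builder);

      legacy::PassManager PM;
      Builder.populateModulePassManager(PM);
      PM.run(M);
    }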
diff --git a/include/llvm/Transforms/GCOVProfiler.h b/include/llvm/Transforms/GCOVProfiler.h
index f6521901a33e..66bd75c88e24 100644
--- a/include/llvm/Transforms/GCOVProfiler.h
+++ b/include/llvm/Transforms/GCOVProfiler.h
@@ -21,7 +21,7 @@ namespace llvm {
class GCOVProfilerPass : public PassInfoMixin<GCOVProfilerPass> {
public:
GCOVProfilerPass(const GCOVOptions &Options = GCOVOptions::getDefault()) : GCOVOpts(Options) { }
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
private:
GCOVOptions GCOVOpts;
diff --git a/include/llvm/Transforms/IPO.h b/include/llvm/Transforms/IPO.h
index f6731884870c..4bebc863b4a9 100644
--- a/include/llvm/Transforms/IPO.h
+++ b/include/llvm/Transforms/IPO.h
@@ -20,6 +20,7 @@
namespace llvm {
+struct InlineParams;
class StringRef;
class ModuleSummaryIndex;
class ModulePass;
@@ -27,6 +28,7 @@ class Pass;
class Function;
class BasicBlock;
class GlobalValue;
+class raw_ostream;
//===----------------------------------------------------------------------===//
//
@@ -42,6 +44,10 @@ ModulePass *createStripSymbolsPass(bool OnlyDebugInfo = false);
//
ModulePass *createStripNonDebugSymbolsPass();
+/// This function returns a new pass that downgrades the debug info in the
+/// module to line tables only.
+ModulePass *createStripNonLineTableDebugInfoPass();
+
//===----------------------------------------------------------------------===//
//
// These pass removes llvm.dbg.declare intrinsics.
@@ -89,7 +95,7 @@ ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool
//===----------------------------------------------------------------------===//
/// This pass performs iterative function importing from other modules.
-Pass *createFunctionImportPass(const ModuleSummaryIndex *Index = nullptr);
+Pass *createFunctionImportPass();
//===----------------------------------------------------------------------===//
/// createFunctionInliningPass - Return a new pass object that uses a heuristic
@@ -103,12 +109,7 @@ Pass *createFunctionImportPass(const ModuleSummaryIndex *Index = nullptr);
Pass *createFunctionInliningPass();
Pass *createFunctionInliningPass(int Threshold);
Pass *createFunctionInliningPass(unsigned OptLevel, unsigned SizeOptLevel);
-
-//===----------------------------------------------------------------------===//
-/// createAlwaysInlinerPass - Return a new pass object that inlines only
-/// functions that are marked as "always_inline".
-Pass *createAlwaysInlinerPass();
-Pass *createAlwaysInlinerPass(bool InsertLifetime);
+Pass *createFunctionInliningPass(InlineParams &Params);
//===----------------------------------------------------------------------===//
/// createPruneEHPass - Return a new pass object which transforms invoke
@@ -225,12 +226,19 @@ ModulePass *createCrossDSOCFIPass();
/// metadata.
ModulePass *createWholeProgramDevirtPass();
+/// This pass splits globals into pieces for the benefit of whole-program
+/// devirtualization and control-flow integrity.
+ModulePass *createGlobalSplitPass();
+
//===----------------------------------------------------------------------===//
// SampleProfilePass - Loads sample profile data from disk and generates
// IR metadata to reflect the profile.
ModulePass *createSampleProfileLoaderPass();
ModulePass *createSampleProfileLoaderPass(StringRef Name);
+/// Write ThinLTO-ready bitcode to Str.
+ModulePass *createWriteThinLTOBitcodePass(raw_ostream &Str);
+
} // End llvm namespace
#endif
diff --git a/include/llvm/Transforms/IPO/AlwaysInliner.h b/include/llvm/Transforms/IPO/AlwaysInliner.h
new file mode 100644
index 000000000000..15c80357e4a8
--- /dev/null
+++ b/include/llvm/Transforms/IPO/AlwaysInliner.h
@@ -0,0 +1,40 @@
+//===-- AlwaysInliner.h - Pass to inline "always_inline" functions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Provides passes for inlining "always_inline" functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
+#define LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Inlines functions marked as "always_inline".
+///
+/// Note that this does not inline call sites marked as always_inline and does
+/// not delete the functions even when all users are inlined. The normal
+/// inliner should be used to handle call site inlining; this pass's goal is to
+/// be the simplest possible pass to remove always_inline function definitions'
+/// uses by inlining them. The \c GlobalDCE pass can be used to remove these
+/// functions once all users are gone.
+struct AlwaysInlinerPass : PassInfoMixin<AlwaysInlinerPass> {
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+/// Create a legacy pass manager instance of a pass to inline and remove
+/// functions marked as "always_inline".
+Pass *createAlwaysInlinerLegacyPass(bool InsertLifetime = true);
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
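A minimal sketch of driving the legacy entry point declared above; the new-PM equivalent is simply adding AlwaysInlinerPass() to a ModulePassManager. Everything apart from the two declarations above is assumed boilerplate.

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/IPO/AlwaysInliner.h"
    using namespace llvm;

    static void inlineAlwaysInlineFunctions(Module &M) {
      legacy::PassManager PM;
      // Replaces the removed createAlwaysInlinerPass(); lifetime intrinsics
      // are still inserted by default.
      PM.add(createAlwaysInlinerLegacyPass(/*InsertLifetime=*/true));
      PM.run(M);
    }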
diff --git a/include/llvm/Transforms/IPO/CrossDSOCFI.h b/include/llvm/Transforms/IPO/CrossDSOCFI.h
index 409604a7f330..0979f5b79e86 100644
--- a/include/llvm/Transforms/IPO/CrossDSOCFI.h
+++ b/include/llvm/Transforms/IPO/CrossDSOCFI.h
@@ -21,7 +21,7 @@
namespace llvm {
class CrossDSOCFIPass : public PassInfoMixin<CrossDSOCFIPass> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
}
#endif // LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
diff --git a/include/llvm/Transforms/IPO/FunctionAttrs.h b/include/llvm/Transforms/IPO/FunctionAttrs.h
index c44cc43fc0f6..ee45f35bf11b 100644
--- a/include/llvm/Transforms/IPO/FunctionAttrs.h
+++ b/include/llvm/Transforms/IPO/FunctionAttrs.h
@@ -30,7 +30,8 @@ namespace llvm {
/// attribute. It also discovers function arguments that are not captured by
/// the function and marks them with the nocapture attribute.
struct PostOrderFunctionAttrsPass : PassInfoMixin<PostOrderFunctionAttrsPass> {
- PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM);
+ PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &UR);
};
/// Create a legacy pass manager instance of a pass to compute function attrs
@@ -50,7 +51,7 @@ Pass *createPostOrderFunctionAttrsLegacyPass();
class ReversePostOrderFunctionAttrsPass
: public PassInfoMixin<ReversePostOrderFunctionAttrsPass> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/IPO/FunctionImport.h b/include/llvm/Transforms/IPO/FunctionImport.h
index ba5db2b5c739..d7acbe883c5d 100644
--- a/include/llvm/Transforms/IPO/FunctionImport.h
+++ b/include/llvm/Transforms/IPO/FunctionImport.h
@@ -13,6 +13,8 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Error.h"
#include <functional>
#include <map>
@@ -42,25 +44,34 @@ public:
/// The set contains an entry for every global value the module exports.
typedef std::unordered_set<GlobalValue::GUID> ExportSetTy;
+ /// A function of this type is used to load modules referenced by the index.
+ typedef std::function<Expected<std::unique_ptr<Module>>(StringRef Identifier)>
+ ModuleLoaderTy;
+
/// Create a Function Importer.
- FunctionImporter(
- const ModuleSummaryIndex &Index,
- std::function<std::unique_ptr<Module>(StringRef Identifier)> ModuleLoader)
+ FunctionImporter(const ModuleSummaryIndex &Index, ModuleLoaderTy ModuleLoader)
: Index(Index), ModuleLoader(std::move(ModuleLoader)) {}
/// Import functions in Module \p M based on the supplied import list.
/// \p ForceImportReferencedDiscardableSymbols will set the ModuleLinker in
/// a mode where referenced discardable symbols in the source modules will be
/// imported as well even if they are not present in the ImportList.
- bool importFunctions(Module &M, const ImportMapTy &ImportList,
- bool ForceImportReferencedDiscardableSymbols = false);
+ Expected<bool>
+ importFunctions(Module &M, const ImportMapTy &ImportList,
+ bool ForceImportReferencedDiscardableSymbols = false);
private:
/// The summaries index used to trigger importing.
const ModuleSummaryIndex &Index;
/// Factory function to load a Module for a given identifier
- std::function<std::unique_ptr<Module>(StringRef Identifier)> ModuleLoader;
+ ModuleLoaderTy ModuleLoader;
+};
+
+/// The function importing pass
+class FunctionImportPass : public PassInfoMixin<FunctionImportPass> {
+public:
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// Compute all the imports and exports for every module in the Index.
@@ -102,12 +113,13 @@ void ComputeCrossModuleImportForModule(
void gatherImportedSummariesForModule(
StringRef ModulePath,
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
- const StringMap<FunctionImporter::ImportMapTy> &ImportLists,
+ const FunctionImporter::ImportMapTy &ImportList,
std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);
+/// Emit into \p OutputFilename the files module \p ModulePath will import from.
std::error_code
EmitImportsFiles(StringRef ModulePath, StringRef OutputFilename,
- const StringMap<FunctionImporter::ImportMapTy> &ImportLists);
+ const FunctionImporter::ImportMapTy &ModuleImports);
/// Resolve WeakForLinker values in \p TheModule based on the information
/// recorded in the summaries during global summary-based analysis.
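The loader callback now returns Expected<std::unique_ptr<Module>> rather than a bare unique_ptr, so load failures can carry an Error. A minimal sketch of building a FunctionImporter around parseIRFile; the helper and the error text are made up, and real clients typically load bitcode lazily instead.

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/IRReader/IRReader.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/SourceMgr.h"
    #include "llvm/Transforms/IPO/FunctionImport.h"
    using namespace llvm;

    static FunctionImporter makeImporter(const ModuleSummaryIndex &Index,
                                         LLVMContext &Ctx) {
      auto Loader = [&Ctx](StringRef Identifier)
          -> Expected<std::unique_ptr<Module>> {
        SMDiagnostic Err;
        std::unique_ptr<Module> M = parseIRFile(Identifier, Err, Ctx);
        if (!M)
          return make_error<StringError>("cannot load " + Identifier.str(),
                                         inconvertibleErrorCode());
        return std::move(M);
      };
      return FunctionImporter(Index, Loader);
    }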
diff --git a/include/llvm/Transforms/IPO/GlobalOpt.h b/include/llvm/Transforms/IPO/GlobalOpt.h
index 5a25a6db4390..ab9116810be1 100644
--- a/include/llvm/Transforms/IPO/GlobalOpt.h
+++ b/include/llvm/Transforms/IPO/GlobalOpt.h
@@ -24,7 +24,7 @@ namespace llvm {
/// Optimize globals that never have their address taken.
class GlobalOptPass : public PassInfoMixin<GlobalOptPass> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/IPO/GlobalSplit.h b/include/llvm/Transforms/IPO/GlobalSplit.h
new file mode 100644
index 000000000000..fb2c2d27338e
--- /dev/null
+++ b/include/llvm/Transforms/IPO/GlobalSplit.h
@@ -0,0 +1,30 @@
+//===- GlobalSplit.h - global variable splitter -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass uses inrange annotations on GEP indices to split globals where
+// beneficial. Clang currently attaches these annotations to references to
+// virtual table globals under the Itanium ABI for the benefit of the
+// whole-program virtual call optimization and control flow integrity passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
+#define LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+/// Pass to perform split of global variables.
+class GlobalSplitPass : public PassInfoMixin<GlobalSplitPass> {
+public:
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+}
+#endif // LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
diff --git a/include/llvm/Transforms/IPO/InferFunctionAttrs.h b/include/llvm/Transforms/IPO/InferFunctionAttrs.h
index f5cbf9eb0613..54e1c243ae27 100644
--- a/include/llvm/Transforms/IPO/InferFunctionAttrs.h
+++ b/include/llvm/Transforms/IPO/InferFunctionAttrs.h
@@ -24,7 +24,7 @@ namespace llvm {
/// A pass which infers function attributes from the names and signatures of
/// function declarations in a module.
struct InferFunctionAttrsPass : PassInfoMixin<InferFunctionAttrsPass> {
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// Create a legacy pass manager instance of a pass to infer function
diff --git a/include/llvm/Transforms/IPO/Inliner.h b/include/llvm/Transforms/IPO/Inliner.h
new file mode 100644
index 000000000000..b3ca5156e388
--- /dev/null
+++ b/include/llvm/Transforms/IPO/Inliner.h
@@ -0,0 +1,108 @@
+//===- Inliner.h - Inliner pass and infrastructure --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INLINER_H
+#define LLVM_TRANSFORMS_IPO_INLINER_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
+
+namespace llvm {
+class AssumptionCacheTracker;
+class CallSite;
+class DataLayout;
+class InlineCost;
+class OptimizationRemarkEmitter;
+class ProfileSummaryInfo;
+
+/// This class contains all of the helper code which is used to perform the
+/// inlining operations that do not depend on the policy. It contains the core
+/// bottom-up inlining infrastructure that specific inliner passes use.
+struct LegacyInlinerBase : public CallGraphSCCPass {
+ explicit LegacyInlinerBase(char &ID);
+ explicit LegacyInlinerBase(char &ID, bool InsertLifetime);
+
+ /// For this class, we declare that we require and preserve the call graph.
+ /// If the derived class implements this method, it should always explicitly
+ /// call the implementation here.
+ void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+ bool doInitialization(CallGraph &CG) override;
+
+ /// Main run interface method, this implements the interface required by the
+ /// Pass class.
+ bool runOnSCC(CallGraphSCC &SCC) override;
+
+ using llvm::Pass::doFinalization;
+ /// Remove now-dead linkonce functions at the end of processing to avoid
+ /// breaking the SCC traversal.
+ bool doFinalization(CallGraph &CG) override;
+
+ /// This method must be implemented by the subclass to determine the cost of
+ /// inlining the specified call site. If the cost returned is greater than
+ /// the current inline threshold, the call site is not inlined.
+ virtual InlineCost getInlineCost(CallSite CS) = 0;
+
+ /// Remove dead functions.
+ ///
+ /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag
+ /// which restricts it to deleting functions with an 'AlwaysInline'
+ /// attribute. This is useful for the InlineAlways pass that only wants to
+ /// deal with that subset of the functions.
+ bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false);
+
+ /// This function performs the main work of the pass. The default
+ /// runOnSCC() implementation calls skipSCC() before calling this method, but
+ /// derived classes which cannot be skipped can override that method and call
+ /// this function unconditionally.
+ bool inlineCalls(CallGraphSCC &SCC);
+
+private:
+ // Insert @llvm.lifetime intrinsics.
+ bool InsertLifetime;
+
+protected:
+ AssumptionCacheTracker *ACT;
+ ProfileSummaryInfo *PSI;
+ ImportedFunctionsInliningStatistics ImportedFunctionsStats;
+};
+
+/// The inliner pass for the new pass manager.
+///
+/// This pass wires together the inlining utilities and the inline cost
+/// analysis into a CGSCC pass. It considers every call in every function in
+/// the SCC and tries to inline if profitable. It can be tuned with a number of
+/// parameters to control what cost model is used and what tradeoffs are made
+/// when making the decision.
+///
+/// It should be noted that the legacy inliners do considerably more than this
+/// inliner pass does. They provide logic for manually merging allocas, and
+/// doing considerable DCE including the DCE of dead functions. This pass makes
+/// every attempt to be simpler. DCE of functions requires complex reasoning
+/// about comdat groups, etc. Instead, it is expected that other more focused
+/// passes be composed to achieve the same end result.
+class InlinerPass : public PassInfoMixin<InlinerPass> {
+public:
+ InlinerPass(InlineParams Params = getInlineParams())
+ : Params(std::move(Params)) {}
+
+ PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &UR);
+
+private:
+ InlineParams Params;
+};
+
+} // End llvm namespace
+
+#endif
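A minimal sketch of running InlinerPass under the new pass manager; the CGSCC adaptor and the PassBuilder registration calls come from the new-PM infrastructure headers, and the exact amount of setup required is an assumption of this sketch.

    #include "llvm/IR/Module.h"
    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Transforms/IPO/Inliner.h"
    using namespace llvm;

    static void runInliner(Module &M) {
      PassBuilder PB;
      LoopAnalysisManager LAM;
      FunctionAnalysisManager FAM;
      CGSCCAnalysisManager CGAM;
      ModuleAnalysisManager MAM;
      PB.registerModuleAnalyses(MAM);
      PB.registerCGSCCAnalyses(CGAM);
      PB.registerFunctionAnalyses(FAM);
      PB.registerLoopAnalyses(LAM);
      PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

      ModulePassManager MPM;
      // Default InlineParams, as in the constructor above; a CGSCC pass has to
      // be wrapped in the module-to-CGSCC adaptor to run at module scope.
      MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(InlinerPass()));
      MPM.run(M, MAM);
    }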
diff --git a/include/llvm/Transforms/IPO/InlinerPass.h b/include/llvm/Transforms/IPO/InlinerPass.h
deleted file mode 100644
index 59e10608a9ba..000000000000
--- a/include/llvm/Transforms/IPO/InlinerPass.h
+++ /dev/null
@@ -1,94 +0,0 @@
-//===- InlinerPass.h - Code common to all inliners --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a simple policy-based bottom-up inliner. This file
-// implements all of the boring mechanics of the bottom-up inlining, while the
-// subclass determines WHAT to inline, which is the much more interesting
-// component.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_IPO_INLINERPASS_H
-#define LLVM_TRANSFORMS_IPO_INLINERPASS_H
-
-#include "llvm/Analysis/CallGraphSCCPass.h"
-
-namespace llvm {
-class AssumptionCacheTracker;
-class CallSite;
-class DataLayout;
-class InlineCost;
-class ProfileSummaryInfo;
-template <class PtrType, unsigned SmallSize> class SmallPtrSet;
-
-/// Inliner - This class contains all of the helper code which is used to
-/// perform the inlining operations that do not depend on the policy.
-///
-struct Inliner : public CallGraphSCCPass {
- explicit Inliner(char &ID);
- explicit Inliner(char &ID, bool InsertLifetime);
-
- /// getAnalysisUsage - For this class, we declare that we require and preserve
- /// the call graph. If the derived class implements this method, it should
- /// always explicitly call the implementation here.
- void getAnalysisUsage(AnalysisUsage &Info) const override;
-
- // Main run interface method, this implements the interface required by the
- // Pass class.
- bool runOnSCC(CallGraphSCC &SCC) override;
-
- using llvm::Pass::doFinalization;
- // doFinalization - Remove now-dead linkonce functions at the end of
- // processing to avoid breaking the SCC traversal.
- bool doFinalization(CallGraph &CG) override;
-
- /// getInlineCost - This method must be implemented by the subclass to
- /// determine the cost of inlining the specified call site. If the cost
- /// returned is greater than the current inline threshold, the call site is
- /// not inlined.
- ///
- virtual InlineCost getInlineCost(CallSite CS) = 0;
-
- /// removeDeadFunctions - Remove dead functions.
- ///
- /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag
- /// which restricts it to deleting functions with an 'AlwaysInline'
- /// attribute. This is useful for the InlineAlways pass that only wants to
- /// deal with that subset of the functions.
- bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false);
-
- /// This function performs the main work of the pass. The default
- /// of Inlinter::runOnSCC() calls skipSCC() before calling this method, but
- /// derived classes which cannot be skipped can override that method and
- /// call this function unconditionally.
- bool inlineCalls(CallGraphSCC &SCC);
-
-private:
- // InsertLifetime - Insert @llvm.lifetime intrinsics.
- bool InsertLifetime;
-
- /// shouldInline - Return true if the inliner should attempt to
- /// inline at the given CallSite.
- bool shouldInline(CallSite CS);
- /// Return true if inlining of CS can block the caller from being
- /// inlined which is proved to be more beneficial. \p IC is the
- /// estimated inline cost associated with callsite \p CS.
- /// \p TotalAltCost will be set to the estimated cost of inlining the caller
- /// if \p CS is suppressed for inlining.
- bool shouldBeDeferred(Function *Caller, CallSite CS, InlineCost IC,
- int &TotalAltCost);
-
-protected:
- AssumptionCacheTracker *ACT;
- ProfileSummaryInfo *PSI;
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/include/llvm/Transforms/IPO/Internalize.h b/include/llvm/Transforms/IPO/Internalize.h
index ba1b06877d3a..45d676d9f77b 100644
--- a/include/llvm/Transforms/IPO/Internalize.h
+++ b/include/llvm/Transforms/IPO/Internalize.h
@@ -63,7 +63,7 @@ public:
/// internalizing a function (by removing any edge from the "external node")
bool internalizeModule(Module &TheModule, CallGraph *CG = nullptr);
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// Helper function to internalize functions and variables in a Module.
diff --git a/include/llvm/Transforms/IPO/LowerTypeTests.h b/include/llvm/Transforms/IPO/LowerTypeTests.h
index 93d4fb94e2c4..23c59c199a3b 100644
--- a/include/llvm/Transforms/IPO/LowerTypeTests.h
+++ b/include/llvm/Transforms/IPO/LowerTypeTests.h
@@ -205,7 +205,7 @@ struct ByteArrayBuilder {
class LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/IPO/PartialInlining.h b/include/llvm/Transforms/IPO/PartialInlining.h
index 48eb1e30a191..15407fc36a22 100644
--- a/include/llvm/Transforms/IPO/PartialInlining.h
+++ b/include/llvm/Transforms/IPO/PartialInlining.h
@@ -24,9 +24,6 @@ namespace llvm {
class PartialInlinerPass : public PassInfoMixin<PartialInlinerPass> {
public:
PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
-
-private:
- Function *unswitchFunction(Function *F);
};
}
#endif // LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
diff --git a/include/llvm/Transforms/IPO/PassManagerBuilder.h b/include/llvm/Transforms/IPO/PassManagerBuilder.h
index 4f483deeefe5..9f9ce467337e 100644
--- a/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -100,6 +100,11 @@ public:
/// peephole optimizations similar to the instruction combiner. These passes
/// will be inserted after each instance of the instruction combiner pass.
EP_Peephole,
+
+ /// EP_CGSCCOptimizerLate - This extension point allows adding CallGraphSCC
+ /// passes at the end of the main CallGraphSCC passes and before any
+ /// function simplification passes run by CGPassManager.
+ EP_CGSCCOptimizerLate,
};
/// The Optimization Level - Specify the basic optimization level.
@@ -119,9 +124,6 @@ public:
/// added to the per-module passes.
Pass *Inliner;
- /// The module summary index to use for function importing.
- const ModuleSummaryIndex *ModuleSummary;
-
bool DisableTailCalls;
bool DisableUnitAtATime;
bool DisableUnrollLoops;
@@ -130,6 +132,7 @@ public:
bool LoopVectorize;
bool RerollLoops;
bool LoadCombine;
+ bool NewGVN;
bool DisableGVNLoadPRE;
bool VerifyInput;
bool VerifyOutput;
@@ -138,10 +141,14 @@ public:
bool PrepareForThinLTO;
bool PerformThinLTO;
+ /// Enable profile instrumentation pass.
+ bool EnablePGOInstrGen;
/// Profile data file name that the instrumentation will be written to.
std::string PGOInstrGen;
/// Path of the profile data file.
std::string PGOInstrUse;
+ /// Path of the sample Profile data file.
+ std::string PGOSampleUse;
private:
/// ExtensionList - This is list of all of the extensions that are registered.
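[Editorial note] The new EP_CGSCCOptimizerLate extension point follows the same registration pattern as the existing points. A minimal sketch of how a client might use it is below; `buildPipeline`, `addMyLatePass`, and `createMyCGSCCCleanupPass` are hypothetical names introduced only for illustration, and the rest relies on the long-standing addExtension()/populateModulePassManager() API.

  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/Transforms/IPO/PassManagerBuilder.h"

  using namespace llvm;

  // Callback invoked when the builder reaches EP_CGSCCOptimizerLate.
  static void addMyLatePass(const PassManagerBuilder &Builder,
                            legacy::PassManagerBase &PM) {
    PM.add(createMyCGSCCCleanupPass()); // hypothetical CallGraphSCC pass factory
  }

  void buildPipeline(legacy::PassManager &MPM) {
    PassManagerBuilder PMB;
    PMB.OptLevel = 2;
    // Run the extra pass after the main CallGraphSCC passes, before the
    // function simplification passes run by CGPassManager.
    PMB.addExtension(PassManagerBuilder::EP_CGSCCOptimizerLate, addMyLatePass);
    PMB.populateModulePassManager(MPM);
  }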
diff --git a/include/llvm/Transforms/IPO/SCCP.h b/include/llvm/Transforms/IPO/SCCP.h
index fab731342144..7082006f14a6 100644
--- a/include/llvm/Transforms/IPO/SCCP.h
+++ b/include/llvm/Transforms/IPO/SCCP.h
@@ -28,7 +28,7 @@ namespace llvm {
/// Pass to perform interprocedural constant propagation.
class IPSCCPPass : public PassInfoMixin<IPSCCPPass> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
}
#endif // LLVM_TRANSFORMS_IPO_SCCP_H
diff --git a/include/llvm/Transforms/IPO/WholeProgramDevirt.h b/include/llvm/Transforms/IPO/WholeProgramDevirt.h
index 2bd20c95702c..1aa4c6f4f559 100644
--- a/include/llvm/Transforms/IPO/WholeProgramDevirt.h
+++ b/include/llvm/Transforms/IPO/WholeProgramDevirt.h
@@ -118,7 +118,7 @@ struct VirtualCallTarget {
// For testing only.
VirtualCallTarget(const TypeMemberInfo *TM, bool IsBigEndian)
- : Fn(nullptr), TM(TM), IsBigEndian(IsBigEndian) {}
+ : Fn(nullptr), TM(TM), IsBigEndian(IsBigEndian), WasDevirt(false) {}
// The function stored in the vtable.
Function *Fn;
@@ -134,6 +134,9 @@ struct VirtualCallTarget {
// Whether the target is big endian.
bool IsBigEndian;
+ // Whether at least one call site to the target was devirtualized.
+ bool WasDevirt;
+
// The minimum byte offset before the address point. This covers the bytes in
// the vtable object before the address point (e.g. RTTI, access-to-top,
// vtables for other base classes) and is equal to the offset from the start
diff --git a/include/llvm/Transforms/InstCombine/InstCombine.h b/include/llvm/Transforms/InstCombine/InstCombine.h
index d70b847c6892..6bd22dc46255 100644
--- a/include/llvm/Transforms/InstCombine/InstCombine.h
+++ b/include/llvm/Transforms/InstCombine/InstCombine.h
@@ -31,19 +31,10 @@ class InstCombinePass : public PassInfoMixin<InstCombinePass> {
public:
static StringRef name() { return "InstCombinePass"; }
- // Explicitly define constructors for MSVC.
- InstCombinePass(bool ExpensiveCombines = true)
+ explicit InstCombinePass(bool ExpensiveCombines = true)
: ExpensiveCombines(ExpensiveCombines) {}
- InstCombinePass(InstCombinePass &&Arg)
- : Worklist(std::move(Arg.Worklist)),
- ExpensiveCombines(Arg.ExpensiveCombines) {}
- InstCombinePass &operator=(InstCombinePass &&RHS) {
- Worklist = std::move(RHS.Worklist);
- ExpensiveCombines = RHS.ExpensiveCombines;
- return *this;
- }
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief The legacy pass manager's instcombine pass.
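[Editorial note] With the move/copy boilerplate gone and the constructor marked explicit, the pass is built and scheduled in the obvious way under the new pass manager; a short sketch (the `addInstCombine` helper name is illustrative):

  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/InstCombine/InstCombine.h"

  using namespace llvm;

  void addInstCombine(FunctionPassManager &FPM) {
    // The defaulted argument means this is equivalent to
    // InstCombinePass(/*ExpensiveCombines=*/true).
    FPM.addPass(InstCombinePass());
  }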
diff --git a/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
index 32af035d07d4..271e891bb45e 100644
--- a/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
+++ b/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -28,19 +28,11 @@ class InstCombineWorklist {
SmallVector<Instruction*, 256> Worklist;
DenseMap<Instruction*, unsigned> WorklistMap;
- void operator=(const InstCombineWorklist&RHS) = delete;
- InstCombineWorklist(const InstCombineWorklist&) = delete;
public:
- InstCombineWorklist() {}
-
- InstCombineWorklist(InstCombineWorklist &&Arg)
- : Worklist(std::move(Arg.Worklist)),
- WorklistMap(std::move(Arg.WorklistMap)) {}
- InstCombineWorklist &operator=(InstCombineWorklist &&RHS) {
- Worklist = std::move(RHS.Worklist);
- WorklistMap = std::move(RHS.WorklistMap);
- return *this;
- }
+ InstCombineWorklist() = default;
+
+ InstCombineWorklist(InstCombineWorklist &&) = default;
+ InstCombineWorklist &operator=(InstCombineWorklist &&) = default;
bool isEmpty() const { return Worklist.empty(); }
diff --git a/include/llvm/Transforms/InstrProfiling.h b/include/llvm/Transforms/InstrProfiling.h
index 9ac6d63b96ae..b7c2935f4d84 100644
--- a/include/llvm/Transforms/InstrProfiling.h
+++ b/include/llvm/Transforms/InstrProfiling.h
@@ -21,7 +21,9 @@
namespace llvm {
-/// Instrumenation based profiling lowering pass. This pass lowers
+class TargetLibraryInfo;
+
+/// Instrumentation based profiling lowering pass. This pass lowers
/// the profile instrumented code generated by FE or the IR based
/// instrumentation pass.
class InstrProfiling : public PassInfoMixin<InstrProfiling> {
@@ -29,12 +31,13 @@ public:
InstrProfiling() {}
InstrProfiling(const InstrProfOptions &Options) : Options(Options) {}
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
- bool run(Module &M);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+ bool run(Module &M, const TargetLibraryInfo &TLI);
private:
InstrProfOptions Options;
Module *M;
+ const TargetLibraryInfo *TLI;
struct PerFunctionProfileData {
uint32_t NumValueSites[IPVK_Last + 1];
GlobalVariable *RegionCounters;
@@ -44,7 +47,7 @@ private:
}
};
DenseMap<GlobalVariable *, PerFunctionProfileData> ProfileDataMap;
- std::vector<Value *> UsedVars;
+ std::vector<GlobalValue *> UsedVars;
std::vector<GlobalVariable *> ReferencedNames;
GlobalVariable *NamesVar;
size_t NamesSize;
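[Editorial note] A rough sketch of scheduling the lowering pass under the new pass manager, assuming an InstrProfOptions value prepared by the caller (the `addProfileLowering` helper is illustrative); the TargetLibraryInfo the pass now needs is obtained through the analysis manager rather than passed explicitly:

  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/InstrProfiling.h"

  using namespace llvm;

  void addProfileLowering(ModulePassManager &MPM, const InstrProfOptions &Options) {
    // Lowers the intrinsics emitted by frontend or IR-level instrumentation.
    MPM.addPass(InstrProfiling(Options));
  }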
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h
index 09eef7e0750e..7fb9a5442081 100644
--- a/include/llvm/Transforms/Instrumentation.h
+++ b/include/llvm/Transforms/Instrumentation.h
@@ -108,7 +108,8 @@ ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
bool Recover = false);
// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
-FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0);
+FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
+ bool Recover = false);
// Insert ThreadSanitizer (race detection) instrumentation
FunctionPass *createThreadSanitizerPass();
@@ -136,7 +137,8 @@ ModulePass *createEfficiencySanitizerPass(
struct SanitizerCoverageOptions {
SanitizerCoverageOptions()
: CoverageType(SCK_None), IndirectCalls(false), TraceBB(false),
- TraceCmp(false), Use8bitCounters(false), TracePC(false) {}
+ TraceCmp(false), TraceDiv(false), TraceGep(false),
+ Use8bitCounters(false), TracePC(false), TracePCGuard(false) {}
enum Type {
SCK_None = 0,
@@ -147,8 +149,11 @@ struct SanitizerCoverageOptions {
bool IndirectCalls;
bool TraceBB;
bool TraceCmp;
+ bool TraceDiv;
+ bool TraceGep;
bool Use8bitCounters;
bool TracePC;
+ bool TracePCGuard;
};
// Insert SanitizerCoverage instrumentation.
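[Editorial note] A hedged sketch of how the extended sanitizer knobs might be driven from a legacy pipeline (the `addSanitizers` helper is illustrative, and SCK_Edge is one of the existing SanitizerCoverageOptions coverage kinds not shown in this hunk):

  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/Transforms/Instrumentation.h"

  using namespace llvm;

  void addSanitizers(legacy::PassManager &PM) {
    // MemorySanitizer with origin tracking and the new recovery mode.
    PM.add(createMemorySanitizerPass(/*TrackOrigins=*/1, /*Recover=*/true));

    // SanitizerCoverage using the new trace-pc-guard instrumentation.
    SanitizerCoverageOptions CoverageOptions;
    CoverageOptions.CoverageType = SanitizerCoverageOptions::SCK_Edge;
    CoverageOptions.TracePCGuard = true;
    PM.add(createSanitizerCoverageModulePass(CoverageOptions));
  }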
diff --git a/include/llvm/Transforms/PGOInstrumentation.h b/include/llvm/Transforms/PGOInstrumentation.h
index f6b5639e5aad..1b449c9abdc2 100644
--- a/include/llvm/Transforms/PGOInstrumentation.h
+++ b/include/llvm/Transforms/PGOInstrumentation.h
@@ -22,13 +22,13 @@ namespace llvm {
/// The instrumentation (profile-instr-gen) pass for IR based PGO.
class PGOInstrumentationGen : public PassInfoMixin<PGOInstrumentationGen> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// The profile annotation (profile-instr-use) pass for IR based PGO.
class PGOInstrumentationUse : public PassInfoMixin<PGOInstrumentationUse> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
PGOInstrumentationUse(std::string Filename = "");
private:
@@ -39,7 +39,7 @@ private:
class PGOIndirectCallPromotion : public PassInfoMixin<PGOIndirectCallPromotion> {
public:
PGOIndirectCallPromotion(bool IsInLTO = false) : InLTO(IsInLTO) {}
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
private:
bool InLTO;
};
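[Editorial note] For context, a minimal use of the annotation and promotion passes under the new pass manager; the `addPGOUsePasses` helper and the "default.profdata" filename are placeholders, not anything mandated by the API:

  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/PGOInstrumentation.h"

  using namespace llvm;

  void addPGOUsePasses(ModulePassManager &MPM) {
    // Annotate the IR from a previously collected profile, then promote
    // hot indirect calls based on the value profile data.
    MPM.addPass(PGOInstrumentationUse("default.profdata"));
    MPM.addPass(PGOIndirectCallPromotion(/*IsInLTO=*/false));
  }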
diff --git a/include/llvm/Transforms/SampleProfile.h b/include/llvm/Transforms/SampleProfile.h
index 0fdfa2f85e54..93fa9532cc3a 100644
--- a/include/llvm/Transforms/SampleProfile.h
+++ b/include/llvm/Transforms/SampleProfile.h
@@ -20,7 +20,7 @@ namespace llvm {
/// The sample profiler data loader pass.
class SampleProfileLoaderPass : public PassInfoMixin<SampleProfileLoaderPass> {
public:
- PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
} // End llvm namespace
diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h
index 167cc94ec81f..92558937d047 100644
--- a/include/llvm/Transforms/Scalar.h
+++ b/include/llvm/Transforms/Scalar.h
@@ -140,6 +140,13 @@ Pass *createLICMPass();
//===----------------------------------------------------------------------===//
//
+// LoopSink - This pass sinks loop-invariant computations from the loop
+// preheader into loop-body blocks whose execution frequency is lower than
+// the preheader's.
+//
+Pass *createLoopSinkPass();
+
+//===----------------------------------------------------------------------===//
+//
// LoopInterchange - This pass interchanges loops to provide a more
 // cache-friendly memory access pattern.
//
@@ -169,8 +176,9 @@ Pass *createLoopInstSimplifyPass();
// LoopUnroll - This pass is a simple loop unrolling pass.
//
Pass *createLoopUnrollPass(int Threshold = -1, int Count = -1,
- int AllowPartial = -1, int Runtime = -1);
-// Create an unrolling pass for full unrolling only.
+ int AllowPartial = -1, int Runtime = -1,
+ int UpperBound = -1);
+// Create an unrolling pass for full unrolling that uses exact trip count only.
Pass *createSimpleLoopUnrollPass();
//===----------------------------------------------------------------------===//
@@ -322,7 +330,7 @@ extern char &LCSSAID;
// EarlyCSE - This pass performs a simple and fast CSE pass over the dominator
// tree.
//
-FunctionPass *createEarlyCSEPass();
+FunctionPass *createEarlyCSEPass(bool UseMemorySSA = false);
//===----------------------------------------------------------------------===//
//
@@ -340,6 +348,13 @@ FunctionPass *createMergedLoadStoreMotionPass();
//===----------------------------------------------------------------------===//
//
+// GVN - This pass performs global value numbering and redundant load
+// elimination contemporaneously.
+//
+FunctionPass *createNewGVNPass();
+
+//===----------------------------------------------------------------------===//
+//
// MemCpyOpt - This pass performs optimizations related to eliminating memcpy
// calls and/or combining multiple stores into memset's.
//
@@ -471,6 +486,13 @@ ModulePass *createRewriteStatepointsForGCPass();
//===----------------------------------------------------------------------===//
//
+// StripGCRelocates - Remove GC relocates that have been inserted by
+// RewriteStatepointsForGC. The resulting IR is incorrect, but this is useful
+// for manual inspection.
+FunctionPass *createStripGCRelocatesPass();
+
+//===----------------------------------------------------------------------===//
+//
// Float2Int - Demote floats to ints where possible.
//
FunctionPass *createFloat2IntPass();
@@ -485,10 +507,7 @@ FunctionPass *createNaryReassociatePass();
//
// LoopDistribute - Distribute loops.
//
-// ProcessAllLoopsByDefault instructs the pass to look for distribution
-// opportunities in all loops unless -enable-loop-distribute or the
-// llvm.loop.distribute.enable metadata data override this default.
-FunctionPass *createLoopDistributePass(bool ProcessAllLoopsByDefault);
+FunctionPass *createLoopDistributePass();
//===----------------------------------------------------------------------===//
//
@@ -516,8 +535,14 @@ FunctionPass *createLoopVersioningPass();
FunctionPass *createLoopDataPrefetchPass();
///===---------------------------------------------------------------------===//
-ModulePass *createNameAnonFunctionPass();
+ModulePass *createNameAnonGlobalPass();
+//===----------------------------------------------------------------------===//
+//
+// LibCallsShrinkWrap - Shrink-wraps a call to function if the result is not
+// used.
+//
+FunctionPass *createLibCallsShrinkWrapPass();
} // End llvm namespace
#endif
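[Editorial note] The new and extended legacy factory functions above slot into a legacy pass manager as usual. A sketch, assuming the -1 sentinels in createLoopUnrollPass still mean "use the target/command-line default" (the `addScalarPasses` helper is illustrative):

  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/Transforms/Scalar.h"

  using namespace llvm;

  void addScalarPasses(legacy::FunctionPassManager &FPM) {
    FPM.add(createEarlyCSEPass(/*UseMemorySSA=*/true)); // MemorySSA-backed EarlyCSE
    FPM.add(createNewGVNPass());                        // the new GVN implementation
    FPM.add(createLibCallsShrinkWrapPass());            // shrink-wrap library calls
    FPM.add(createLoopSinkPass());                      // sink invariants into cold loop bodies
    FPM.add(createLoopUnrollPass(/*Threshold=*/-1, /*Count=*/-1,
                                 /*AllowPartial=*/-1, /*Runtime=*/-1,
                                 /*UpperBound=*/1));    // permit exact upper-bound unrolling
  }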
diff --git a/include/llvm/Transforms/Scalar/DCE.h b/include/llvm/Transforms/Scalar/DCE.h
index d9f921e1e7c1..273346cf81d9 100644
--- a/include/llvm/Transforms/Scalar/DCE.h
+++ b/include/llvm/Transforms/Scalar/DCE.h
@@ -22,7 +22,7 @@ namespace llvm {
/// Basic Dead Code Elimination pass.
class DCEPass : public PassInfoMixin<DCEPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/DeadStoreElimination.h b/include/llvm/Transforms/Scalar/DeadStoreElimination.h
index 7826e29f178e..3ae999dfb542 100644
--- a/include/llvm/Transforms/Scalar/DeadStoreElimination.h
+++ b/include/llvm/Transforms/Scalar/DeadStoreElimination.h
@@ -27,7 +27,7 @@ namespace llvm {
/// only the redundant stores that are local to a single Basic Block.
class DSEPass : public PassInfoMixin<DSEPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &FAM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
}
diff --git a/include/llvm/Transforms/Scalar/EarlyCSE.h b/include/llvm/Transforms/Scalar/EarlyCSE.h
index 80e3c602a2b8..969ab78bfd19 100644
--- a/include/llvm/Transforms/Scalar/EarlyCSE.h
+++ b/include/llvm/Transforms/Scalar/EarlyCSE.h
@@ -27,8 +27,12 @@ namespace llvm {
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
struct EarlyCSEPass : PassInfoMixin<EarlyCSEPass> {
+ EarlyCSEPass(bool UseMemorySSA = false) : UseMemorySSA(UseMemorySSA) {}
+
/// \brief Run the pass over the function.
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+ bool UseMemorySSA;
};
}
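[Editorial note] The new-PM equivalent is a one-liner; plain EarlyCSEPass() keeps the old hash-only behaviour (the `addEarlyCSE` helper name is illustrative):

  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/Scalar/EarlyCSE.h"

  using namespace llvm;

  void addEarlyCSE(FunctionPassManager &FPM) {
    // The MemorySSA-backed variant of EarlyCSE.
    FPM.addPass(EarlyCSEPass(/*UseMemorySSA=*/true));
  }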
diff --git a/include/llvm/Transforms/Scalar/GVN.h b/include/llvm/Transforms/Scalar/GVN.h
index 3bb5ec392272..8f05e8cdb233 100644
--- a/include/llvm/Transforms/Scalar/GVN.h
+++ b/include/llvm/Transforms/Scalar/GVN.h
@@ -22,12 +22,14 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
+class OptimizationRemarkEmitter;
/// A private "module" namespace for types and utilities used by GVN. These
/// are implementation details and should not be used by clients.
@@ -45,7 +47,7 @@ class GVN : public PassInfoMixin<GVN> {
public:
/// \brief Run the pass over the function.
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
/// This removes the specified instruction from
/// our various maps and marks it for deletion.
@@ -109,6 +111,7 @@ private:
const TargetLibraryInfo *TLI;
AssumptionCache *AC;
SetVector<BasicBlock *> DeadBlocks;
+ OptimizationRemarkEmitter *ORE;
ValueTable VN;
@@ -134,7 +137,8 @@ private:
bool runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
const TargetLibraryInfo &RunTLI, AAResults &RunAA,
- MemoryDependenceResults *RunMD);
+ MemoryDependenceResults *RunMD, LoopInfo *LI,
+ OptimizationRemarkEmitter *ORE);
/// Push a new Value to the LeaderTable onto the list for its value number.
void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
@@ -232,7 +236,7 @@ FunctionPass *createGVNPass(bool NoLoads = false);
/// from sibling branches.
struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
/// \brief Run the pass over the function.
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/GVNExpression.h b/include/llvm/Transforms/Scalar/GVNExpression.h
new file mode 100644
index 000000000000..3458696e0687
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/GVNExpression.h
@@ -0,0 +1,605 @@
+//======- GVNExpression.h - GVN Expression classes -------*- C++ -*-==-------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the GVN pass that contains expression handling
+/// classes
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
+#define LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/MemorySSA.h"
+#include <algorithm>
+
+namespace llvm {
+class MemoryAccess;
+
+namespace GVNExpression {
+
+enum ExpressionType {
+ ET_Base,
+ ET_Constant,
+ ET_Variable,
+ ET_Unknown,
+ ET_BasicStart,
+ ET_Basic,
+ ET_Call,
+ ET_AggregateValue,
+ ET_Phi,
+ ET_Load,
+ ET_Store,
+ ET_BasicEnd
+};
+
+class Expression {
+private:
+ ExpressionType EType;
+ unsigned Opcode;
+
+public:
+ Expression(const Expression &) = delete;
+ Expression(ExpressionType ET = ET_Base, unsigned O = ~2U)
+ : EType(ET), Opcode(O) {}
+ void operator=(const Expression &) = delete;
+ virtual ~Expression();
+
+ static unsigned getEmptyKey() { return ~0U; }
+ static unsigned getTombstoneKey() { return ~1U; }
+
+ bool operator==(const Expression &Other) const {
+ if (getOpcode() != Other.getOpcode())
+ return false;
+ if (getOpcode() == getEmptyKey() || getOpcode() == getTombstoneKey())
+ return true;
+ // Compare the expression type for anything but load and store.
+ // For load and store we set the opcode to zero.
+ // This is needed for load coercion.
+ if (getExpressionType() != ET_Load && getExpressionType() != ET_Store &&
+ getExpressionType() != Other.getExpressionType())
+ return false;
+
+ return equals(Other);
+ }
+
+ virtual bool equals(const Expression &Other) const { return true; }
+
+ unsigned getOpcode() const { return Opcode; }
+ void setOpcode(unsigned opcode) { Opcode = opcode; }
+ ExpressionType getExpressionType() const { return EType; }
+
+ virtual hash_code getHashValue() const {
+ return hash_combine(getExpressionType(), getOpcode());
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const {
+ if (PrintEType)
+ OS << "etype = " << getExpressionType() << ",";
+ OS << "opcode = " << getOpcode() << ", ";
+ }
+
+ void print(raw_ostream &OS) const {
+ OS << "{ ";
+ printInternal(OS, true);
+ OS << "}";
+ }
+ void dump() const { print(dbgs()); }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Expression &E) {
+ E.print(OS);
+ return OS;
+}
+
+class BasicExpression : public Expression {
+private:
+ typedef ArrayRecycler<Value *> RecyclerType;
+ typedef RecyclerType::Capacity RecyclerCapacity;
+ Value **Operands;
+ unsigned MaxOperands;
+ unsigned NumOperands;
+ Type *ValueType;
+
+public:
+ static bool classof(const Expression *EB) {
+ ExpressionType ET = EB->getExpressionType();
+ return ET > ET_BasicStart && ET < ET_BasicEnd;
+ }
+
+ BasicExpression(unsigned NumOperands)
+ : BasicExpression(NumOperands, ET_Basic) {}
+ BasicExpression(unsigned NumOperands, ExpressionType ET)
+ : Expression(ET), Operands(nullptr), MaxOperands(NumOperands),
+ NumOperands(0), ValueType(nullptr) {}
+ virtual ~BasicExpression() override;
+ void operator=(const BasicExpression &) = delete;
+ BasicExpression(const BasicExpression &) = delete;
+ BasicExpression() = delete;
+
+ /// \brief Swap two operands. Used during GVN to put commutative operands in
+ /// order.
+ void swapOperands(unsigned First, unsigned Second) {
+ std::swap(Operands[First], Operands[Second]);
+ }
+
+ Value *getOperand(unsigned N) const {
+ assert(Operands && "Operands not allocated");
+ assert(N < NumOperands && "Operand out of range");
+ return Operands[N];
+ }
+
+ void setOperand(unsigned N, Value *V) {
+ assert(Operands && "Operands not allocated before setting");
+ assert(N < NumOperands && "Operand out of range");
+ Operands[N] = V;
+ }
+
+ unsigned getNumOperands() const { return NumOperands; }
+
+ typedef Value **op_iterator;
+ typedef Value *const *const_op_iterator;
+ op_iterator op_begin() { return Operands; }
+ op_iterator op_end() { return Operands + NumOperands; }
+ const_op_iterator op_begin() const { return Operands; }
+ const_op_iterator op_end() const { return Operands + NumOperands; }
+ iterator_range<op_iterator> operands() {
+ return iterator_range<op_iterator>(op_begin(), op_end());
+ }
+ iterator_range<const_op_iterator> operands() const {
+ return iterator_range<const_op_iterator>(op_begin(), op_end());
+ }
+
+ void op_push_back(Value *Arg) {
+ assert(NumOperands < MaxOperands && "Tried to add too many operands");
+ assert(Operands && "Operandss not allocated before pushing");
+ Operands[NumOperands++] = Arg;
+ }
+ bool op_empty() const { return getNumOperands() == 0; }
+
+ void allocateOperands(RecyclerType &Recycler, BumpPtrAllocator &Allocator) {
+ assert(!Operands && "Operands already allocated");
+ Operands = Recycler.allocate(RecyclerCapacity::get(MaxOperands), Allocator);
+ }
+ void deallocateOperands(RecyclerType &Recycler) {
+ Recycler.deallocate(RecyclerCapacity::get(MaxOperands), Operands);
+ }
+
+ void setType(Type *T) { ValueType = T; }
+ Type *getType() const { return ValueType; }
+
+ virtual bool equals(const Expression &Other) const override {
+ if (getOpcode() != Other.getOpcode())
+ return false;
+
+ const auto &OE = cast<BasicExpression>(Other);
+ return getType() == OE.getType() && NumOperands == OE.NumOperands &&
+ std::equal(op_begin(), op_end(), OE.op_begin());
+ }
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(getExpressionType(), getOpcode(), ValueType,
+ hash_combine_range(op_begin(), op_end()));
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeBasic, ";
+
+ this->Expression::printInternal(OS, false);
+ OS << "operands = {";
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ OS << "[" << i << "] = ";
+ Operands[i]->printAsOperand(OS);
+ OS << " ";
+ }
+ OS << "} ";
+ }
+};
+class op_inserter
+ : public std::iterator<std::output_iterator_tag, void, void, void, void> {
+private:
+ typedef BasicExpression Container;
+ Container *BE;
+
+public:
+ explicit op_inserter(BasicExpression &E) : BE(&E) {}
+ explicit op_inserter(BasicExpression *E) : BE(E) {}
+
+ op_inserter &operator=(Value *val) {
+ BE->op_push_back(val);
+ return *this;
+ }
+ op_inserter &operator*() { return *this; }
+ op_inserter &operator++() { return *this; }
+ op_inserter &operator++(int) { return *this; }
+};
+
+class CallExpression final : public BasicExpression {
+private:
+ CallInst *Call;
+ MemoryAccess *DefiningAccess;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_Call;
+ }
+
+ CallExpression(unsigned NumOperands, CallInst *C, MemoryAccess *DA)
+ : BasicExpression(NumOperands, ET_Call), Call(C), DefiningAccess(DA) {}
+ void operator=(const CallExpression &) = delete;
+ CallExpression(const CallExpression &) = delete;
+ CallExpression() = delete;
+ virtual ~CallExpression() override;
+
+ virtual bool equals(const Expression &Other) const override {
+ if (!this->BasicExpression::equals(Other))
+ return false;
+ const auto &OE = cast<CallExpression>(Other);
+ return DefiningAccess == OE.DefiningAccess;
+ }
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(this->BasicExpression::getHashValue(), DefiningAccess);
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeCall, ";
+ this->BasicExpression::printInternal(OS, false);
+ OS << " represents call at " << Call;
+ }
+};
+
+class LoadExpression final : public BasicExpression {
+private:
+ LoadInst *Load;
+ MemoryAccess *DefiningAccess;
+ unsigned Alignment;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_Load;
+ }
+
+ LoadExpression(unsigned NumOperands, LoadInst *L, MemoryAccess *DA)
+ : LoadExpression(ET_Load, NumOperands, L, DA) {}
+ LoadExpression(enum ExpressionType EType, unsigned NumOperands, LoadInst *L,
+ MemoryAccess *DA)
+ : BasicExpression(NumOperands, EType), Load(L), DefiningAccess(DA) {
+ Alignment = L ? L->getAlignment() : 0;
+ }
+ void operator=(const LoadExpression &) = delete;
+ LoadExpression(const LoadExpression &) = delete;
+ LoadExpression() = delete;
+ virtual ~LoadExpression() override;
+
+ LoadInst *getLoadInst() const { return Load; }
+ void setLoadInst(LoadInst *L) { Load = L; }
+
+ MemoryAccess *getDefiningAccess() const { return DefiningAccess; }
+ void setDefiningAccess(MemoryAccess *MA) { DefiningAccess = MA; }
+ unsigned getAlignment() const { return Alignment; }
+ void setAlignment(unsigned Align) { Alignment = Align; }
+
+ virtual bool equals(const Expression &Other) const override;
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(getOpcode(), getType(), DefiningAccess,
+ hash_combine_range(op_begin(), op_end()));
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeLoad, ";
+ this->BasicExpression::printInternal(OS, false);
+ OS << " represents Load at " << Load;
+ OS << " with DefiningAccess " << *DefiningAccess;
+ }
+};
+
+class StoreExpression final : public BasicExpression {
+private:
+ StoreInst *Store;
+ MemoryAccess *DefiningAccess;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_Store;
+ }
+
+ StoreExpression(unsigned NumOperands, StoreInst *S, MemoryAccess *DA)
+ : BasicExpression(NumOperands, ET_Store), Store(S), DefiningAccess(DA) {}
+ void operator=(const StoreExpression &) = delete;
+ StoreExpression(const StoreExpression &) = delete;
+ StoreExpression() = delete;
+ virtual ~StoreExpression() override;
+
+ StoreInst *getStoreInst() const { return Store; }
+ MemoryAccess *getDefiningAccess() const { return DefiningAccess; }
+
+ virtual bool equals(const Expression &Other) const override;
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(getOpcode(), getType(), DefiningAccess,
+ hash_combine_range(op_begin(), op_end()));
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeStore, ";
+ this->BasicExpression::printInternal(OS, false);
+ OS << " represents Store at " << Store;
+ OS << " with DefiningAccess " << *DefiningAccess;
+ }
+};
+
+class AggregateValueExpression final : public BasicExpression {
+private:
+ unsigned MaxIntOperands;
+ unsigned NumIntOperands;
+ unsigned *IntOperands;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_AggregateValue;
+ }
+
+ AggregateValueExpression(unsigned NumOperands, unsigned NumIntOperands)
+ : BasicExpression(NumOperands, ET_AggregateValue),
+ MaxIntOperands(NumIntOperands), NumIntOperands(0),
+ IntOperands(nullptr) {}
+
+ void operator=(const AggregateValueExpression &) = delete;
+ AggregateValueExpression(const AggregateValueExpression &) = delete;
+ AggregateValueExpression() = delete;
+ virtual ~AggregateValueExpression() override;
+
+ typedef unsigned *int_arg_iterator;
+ typedef const unsigned *const_int_arg_iterator;
+
+ int_arg_iterator int_op_begin() { return IntOperands; }
+ int_arg_iterator int_op_end() { return IntOperands + NumIntOperands; }
+ const_int_arg_iterator int_op_begin() const { return IntOperands; }
+ const_int_arg_iterator int_op_end() const {
+ return IntOperands + NumIntOperands;
+ }
+ unsigned int_op_size() const { return NumIntOperands; }
+ bool int_op_empty() const { return NumIntOperands == 0; }
+ void int_op_push_back(unsigned IntOperand) {
+ assert(NumIntOperands < MaxIntOperands &&
+ "Tried to add too many int operands");
+ assert(IntOperands && "Operands not allocated before pushing");
+ IntOperands[NumIntOperands++] = IntOperand;
+ }
+
+ virtual void allocateIntOperands(BumpPtrAllocator &Allocator) {
+ assert(!IntOperands && "Operands already allocated");
+ IntOperands = Allocator.Allocate<unsigned>(MaxIntOperands);
+ }
+
+ virtual bool equals(const Expression &Other) const override {
+ if (!this->BasicExpression::equals(Other))
+ return false;
+ const AggregateValueExpression &OE = cast<AggregateValueExpression>(Other);
+ return NumIntOperands == OE.NumIntOperands &&
+ std::equal(int_op_begin(), int_op_end(), OE.int_op_begin());
+ }
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(this->BasicExpression::getHashValue(),
+ hash_combine_range(int_op_begin(), int_op_end()));
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeAggregateValue, ";
+ this->BasicExpression::printInternal(OS, false);
+ OS << ", intoperands = {";
+ for (unsigned i = 0, e = int_op_size(); i != e; ++i) {
+ OS << "[" << i << "] = " << IntOperands[i] << " ";
+ }
+ OS << "}";
+ }
+};
+class int_op_inserter
+ : public std::iterator<std::output_iterator_tag, void, void, void, void> {
+private:
+ typedef AggregateValueExpression Container;
+ Container *AVE;
+
+public:
+ explicit int_op_inserter(AggregateValueExpression &E) : AVE(&E) {}
+ explicit int_op_inserter(AggregateValueExpression *E) : AVE(E) {}
+ int_op_inserter &operator=(unsigned int val) {
+ AVE->int_op_push_back(val);
+ return *this;
+ }
+ int_op_inserter &operator*() { return *this; }
+ int_op_inserter &operator++() { return *this; }
+ int_op_inserter &operator++(int) { return *this; }
+};
+
+class PHIExpression final : public BasicExpression {
+private:
+ BasicBlock *BB;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_Phi;
+ }
+
+ PHIExpression(unsigned NumOperands, BasicBlock *B)
+ : BasicExpression(NumOperands, ET_Phi), BB(B) {}
+ void operator=(const PHIExpression &) = delete;
+ PHIExpression(const PHIExpression &) = delete;
+ PHIExpression() = delete;
+ virtual ~PHIExpression() override;
+
+ virtual bool equals(const Expression &Other) const override {
+ if (!this->BasicExpression::equals(Other))
+ return false;
+ const PHIExpression &OE = cast<PHIExpression>(Other);
+ return BB == OE.BB;
+ }
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(this->BasicExpression::getHashValue(), BB);
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypePhi, ";
+ this->BasicExpression::printInternal(OS, false);
+ OS << "bb = " << BB;
+ }
+};
+
+class VariableExpression final : public Expression {
+private:
+ Value *VariableValue;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_Variable;
+ }
+
+ VariableExpression(Value *V) : Expression(ET_Variable), VariableValue(V) {}
+ void operator=(const VariableExpression &) = delete;
+ VariableExpression(const VariableExpression &) = delete;
+ VariableExpression() = delete;
+
+ Value *getVariableValue() const { return VariableValue; }
+ void setVariableValue(Value *V) { VariableValue = V; }
+ virtual bool equals(const Expression &Other) const override {
+ const VariableExpression &OC = cast<VariableExpression>(Other);
+ return VariableValue == OC.VariableValue;
+ }
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(getExpressionType(), VariableValue->getType(),
+ VariableValue);
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeVariable, ";
+ this->Expression::printInternal(OS, false);
+ OS << " variable = " << *VariableValue;
+ }
+};
+
+class ConstantExpression final : public Expression {
+private:
+ Constant *ConstantValue;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_Constant;
+ }
+
+ ConstantExpression() : Expression(ET_Constant), ConstantValue(NULL) {}
+ ConstantExpression(Constant *constantValue)
+ : Expression(ET_Constant), ConstantValue(constantValue) {}
+ void operator=(const ConstantExpression &) = delete;
+ ConstantExpression(const ConstantExpression &) = delete;
+
+ Constant *getConstantValue() const { return ConstantValue; }
+ void setConstantValue(Constant *V) { ConstantValue = V; }
+
+ virtual bool equals(const Expression &Other) const override {
+ const ConstantExpression &OC = cast<ConstantExpression>(Other);
+ return ConstantValue == OC.ConstantValue;
+ }
+
+ virtual hash_code getHashValue() const override {
+ return hash_combine(getExpressionType(), ConstantValue->getType(),
+ ConstantValue);
+ }
+
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeConstant, ";
+ this->Expression::printInternal(OS, false);
+ OS << " constant = " << *ConstantValue;
+ }
+};
+
+class UnknownExpression final : public Expression {
+private:
+ Instruction *Inst;
+
+public:
+ static bool classof(const Expression *EB) {
+ return EB->getExpressionType() == ET_Unknown;
+ }
+
+ UnknownExpression(Instruction *I) : Expression(ET_Unknown), Inst(I) {}
+ void operator=(const UnknownExpression &) = delete;
+ UnknownExpression(const UnknownExpression &) = delete;
+ UnknownExpression() = delete;
+
+ Instruction *getInstruction() const { return Inst; }
+ void setInstruction(Instruction *I) { Inst = I; }
+ virtual bool equals(const Expression &Other) const override {
+ const auto &OU = cast<UnknownExpression>(Other);
+ return Inst == OU.Inst;
+ }
+ virtual hash_code getHashValue() const override {
+ return hash_combine(getExpressionType(), Inst);
+ }
+ //
+ // Debugging support
+ //
+ virtual void printInternal(raw_ostream &OS, bool PrintEType) const override {
+ if (PrintEType)
+ OS << "ExpressionTypeUnknown, ";
+ this->Expression::printInternal(OS, false);
+ OS << " inst = " << *Inst;
+ }
+};
+}
+}
+
+#endif
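[Editorial note] To make the intended use of this expression hierarchy concrete, here is a minimal sketch, not taken from the pass itself, of turning a binary operator into a BasicExpression so that congruent instructions hash and compare as equal. The `makeExpression` helper is hypothetical, and ownership/recycling of the expression object is elided.

  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Allocator.h"
  #include "llvm/Support/ArrayRecycler.h"
  #include "llvm/Transforms/Scalar/GVNExpression.h"
  #include <algorithm>

  using namespace llvm;
  using namespace llvm::GVNExpression;

  BasicExpression *makeExpression(BinaryOperator *BO,
                                  ArrayRecycler<Value *> &Recycler,
                                  BumpPtrAllocator &Allocator) {
    auto *E = new BasicExpression(BO->getNumOperands());
    E->setOpcode(BO->getOpcode());
    E->setType(BO->getType());
    // Operand storage comes from the recycler/allocator pair; operands are
    // then appended through the op_inserter output iterator.
    E->allocateOperands(Recycler, Allocator);
    std::copy(BO->op_begin(), BO->op_end(), op_inserter(E));
    return E;
  }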
diff --git a/include/llvm/Transforms/Scalar/GuardWidening.h b/include/llvm/Transforms/Scalar/GuardWidening.h
index 201065cbdfb5..2bc0940ac715 100644
--- a/include/llvm/Transforms/Scalar/GuardWidening.h
+++ b/include/llvm/Transforms/Scalar/GuardWidening.h
@@ -24,7 +24,7 @@ namespace llvm {
class Function;
struct GuardWideningPass : public PassInfoMixin<GuardWideningPass> {
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/IndVarSimplify.h b/include/llvm/Transforms/Scalar/IndVarSimplify.h
index 325bcc7bed8f..24a31594b153 100644
--- a/include/llvm/Transforms/Scalar/IndVarSimplify.h
+++ b/include/llvm/Transforms/Scalar/IndVarSimplify.h
@@ -16,13 +16,14 @@
#define LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
class IndVarSimplifyPass : public PassInfoMixin<IndVarSimplifyPass> {
public:
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/JumpThreading.h b/include/llvm/Transforms/Scalar/JumpThreading.h
index e38bdd03ac06..f96741c0127d 100644
--- a/include/llvm/Transforms/Scalar/JumpThreading.h
+++ b/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -85,20 +85,13 @@ class JumpThreadingPass : public PassInfoMixin<JumpThreadingPass> {
public:
JumpThreadingPass(int T = -1);
- // Hack for MSVC 2013 which seems like it can't synthesize this.
- JumpThreadingPass(JumpThreadingPass &&Other)
- : TLI(Other.TLI), LVI(Other.LVI), BFI(std::move(Other.BFI)),
- BPI(std::move(Other.BPI)), HasProfileData(Other.HasProfileData),
- LoopHeaders(std::move(Other.LoopHeaders)),
- RecursionSet(std::move(Other.RecursionSet)),
- BBDupThreshold(Other.BBDupThreshold) {}
// Glue for old PM.
bool runImpl(Function &F, TargetLibraryInfo *TLI_, LazyValueInfo *LVI_,
bool HasProfileData_, std::unique_ptr<BlockFrequencyInfo> BFI_,
std::unique_ptr<BranchProbabilityInfo> BPI_);
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
void releaseMemory() {
BFI.reset();
@@ -134,6 +127,8 @@ private:
const char *Suffix);
void UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB, BasicBlock *BB,
BasicBlock *NewBB, BasicBlock *SuccBB);
+ /// Check if the block has profile metadata for its outgoing edges.
+ bool doesBlockHaveProfileData(BasicBlock *BB);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Scalar/LICM.h b/include/llvm/Transforms/Scalar/LICM.h
index a050a43d6179..39bbc72f8cb4 100644
--- a/include/llvm/Transforms/Scalar/LICM.h
+++ b/include/llvm/Transforms/Scalar/LICM.h
@@ -34,6 +34,7 @@
#define LLVM_TRANSFORMS_SCALAR_LICM_H
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
@@ -41,7 +42,7 @@ namespace llvm {
/// Performs Loop Invariant Code Motion Pass.
class LICMPass : public PassInfoMixin<LICMPass> {
public:
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Scalar/LoopDataPrefetch.h b/include/llvm/Transforms/Scalar/LoopDataPrefetch.h
new file mode 100644
index 000000000000..114d1bad17a5
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/LoopDataPrefetch.h
@@ -0,0 +1,31 @@
+//===-------- LoopDataPrefetch.h - Loop Data Prefetching Pass ---*- C++ -*-===//
+//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Loop Data Prefetching Pass.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// An optimization pass inserting data prefetches in loops.
+class LoopDataPrefetchPass : public PassInfoMixin<LoopDataPrefetchPass> {
+public:
+ LoopDataPrefetchPass() {}
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+#endif
diff --git a/include/llvm/Transforms/Scalar/LoopDeletion.h b/include/llvm/Transforms/Scalar/LoopDeletion.h
index ed5a9833e572..891f08faa48a 100644
--- a/include/llvm/Transforms/Scalar/LoopDeletion.h
+++ b/include/llvm/Transforms/Scalar/LoopDeletion.h
@@ -15,6 +15,7 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/PassManager.h"
@@ -23,7 +24,7 @@ namespace llvm {
class LoopDeletionPass : public PassInfoMixin<LoopDeletionPass> {
public:
LoopDeletionPass() {}
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
bool runImpl(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
LoopInfo &loopInfo);
diff --git a/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h b/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
index cc66156fba8a..0c052ddd2fe7 100644
--- a/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
+++ b/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
@@ -17,6 +17,7 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
@@ -24,7 +25,7 @@ namespace llvm {
/// Performs Loop Idiom Recognize Pass.
class LoopIdiomRecognizePass : public PassInfoMixin<LoopIdiomRecognizePass> {
public:
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Scalar/LoopInstSimplify.h b/include/llvm/Transforms/Scalar/LoopInstSimplify.h
index f67343f40a7c..e30f4a97b78e 100644
--- a/include/llvm/Transforms/Scalar/LoopInstSimplify.h
+++ b/include/llvm/Transforms/Scalar/LoopInstSimplify.h
@@ -15,6 +15,7 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPINSTSIMPLIFY_H
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
@@ -22,7 +23,7 @@ namespace llvm {
/// Performs Loop Inst Simplify Pass.
class LoopInstSimplifyPass : public PassInfoMixin<LoopInstSimplifyPass> {
public:
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Scalar/LoopRotation.h b/include/llvm/Transforms/Scalar/LoopRotation.h
index b21c7313dc4b..54b8ec545ed2 100644
--- a/include/llvm/Transforms/Scalar/LoopRotation.h
+++ b/include/llvm/Transforms/Scalar/LoopRotation.h
@@ -15,6 +15,7 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
@@ -22,8 +23,11 @@ namespace llvm {
/// A simple loop rotation transformation.
class LoopRotatePass : public PassInfoMixin<LoopRotatePass> {
public:
- LoopRotatePass();
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ LoopRotatePass(bool EnableHeaderDuplication = true);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+
+private:
+ const bool EnableHeaderDuplication;
};
}
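[Editorial note] A rough sketch of using the new constructor argument, assuming createFunctionToLoopPassAdaptor from llvm/Analysis/LoopPassManager.h; the `addLoopRotation` helper is illustrative. Header duplication grows code, so a size-conscious pipeline would turn it off:

  #include "llvm/Analysis/LoopPassManager.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/Scalar/LoopRotation.h"

  using namespace llvm;

  void addLoopRotation(FunctionPassManager &FPM, bool OptForSize) {
    FPM.addPass(createFunctionToLoopPassAdaptor(
        LoopRotatePass(/*EnableHeaderDuplication=*/!OptForSize)));
  }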
diff --git a/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h b/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
index 7609bb26a1a0..2f06782052c5 100644
--- a/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
+++ b/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
@@ -18,6 +18,7 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
@@ -25,7 +26,7 @@ namespace llvm {
/// Performs basic CFG simplifications to assist other loop passes.
class LoopSimplifyCFGPass : public PassInfoMixin<LoopSimplifyCFGPass> {
public:
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Scalar/LoopStrengthReduce.h b/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
new file mode 100644
index 000000000000..11c0d9bce85b
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
@@ -0,0 +1,38 @@
+//===- LoopStrengthReduce.h - Loop Strength Reduce Pass -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation analyzes and transforms the induction variables (and
+// computations derived from them) into forms suitable for efficient execution
+// on the target.
+//
+// This pass performs a strength reduction on array references inside loops that
+// have as one or more of their components the loop induction variable; it
+// rewrites expressions to take advantage of scaled-index addressing modes
+// available on the target, and it performs a variety of other optimizations
+// related to loop induction variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Performs Loop Strength Reduce Pass.
+class LoopStrengthReducePass : public PassInfoMixin<LoopStrengthReducePass> {
+public:
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
diff --git a/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/include/llvm/Transforms/Scalar/LoopUnrollPass.h
new file mode 100644
index 000000000000..74a7258df5fc
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -0,0 +1,30 @@
+//===- LoopUnrollPass.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LoopUnrollPass : public PassInfoMixin<LoopUnrollPass> {
+ Optional<unsigned> ProvidedCount;
+ Optional<unsigned> ProvidedThreshold;
+ Optional<bool> ProvidedAllowPartial;
+ Optional<bool> ProvidedRuntime;
+ Optional<bool> ProvidedUpperBound;
+
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
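[Editorial note] The Optional<> members are how callers override individual unrolling parameters under the new pass manager; unset members fall back to the usual heuristics. A sketch, again assuming createFunctionToLoopPassAdaptor (the `addUnroll` helper is illustrative):

  #include "llvm/Analysis/LoopPassManager.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/Scalar/LoopUnrollPass.h"

  using namespace llvm;

  void addUnroll(FunctionPassManager &FPM) {
    LoopUnrollPass Unroll;
    Unroll.ProvidedCount = 4;            // force an unroll factor of 4
    Unroll.ProvidedAllowPartial = true;  // permit partial unrolling
    Unroll.ProvidedUpperBound = true;    // allow unrolling to the exact upper bound
    FPM.addPass(createFunctionToLoopPassAdaptor(std::move(Unroll)));
  }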
diff --git a/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h b/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h
new file mode 100644
index 000000000000..a9f19f6b84b4
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h
@@ -0,0 +1,28 @@
+//===--- LowerGuardIntrinsic.h - Lower the guard intrinsic ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers the llvm.experimental.guard intrinsic to a conditional call
+// to @llvm.experimental.deoptimize. Once this happens, the guard can no longer
+// be widened.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LowerGuardIntrinsicPass : PassInfoMixin<LowerGuardIntrinsicPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+}
+
+#endif //LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
diff --git a/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h b/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
index 47cfea489243..3cad7bb070d0 100644
--- a/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
+++ b/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
@@ -31,7 +31,7 @@ namespace llvm {
class MergedLoadStoreMotionPass
: public PassInfoMixin<MergedLoadStoreMotionPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/NaryReassociate.h b/include/llvm/Transforms/Scalar/NaryReassociate.h
new file mode 100644
index 000000000000..a74bb6cc4194
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -0,0 +1,174 @@
+//===- NaryReassociate.h - Reassociate n-ary expressions ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass reassociates n-ary add expressions and eliminates the redundancy
+// exposed by the reassociation.
+//
+// A motivating example:
+//
+// void foo(int a, int b) {
+// bar(a + b);
+// bar((a + 2) + b);
+// }
+//
+// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
+// the above code to
+//
+// int t = a + b;
+// bar(t);
+// bar(t + 2);
+//
+// However, the Reassociate pass is unable to do that because it processes each
+// instruction individually and believes (a + 2) + b is the best form according
+// to its rank system.
+//
+// To address this limitation, NaryReassociate reassociates an expression in a
+// form that reuses existing instructions. As a result, NaryReassociate can
+// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
+// (a + b) is computed before.
+//
+// NaryReassociate works as follows. For every instruction in the form of (a +
+// b) + c, it checks whether a + c or b + c is already computed by a dominating
+// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
+// c) + a and removes the redundancy accordingly. To efficiently look up whether
+// an expression is computed before, we store each instruction seen and its SCEV
+// into an SCEV-to-instruction map.
+//
+// Although the algorithm pattern-matches only ternary additions, it
+// automatically handles many >3-ary expressions by walking through the function
+// in the depth-first order. For example, given
+//
+// (a + c) + d
+// ((a + b) + c) + d
+//
+// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
+// ((a + c) + b) + d into ((a + c) + d) + b.
+//
+// Finally, the above dominator-based algorithm may need to be run multiple
+// iterations before emitting optimal code. One source of this need is that we
+// only split an operand when it is used only once. The above algorithm can
+// eliminate an instruction and decrease the usage count of its operands. As a
+// result, an instruction that previously had multiple uses may become a
+// single-use instruction and thus eligible for split consideration. For
+// example,
+//
+// ac = a + c
+// ab = a + b
+// abc = ab + c
+// ab2 = ab + b
+// ab2c = ab2 + c
+//
+// In the first iteration, we cannot reassociate abc to ac+b because ab is used
+// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
+// result, ab2 becomes dead and ab will be used only once in the second
+// iteration.
+//
+// Limitations and TODO items:
+//
+// 1) We only consider n-ary adds and muls for now. This should be extended
+// and generalized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
+#define LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+ // Glue for old PM.
+ bool runImpl(Function &F, AssumptionCache *AC_, DominatorTree *DT_,
+ ScalarEvolution *SE_, TargetLibraryInfo *TLI_,
+ TargetTransformInfo *TTI_);
+
+private:
+ // Runs only one iteration of the dominator-based algorithm. See the header
+ // comments for why we need multiple iterations.
+ bool doOneIteration(Function &F);
+
+ // Reassociates I for better CSE.
+ Instruction *tryReassociate(Instruction *I);
+
+ // Reassociate GEP for better CSE.
+ Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
+ // Try splitting GEP at the I-th index and see whether either part can be
+ // CSE'ed. This is a helper function for tryReassociateGEP.
+ //
+ // \p IndexedType The element type indexed by GEP's I-th index. This is
+ // equivalent to
+ // GEP->getIndexedType(GEP->getPointerOperand(), 0-th index,
+ // ..., i-th index).
+ GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+ unsigned I, Type *IndexedType);
+ // Given GEP's I-th index = LHS + RHS, see whether &Base[..][LHS][..] or
+ // &Base[..][RHS][..] can be CSE'ed and rewrite GEP accordingly.
+ GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+ unsigned I, Value *LHS,
+ Value *RHS, Type *IndexedType);
+
+ // Reassociate binary operators for better CSE.
+ Instruction *tryReassociateBinaryOp(BinaryOperator *I);
+
+ // A helper function for tryReassociateBinaryOp. LHS and RHS are explicitly
+ // passed.
+ Instruction *tryReassociateBinaryOp(Value *LHS, Value *RHS,
+ BinaryOperator *I);
+ // Rewrites I to (LHS op RHS) if LHS is computed already.
+ Instruction *tryReassociatedBinaryOp(const SCEV *LHS, Value *RHS,
+ BinaryOperator *I);
+
+ // Tries to match Op1 and Op2 by using V.
+ bool matchTernaryOp(BinaryOperator *I, Value *V, Value *&Op1, Value *&Op2);
+
+ // Gets SCEV for (LHS op RHS).
+ const SCEV *getBinarySCEV(BinaryOperator *I, const SCEV *LHS,
+ const SCEV *RHS);
+
+ // Returns the closest dominator of \c Dominatee that computes
+ // \c CandidateExpr. Returns null if not found.
+ Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr,
+ Instruction *Dominatee);
+ // GetElementPtrInst implicitly sign-extends an index if the index is shorter
+ // than the pointer size. This function returns whether Index is shorter than
+ // GEP's pointer size, i.e., whether Index needs to be sign-extended in order
+ // to be an index of GEP.
+ bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP);
+
+ AssumptionCache *AC;
+ const DataLayout *DL;
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+ TargetLibraryInfo *TLI;
+ TargetTransformInfo *TTI;
+ // A lookup table quickly telling which instructions compute the given SCEV.
+ // Note that there can be multiple instructions at different locations
+ // computing to the same SCEV, so we map a SCEV to an instruction list. For
+ // example,
+ //
+ // if (p1)
+ // foo(a + b);
+ // if (p2)
+ // bar(a + b);
+ DenseMap<const SCEV *, SmallVector<WeakVH, 2>> SeenExprs;
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
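[Editorial note] As the header comment explains, the pass rewrites expressions into forms that reuse dominating computations; in-tree users typically follow it with a CSE-style pass to clean up the common subexpressions the rewrites create. A brief new-PM sketch (the `addNaryReassociation` helper is illustrative):

  #include "llvm/IR/PassManager.h"
  #include "llvm/Transforms/Scalar/EarlyCSE.h"
  #include "llvm/Transforms/Scalar/NaryReassociate.h"

  using namespace llvm;

  void addNaryReassociation(FunctionPassManager &FPM) {
    FPM.addPass(NaryReassociatePass());
    FPM.addPass(EarlyCSEPass()); // remove the redundancy the rewrites expose
  }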
diff --git a/include/llvm/Transforms/Scalar/NewGVN.h b/include/llvm/Transforms/Scalar/NewGVN.h
new file mode 100644
index 000000000000..d0425aa4345f
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/NewGVN.h
@@ -0,0 +1,28 @@
+//===----- NewGVN.h - Global Value Numbering Pass ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Global Value Numbering pass.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+#define LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class NewGVNPass : public PassInfoMixin<NewGVNPass> {
+public:
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+
diff --git a/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h b/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h
index 385bbb40db3f..7f73831e0eb3 100644
--- a/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h
+++ b/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h
@@ -23,7 +23,7 @@ namespace llvm {
class PartiallyInlineLibCallsPass
: public PassInfoMixin<PartiallyInlineLibCallsPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/Reassociate.h b/include/llvm/Transforms/Scalar/Reassociate.h
index 2f56b9398778..7b68b4489306 100644
--- a/include/llvm/Transforms/Scalar/Reassociate.h
+++ b/include/llvm/Transforms/Scalar/Reassociate.h
@@ -65,7 +65,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
private:
- void BuildRankMap(Function &F);
+ void BuildRankMap(Function &F, ReversePostOrderTraversal<Function *> &RPOT);
unsigned getRank(Value *V);
void canonicalizeOperands(Instruction *I);
void ReassociateExpression(BinaryOperator *I);
diff --git a/include/llvm/Transforms/Scalar/SCCP.h b/include/llvm/Transforms/Scalar/SCCP.h
index 0dd90ecbedec..6e7f77fe2c50 100644
--- a/include/llvm/Transforms/Scalar/SCCP.h
+++ b/include/llvm/Transforms/Scalar/SCCP.h
@@ -29,7 +29,7 @@ namespace llvm {
/// This pass performs function-level constant propagation and merging.
class SCCPPass : public PassInfoMixin<SCCPPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/SROA.h b/include/llvm/Transforms/Scalar/SROA.h
index 72e7d63d4df6..3e93f46dd4e5 100644
--- a/include/llvm/Transforms/Scalar/SROA.h
+++ b/include/llvm/Transforms/Scalar/SROA.h
@@ -102,7 +102,7 @@ public:
SROA() : C(nullptr), DT(nullptr), AC(nullptr) {}
/// \brief Run the pass over the function.
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
private:
friend class sroa::AllocaSliceRewriter;
diff --git a/include/llvm/Transforms/Scalar/SimplifyCFG.h b/include/llvm/Transforms/Scalar/SimplifyCFG.h
index 53f427a7d19a..96e1658c00b0 100644
--- a/include/llvm/Transforms/Scalar/SimplifyCFG.h
+++ b/include/llvm/Transforms/Scalar/SimplifyCFG.h
@@ -36,7 +36,7 @@ public:
SimplifyCFGPass(int BonusInstThreshold);
/// \brief Run the pass over the function.
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/Sink.h b/include/llvm/Transforms/Scalar/Sink.h
index 1144c62fb20c..f9b3cb0fae39 100644
--- a/include/llvm/Transforms/Scalar/Sink.h
+++ b/include/llvm/Transforms/Scalar/Sink.h
@@ -23,7 +23,7 @@ namespace llvm {
/// Move instructions into successor blocks when possible.
class SinkingPass : public PassInfoMixin<SinkingPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Scalar/SpeculativeExecution.h b/include/llvm/Transforms/Scalar/SpeculativeExecution.h
new file mode 100644
index 000000000000..068f81776a03
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/SpeculativeExecution.h
@@ -0,0 +1,92 @@
+//===- SpeculativeExecution.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists instructions to enable speculative execution on
+// targets where branches are expensive. This is aimed at GPUs. It
+// currently works on simple if-then and if-then-else
+// patterns.
+//
+// Removing branches is not the only motivation for this
+// pass. E.g. consider this code and assume that there is no
+// addressing mode for multiplying by sizeof(*a):
+//
+// if (b > 0)
+// c = a[i + 1]
+// if (d > 0)
+// e = a[i + 2]
+//
+// turns into
+//
+// p = &a[i + 1];
+// if (b > 0)
+// c = *p;
+// q = &a[i + 2];
+// if (d > 0)
+// e = *q;
+//
+// which could later be optimized to
+//
+// r = &a[i];
+// if (b > 0)
+// c = r[1];
+// if (d > 0)
+// e = r[2];
+//
+// Later passes sink back much of the speculated code that did not enable
+// further optimization.
+//
+// This pass is more aggressive than the function SpeculativelyExecuteBB in
+// SimplifyCFG. SimplifyCFG will not speculate if no selects are introduced and
+// it will speculate at most one instruction. It also will not speculate if
+// there is a value defined in the if-block that is only used in the then-block.
+// These restrictions make sense since the speculation in SimplifyCFG seems
+// aimed at introducing cheap selects, while this pass is intended to do more
+// aggressive speculation while counting on later passes to either capitalize on
+// that or clean it up.
+//
+// If the pass was created by calling
+// createSpeculativeExecutionIfHasBranchDivergencePass or the
+// -spec-exec-only-if-divergent-target option is present, this pass only has an
+// effect on targets where TargetTransformInfo::hasBranchDivergence() is true;
+// on other targets, it is a nop.
+//
+// This lets you include this pass unconditionally in the IR pass pipeline, but
+// only enable it for relevant targets.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
+#define LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class SpeculativeExecutionPass
+ : public PassInfoMixin<SpeculativeExecutionPass> {
+public:
+ SpeculativeExecutionPass(bool OnlyIfDivergentTarget = false);
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+ // Glue for old PM
+ bool runImpl(Function &F, TargetTransformInfo *TTI);
+
+private:
+ bool runOnBasicBlock(BasicBlock &B);
+ bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock);
+
+ // If true, this pass is a nop unless the target architecture has branch
+ // divergence.
+ const bool OnlyIfDivergentTarget = false;
+
+ TargetTransformInfo *TTI = nullptr;
+};
+}
+
+#endif //LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
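
A minimal sketch of wiring the pass into a new-pass-manager pipeline with the divergence-only behaviour described above; the surrounding pipeline construction is assumed to happen elsewhere:

    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Scalar/SpeculativeExecution.h"

    void addSpeculation(llvm::FunctionPassManager &FPM) {
      // Only speculate on targets reporting branch divergence (e.g. GPUs);
      // everywhere else the pass is a nop, as documented above.
      FPM.addPass(llvm::SpeculativeExecutionPass(/*OnlyIfDivergentTarget=*/true));
    }
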
diff --git a/include/llvm/Transforms/Utils/ASanStackFrameLayout.h b/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
index 4e4f02c84ece..eaad06a10819 100644
--- a/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
+++ b/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
@@ -24,30 +24,31 @@ class AllocaInst;
static const int kAsanStackLeftRedzoneMagic = 0xf1;
static const int kAsanStackMidRedzoneMagic = 0xf2;
static const int kAsanStackRightRedzoneMagic = 0xf3;
+static const int kAsanStackUseAfterReturnMagic = 0xf5;
+static const int kAsanStackUseAfterScopeMagic = 0xf8;
// Input/output data struct for ComputeASanStackFrameLayout.
struct ASanStackVariableDescription {
- const char *Name; // Name of the variable that will be displayed by asan
- // if a stack-related bug is reported.
- uint64_t Size; // Size of the variable in bytes.
- size_t Alignment; // Alignment of the variable (power of 2).
- AllocaInst *AI; // The actual AllocaInst.
- size_t Offset; // Offset from the beginning of the frame;
- // set by ComputeASanStackFrameLayout.
+ const char *Name; // Name of the variable that will be displayed by asan
+ // if a stack-related bug is reported.
+ uint64_t Size; // Size of the variable in bytes.
+ size_t LifetimeSize; // Size in bytes to use for lifetime analysis check.
+ // Will be rounded up to Granularity.
+ size_t Alignment; // Alignment of the variable (power of 2).
+ AllocaInst *AI; // The actual AllocaInst.
+ size_t Offset; // Offset from the beginning of the frame;
+ // set by ComputeASanStackFrameLayout.
+ unsigned Line; // Line number.
};
// Output data struct for ComputeASanStackFrameLayout.
struct ASanStackFrameLayout {
- // Frame description, see DescribeAddressIfStack in ASan runtime.
- SmallString<64> DescriptionString;
- // The contents of the shadow memory for the stack frame that we need
- // to set at function entry.
- SmallVector<uint8_t, 64> ShadowBytes;
+ size_t Granularity; // Shadow granularity.
size_t FrameAlignment; // Alignment for the entire frame.
size_t FrameSize; // Size of the frame in bytes.
};
-void ComputeASanStackFrameLayout(
+ASanStackFrameLayout ComputeASanStackFrameLayout(
// The array of stack variables. The elements may get reordered and changed.
SmallVectorImpl<ASanStackVariableDescription> &Vars,
// AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64.
@@ -55,9 +56,25 @@ void ComputeASanStackFrameLayout(
// The minimal size of the left-most redzone (header).
// At least 4 pointer sizes, power of 2, and >= Granularity.
// The resulting FrameSize should be multiple of MinHeaderSize.
- size_t MinHeaderSize,
- // The result is put here.
- ASanStackFrameLayout *Layout);
+ size_t MinHeaderSize);
+
+// Compute frame description, see DescribeAddressIfStack in ASan runtime.
+SmallString<64> ComputeASanStackFrameDescription(
+ const SmallVectorImpl<ASanStackVariableDescription> &Vars);
+
+// Returns shadow bytes with marked red zones. This shadow represents the state
+// of the stack frame when all local variables are inside of their own scope.
+SmallVector<uint8_t, 64>
+GetShadowBytes(const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ const ASanStackFrameLayout &Layout);
+
+// Returns shadow bytes with marked red zones and after-scope markings. This
+// shadow represents the state of the stack frame when all local variables are
+// outside of their own scope.
+SmallVector<uint8_t, 64> GetShadowBytesAfterScope(
+ // The array of stack variables. The elements may get reordered and changed.
+ const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ const ASanStackFrameLayout &Layout);
} // llvm namespace
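
A hedged sketch of the reworked interface above: the layout is now returned by value, and the frame description and shadow bytes come from separate helpers. Populating Vars is elided; the Granularity and MinHeaderSize values simply follow the comments:

    #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
    using namespace llvm;

    // Vars is assumed to hold one ASanStackVariableDescription per alloca.
    void layoutFrame(SmallVectorImpl<ASanStackVariableDescription> &Vars) {
      ASanStackFrameLayout L =
          ComputeASanStackFrameLayout(Vars, /*Granularity=*/8,
                                      /*MinHeaderSize=*/32);
      SmallString<64> Descr = ComputeASanStackFrameDescription(Vars);
      SmallVector<uint8_t, 64> Shadow = GetShadowBytes(Vars, L);
      (void)Descr; (void)Shadow; // consumed by the instrumentation pass
    }
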
diff --git a/include/llvm/Transforms/Utils/AddDiscriminators.h b/include/llvm/Transforms/Utils/AddDiscriminators.h
index 0b3a8add6278..a87758300992 100644
--- a/include/llvm/Transforms/Utils/AddDiscriminators.h
+++ b/include/llvm/Transforms/Utils/AddDiscriminators.h
@@ -22,7 +22,7 @@ namespace llvm {
class AddDiscriminatorsPass : public PassInfoMixin<AddDiscriminatorsPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 37fd20925cba..3d41dbe2b954 100644
--- a/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -17,8 +17,11 @@
// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
+#include "llvm/IR/InstrTypes.h"
+#include <cassert>
namespace llvm {
@@ -29,7 +32,6 @@ class Instruction;
class MDNode;
class ReturnInst;
class TargetLibraryInfo;
-class TerminatorInst;
/// Delete the specified block, which must have no predecessors.
void DeleteDeadBlock(BasicBlock *BB);
@@ -154,7 +156,7 @@ SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
CriticalEdgeSplittingOptions()) {
TerminatorInst *TI = Src->getTerminator();
unsigned i = 0;
- while (1) {
+ while (true) {
assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
if (TI->getSuccessor(i) == Dst)
return SplitCriticalEdge(TI, i, Options);
@@ -228,8 +230,8 @@ ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
BasicBlock *Pred);
/// Split the containing block at the specified instruction - everything before
-/// and including SplitBefore stays in the old basic block, and everything after
-/// SplitBefore is moved to a new block. The two blocks are connected by a
+/// SplitBefore stays in the old basic block, and the rest of the instructions
+/// in the BB are moved to a new block. The two blocks are connected by a
/// conditional branch (with value of Cmp being the condition).
/// Before:
/// Head
@@ -282,6 +284,7 @@ void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
/// instructions in them.
Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
BasicBlock *&IfFalse);
-} // End llvm namespace
-#endif
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
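
A hedged sketch of the split described above, using the SplitBlockAndInsertIfThen helper from this header (the (Cond, SplitBefore, Unreachable) overload is assumed; only those arguments are shown):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"

    // Everything before InsertPt stays in the old block; the rest moves to the
    // tail block, and a new 'then' block is branched to when Cond is true.
    void emitGuarded(llvm::Value *Cond, llvm::Instruction *InsertPt) {
      llvm::TerminatorInst *ThenTerm =
          llvm::SplitBlockAndInsertIfThen(Cond, InsertPt, /*Unreachable=*/false);
      llvm::IRBuilder<> B(ThenTerm);
      // ... emit the code that should only run when Cond is true ...
      (void)B;
    }
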
diff --git a/include/llvm/Transforms/Utils/BreakCriticalEdges.h b/include/llvm/Transforms/Utils/BreakCriticalEdges.h
new file mode 100644
index 000000000000..9cc81a176cb6
--- /dev/null
+++ b/include/llvm/Transforms/Utils/BreakCriticalEdges.h
@@ -0,0 +1,29 @@
+//===- BreakCriticalEdges.h - Critical Edge Elimination Pass --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges pass - Break all of the critical edges in the CFG by
+// inserting a dummy basic block. This pass may be "required" by passes that
+// cannot deal with critical edges. For this usage, the structure type is
+// forward declared. This pass obviously invalidates the CFG, but can update
+// dominator trees.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+#define LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct BreakCriticalEdgesPass : public PassInfoMixin<BreakCriticalEdgesPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+#endif // LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h
index c5fb37007090..5eeb8cf30695 100644
--- a/include/llvm/Transforms/Utils/Cloning.h
+++ b/include/llvm/Transforms/Utils/Cloning.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
@@ -176,13 +177,14 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
class InlineFunctionInfo {
public:
explicit InlineFunctionInfo(CallGraph *cg = nullptr,
- AssumptionCacheTracker *ACT = nullptr)
- : CG(cg), ACT(ACT) {}
+ std::function<AssumptionCache &(Function &)>
+ *GetAssumptionCache = nullptr)
+ : CG(cg), GetAssumptionCache(GetAssumptionCache) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
- AssumptionCacheTracker *ACT;
+ std::function<AssumptionCache &(Function &)> *GetAssumptionCache;
/// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller.
@@ -192,9 +194,17 @@ public:
/// inlined from the callee. This is only filled in if CG is non-null.
SmallVector<WeakVH, 8> InlinedCalls;
+ /// All of the new call sites inlined into the caller.
+ ///
+ /// 'InlineFunction' fills this in by scanning the inlined instructions, and
+ /// only if CG is null. If CG is non-null, instead the value handle
+ /// `InlinedCalls` above is used.
+ SmallVector<CallSite, 8> InlinedCallSites;
+
void reset() {
StaticAllocas.clear();
InlinedCalls.clear();
+ InlinedCallSites.clear();
}
};
@@ -208,6 +218,10 @@ public:
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
///
+/// Note that while this routine is allowed to cleanup and optimize the
+/// *inlined* code to minimize the actual inserted code, it must not delete
+/// code in the caller as users of this routine may have pointers to
+/// instructions in the caller that need to remain stable.
bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
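
A hedged sketch of constructing InlineFunctionInfo with the new callback: instead of passing an AssumptionCacheTracker directly, callers now hand in any callable returning the per-function AssumptionCache (here it simply forwards to a tracker the caller is assumed to own):

    #include <functional>
    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/Transforms/Utils/Cloning.h"

    bool inlineWithAC(llvm::CallInst *CI, llvm::AssumptionCacheTracker &ACT) {
      std::function<llvm::AssumptionCache &(llvm::Function &)> GetAC =
          [&ACT](llvm::Function &F) -> llvm::AssumptionCache & {
            return ACT.getAssumptionCache(F);
          };
      llvm::InlineFunctionInfo IFI(/*cg=*/nullptr, &GetAC);
      return llvm::InlineFunction(CI, IFI);
    }
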
diff --git a/include/llvm/Transforms/Utils/CmpInstAnalysis.h b/include/llvm/Transforms/Utils/CmpInstAnalysis.h
index 73c15e42c359..5ec3888d4538 100644
--- a/include/llvm/Transforms/Utils/CmpInstAnalysis.h
+++ b/include/llvm/Transforms/Utils/CmpInstAnalysis.h
@@ -21,13 +21,13 @@ namespace llvm {
class ICmpInst;
class Value;
- /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
- /// are carefully arranged to allow folding of expressions such as:
+ /// Encode an icmp predicate into a three-bit mask. These bits are carefully
+ /// arranged to allow folding of expressions such as:
///
/// (A < B) | (A > B) --> (A != B)
///
/// Note that this is only valid if the first and second predicates have the
- /// same sign. Is illegal to do: (A u< B) | (A s> B)
+ /// same sign. It is illegal to do: (A u< B) | (A s> B)
///
/// Three bits are used to represent the condition, as follows:
/// 0 A > B
@@ -46,20 +46,25 @@ namespace llvm {
///
unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false);
- /// getICmpValue - This is the complement of getICmpCode, which turns an
- /// opcode and two operands into either a constant true or false, or the
- /// predicate for a new ICmp instruction. The sign is passed in to determine
- /// which kind of predicate to use in the new icmp instruction.
+ /// This is the complement of getICmpCode, which turns an opcode and two
+ /// operands into either a constant true or false, or the predicate for a new
+ /// ICmp instruction. The sign is passed in to determine which kind of
+ /// predicate to use in the new icmp instruction.
/// Non-NULL return value will be a true or false constant.
- /// NULL return means a new ICmp is needed. The predicate for which is
- /// output in NewICmpPred.
+ /// NULL return means a new ICmp is needed. The predicate for which is output
+ /// in NewICmpPred.
Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
CmpInst::Predicate &NewICmpPred);
- /// PredicatesFoldable - Return true if both predicates match sign or if at
- /// least one of them is an equality comparison (which is signless).
+ /// Return true if both predicates match sign or if at least one of them is an
+ /// equality comparison (which is signless).
bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2);
+ /// Decompose an icmp into the form ((X & Y) pred Z) if possible. The returned
+ /// predicate is either == or !=. Returns false if decomposition fails.
+ bool decomposeBitTestICmp(const ICmpInst *I, CmpInst::Predicate &Pred,
+ Value *&X, Value *&Y, Value *&Z);
+
} // end namespace llvm
#endif
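
A hedged sketch of the folding the comments above describe: encode both predicates, OR the three-bit codes, and let getICmpValue either fold to a constant or name the single predicate for a new icmp (which the caller then builds):

    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/CmpInstAnalysis.h"

    // LHS/RHS are the common operands of both compares; Sign must be the
    // shared signedness (see PredicatesFoldable).
    llvm::Value *foldOredICmps(const llvm::ICmpInst *A, const llvm::ICmpInst *B,
                               llvm::Value *LHS, llvm::Value *RHS, bool Sign) {
      unsigned Code = llvm::getICmpCode(A) | llvm::getICmpCode(B);
      llvm::CmpInst::Predicate NewPred;
      // Non-null result: constant true/false. Null: build a new icmp with NewPred.
      return llvm::getICmpValue(Sign, Code, LHS, RHS, NewPred);
    }
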
diff --git a/include/llvm/Transforms/Utils/CodeExtractor.h b/include/llvm/Transforms/Utils/CodeExtractor.h
index 30dafd045f23..a2978663a4d1 100644
--- a/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -20,6 +20,9 @@
namespace llvm {
template <typename T> class ArrayRef;
class BasicBlock;
+ class BlockFrequency;
+ class BlockFrequencyInfo;
+ class BranchProbabilityInfo;
class DominatorTree;
class Function;
class Loop;
@@ -47,6 +50,8 @@ template <typename T> class ArrayRef;
// Various bits of state computed on construction.
DominatorTree *const DT;
const bool AggregateArgs;
+ BlockFrequencyInfo *BFI;
+ BranchProbabilityInfo *BPI;
// Bits of intermediate state computed at various phases of extraction.
SetVector<BasicBlock *> Blocks;
@@ -54,11 +59,19 @@ template <typename T> class ArrayRef;
Type *RetTy;
public:
+
+ /// \brief Check to see if a block is valid for extraction.
+ ///
+ /// Blocks containing EHPads, allocas, invokes, or vastarts are not valid.
+ static bool isBlockValidForExtraction(const BasicBlock &BB);
+
/// \brief Create a code extractor for a single basic block.
///
/// In this formation, we don't require a dominator tree. The given basic
/// block is set up for extraction.
- CodeExtractor(BasicBlock *BB, bool AggregateArgs = false);
+ CodeExtractor(BasicBlock *BB, bool AggregateArgs = false,
+ BlockFrequencyInfo *BFI = nullptr,
+ BranchProbabilityInfo *BPI = nullptr);
/// \brief Create a code extractor for a sequence of blocks.
///
@@ -67,20 +80,24 @@ template <typename T> class ArrayRef;
/// sequence out into its new function. When a DominatorTree is also given,
/// extra checking and transformations are enabled.
CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
- bool AggregateArgs = false);
+ bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
+ BranchProbabilityInfo *BPI = nullptr);
/// \brief Create a code extractor for a loop body.
///
/// Behaves just like the generic code sequence constructor, but uses the
/// block sequence of the loop.
- CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false);
+ CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false,
+ BlockFrequencyInfo *BFI = nullptr,
+ BranchProbabilityInfo *BPI = nullptr);
/// \brief Create a code extractor for a region node.
///
/// Behaves just like the generic code sequence constructor, but uses the
/// block sequence of the region node passed in.
CodeExtractor(DominatorTree &DT, const RegionNode &RN,
- bool AggregateArgs = false);
+ bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
+ BranchProbabilityInfo *BPI = nullptr);
/// \brief Perform the extraction, returning the new function.
///
@@ -116,6 +133,11 @@ template <typename T> class ArrayRef;
void moveCodeToFunction(Function *newFunction);
+ void calculateNewCallTerminatorWeights(
+ BasicBlock *CodeReplacer,
+ DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
+ BranchProbabilityInfo *BPI);
+
void emitCallAndSwitchStatement(Function *newFunction,
BasicBlock *newHeader,
ValueSet &inputs,
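
A hedged sketch of the extended interface: profile info can now be threaded through so the call to the outlined function gets reasonable branch weights; both BFI and BPI may be null. extractCodeRegion() is assumed from the remainder of this header (its declaration lies just outside the hunk above):

    #include "llvm/Transforms/Utils/CodeExtractor.h"

    llvm::Function *outlineBlock(llvm::BasicBlock *BB,
                                 llvm::BlockFrequencyInfo *BFI,
                                 llvm::BranchProbabilityInfo *BPI) {
      if (!llvm::CodeExtractor::isBlockValidForExtraction(*BB))
        return nullptr;
      llvm::CodeExtractor CE(BB, /*AggregateArgs=*/false, BFI, BPI);
      return CE.extractCodeRegion(); // assumed member, declared later in the file
    }
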
diff --git a/include/llvm/Transforms/Utils/EscapeEnumerator.h b/include/llvm/Transforms/Utils/EscapeEnumerator.h
new file mode 100644
index 000000000000..80d16ed4cf5b
--- /dev/null
+++ b/include/llvm/Transforms/Utils/EscapeEnumerator.h
@@ -0,0 +1,49 @@
+//===-- EscapeEnumerator.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a helper class that enumerates all possible exits from a function,
+// including exception handling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+#define LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+
+/// EscapeEnumerator - This is a little algorithm to find all escape points
+/// from a function so that "finally"-style code can be inserted. In addition
+/// to finding the existing return and unwind instructions, it also (if
+/// necessary) transforms any call instructions into invokes and sends them to
+/// a landing pad.
+class EscapeEnumerator {
+ Function &F;
+ const char *CleanupBBName;
+
+ Function::iterator StateBB, StateE;
+ IRBuilder<> Builder;
+ bool Done;
+ bool HandleExceptions;
+
+public:
+ EscapeEnumerator(Function &F, const char *N = "cleanup",
+ bool HandleExceptions = true)
+ : F(F), CleanupBBName(N), StateBB(F.begin()), StateE(F.end()),
+ Builder(F.getContext()), Done(false),
+ HandleExceptions(HandleExceptions) {}
+
+ IRBuilder<> *Next();
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
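
A sketch of the usual driver loop: Next() positions an IRBuilder at each escape point (return or unwind) in turn and returns null when there are no more; the finalizer callee below is purely illustrative:

    #include "llvm/Transforms/Utils/EscapeEnumerator.h"

    void insertAtExits(llvm::Function &F, llvm::Function *Finalizer) {
      llvm::EscapeEnumerator EE(F, "finally", /*HandleExceptions=*/true);
      while (llvm::IRBuilder<> *AtExit = EE.Next())
        AtExit->CreateCall(Finalizer); // Finalizer: hypothetical no-arg function
    }
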
diff --git a/include/llvm/Transforms/Utils/FunctionComparator.h b/include/llvm/Transforms/Utils/FunctionComparator.h
new file mode 100644
index 000000000000..a613fc31a5e3
--- /dev/null
+++ b/include/llvm/Transforms/Utils/FunctionComparator.h
@@ -0,0 +1,376 @@
+//===- FunctionComparator.h - Function Comparator ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionComparator and GlobalNumberState classes which
+// are used by the MergeFunctions pass for comparing functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include <cstdint>
+#include <tuple>
+
+namespace llvm {
+
+class GetElementPtrInst;
+
+/// GlobalNumberState assigns an integer to each global value in the program,
+/// which is used by the comparison routine to order references to globals. This
+/// state must be preserved throughout the pass, because Functions and other
+/// globals need to maintain their relative order. Globals are assigned a number
+/// when they are first visited. This order is deterministic, and so the
+/// assigned numbers are as well. When two functions are merged, neither number
+/// is updated. If the symbols are weak, this would be incorrect. If they are
+/// strong, then one will be replaced at all references to the other, and so
+/// direct callsites will now see one or the other symbol, and no update is
+/// necessary. Note that if we were guaranteed unique names, we could just
+/// compare those, but this would not work for stripped bitcodes or for those
+/// few symbols without a name.
+class GlobalNumberState {
+ struct Config : ValueMapConfig<GlobalValue*> {
+ enum { FollowRAUW = false };
+ };
+ // Each GlobalValue is mapped to an identifier. The Config ensures when RAUW
+ // occurs, the mapping does not change. Tracking changes is unnecessary, and
+ // also problematic for weak symbols (which may be overwritten).
+ typedef ValueMap<GlobalValue *, uint64_t, Config> ValueNumberMap;
+ ValueNumberMap GlobalNumbers;
+ // The next unused serial number to assign to a global.
+ uint64_t NextNumber = 0;
+
+public:
+ GlobalNumberState() = default;
+
+ uint64_t getNumber(GlobalValue* Global) {
+ ValueNumberMap::iterator MapIter;
+ bool Inserted;
+ std::tie(MapIter, Inserted) = GlobalNumbers.insert({Global, NextNumber});
+ if (Inserted)
+ NextNumber++;
+ return MapIter->second;
+ }
+
+ void clear() {
+ GlobalNumbers.clear();
+ }
+};
+
+/// FunctionComparator - Compares two functions to determine whether or not
+/// they will generate machine code with the same behaviour. DataLayout is
+/// used if available. The comparator always fails conservatively (erring on the
+/// side of claiming that two functions are different).
+class FunctionComparator {
+public:
+ FunctionComparator(const Function *F1, const Function *F2,
+ GlobalNumberState* GN)
+ : FnL(F1), FnR(F2), GlobalNumbers(GN) {}
+
+ /// Test whether the two functions have equivalent behaviour.
+ int compare();
+ /// Hash a function. Equivalent functions will have the same hash, and unequal
+ /// functions will have different hashes with high probability.
+ typedef uint64_t FunctionHash;
+ static FunctionHash functionHash(Function &);
+
+protected:
+ /// Start the comparison.
+ void beginCompare() {
+ sn_mapL.clear();
+ sn_mapR.clear();
+ }
+
+ /// Compares the signature and other general attributes of the two functions.
+ int compareSignature() const;
+
+ /// Test whether two basic blocks have equivalent behaviour.
+ int cmpBasicBlocks(const BasicBlock *BBL, const BasicBlock *BBR) const;
+
+ /// Constants comparison.
+ /// It is analogous to a lexicographical comparison between hypothetical
+ /// numbers of the following format:
+ /// <bitcastability-trait><raw-bit-contents>
+ ///
+ /// 1. Bitcastability.
+ /// Check whether L's type could be losslessly bitcasted to R's type.
+ /// At this stage, if a lossless bitcast is not possible, the method
+ /// returns -1 or 1, thus also defining which type is greater in the
+ /// context of bitcastability.
+ /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight
+ /// to the contents comparison.
+ /// If types differ, remember types comparison result and check
+ /// whether we still can bitcast types.
+ /// Stage 1: Types that satisfy the isFirstClassType conditions are always
+ /// greater than others.
+ /// Stage 2: Vector types are greater than non-vector types.
+ /// If both types are vectors, then the vector with the greater bitwidth
+ /// is greater.
+ /// If both types are vectors with the same bitwidth, then types
+ /// are bitcastable, and we can skip other stages, and go to contents
+ /// comparison.
+ /// Stage 3: Pointer types are greater than non-pointers. If both types are
+ /// pointers of the same address space - go to contents comparison.
+ /// Different address spaces: pointer with greater address space is
+ /// greater.
+ /// Stage 4: Types are neither vectors, nor pointers. And they differ.
+ /// We don't know how to bitcast them, so we'd better not do it, and
+ /// instead return the types comparison result (so it determines the
+ /// relationship among constants we don't know how to bitcast).
+ ///
+ /// Just for clarity, let's see how the set of constants could look
+ /// on single dimension axis:
+ ///
+ /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+ /// Where: NFCT - Not a FirstClassType
+ /// FCT - FirstClassType
+ ///
+ /// 2. Compare raw contents.
+ /// It ignores types on this stage and only compares bits from L and R.
+ /// Returns 0 if L and R have equivalent contents.
+ /// -1 or 1 if values are different.
+ /// Pretty trivial:
+ /// 2.1. If contents are numbers, compare numbers.
+ /// Ints with a greater bitwidth are greater. Ints with the same bitwidth
+ /// are compared by their contents.
+ /// 2.2. "And so on". Just to avoid discrepancies with comments
+ /// perhaps it would be better to read the implementation itself.
+ /// 3. And again about overall picture. Let's look back at how the ordered set
+ /// of constants will look like:
+ /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+ ///
+ /// Now look, what could be inside [FCT, "others"], for example:
+ /// [FCT, "others"] =
+ /// [
+ /// [double 0.1], [double 1.23],
+ /// [i32 1], [i32 2],
+ /// { double 1.0 }, ; StructTyID, NumElements = 1
+ /// { i32 1 }, ; StructTyID, NumElements = 1
+ /// { double 1, i32 1 }, ; StructTyID, NumElements = 2
+ /// { i32 1, double 1 } ; StructTyID, NumElements = 2
+ /// ]
+ ///
+ /// Let's explain the order. Float numbers will be less than integers, just
+ /// because of cmpType terms: FloatTyID < IntegerTyID.
+ /// Floats (with same fltSemantics) are sorted according to their value.
+ /// Then come the integers, which, like the floats, can easily be
+ /// sorted among each other.
+ /// The structures. Structures are grouped at the tail, again because of their
+ /// TypeID: StructTyID > IntegerTyID > FloatTyID.
+ /// Structures with a greater number of elements are greater. Structures
+ /// whose greater elements come first are greater.
+ /// The same logic with vectors, arrays and other possible complex types.
+ ///
+ /// Bitcastable constants.
+ /// Let's assume that some constant belongs to a group of
+ /// "so-called-equal" values with different types, and at the same time
+ /// belongs to another group of constants with equal types
+ /// and "really" equal values.
+ ///
+ /// Now, prove that this is impossible:
+ ///
+ /// If constant A with type TyA is bitcastable to B with type TyB, then:
+ /// 1. All constants with types equal to TyA are bitcastable to B. Since
+ /// those should be vectors (if TyA is a vector), pointers
+ /// (if TyA is a pointer), or else (if TyA is equal to TyB), those types
+ /// should be equal to TyB.
+ /// 2. All constants with non-equal, but bitcastable types to TyA, are
+ /// bitcastable to B.
+ /// Once again, just because we allow it to vectors and pointers only.
+ /// This statement could be expanded as below:
+ /// 2.1. All vectors with a bitwidth equal to vector A have a bitwidth equal
+ /// to vector B, and are thus bitcastable to B as well.
+ /// 2.2. All pointers of the same address space, no matter what they point to,
+ /// are bitcastable. So if C is a pointer, it can be bitcast to A and to B.
+ /// So any constant equal or bitcastable to A is equal or bitcastable to B.
+ /// QED.
+ ///
+ /// In other words, for pointers and vectors, we ignore the top-level type and
+ /// look at their particular properties (bit-width for vectors, and
+ /// address space for pointers).
+ /// If these properties are equal - compare their contents.
+ int cmpConstants(const Constant *L, const Constant *R) const;
+
+ /// Compares two global values by number. Uses the GlobalNumberState to
+ /// identify the same globals across function calls.
+ int cmpGlobalValues(GlobalValue *L, GlobalValue *R) const;
+
+ /// Assign or look up previously assigned numbers for the two values, and
+ /// return whether the numbers are equal. Numbers are assigned in the order
+ /// visited.
+ /// Comparison order:
+ /// Stage 0: A value that is the function itself is always greater than others.
+ /// If left and right values are references to their functions, then
+ /// they are equal.
+ /// Stage 1: Constants are greater than non-constants.
+ /// If both left and right are constants, then the result of
+ /// cmpConstants is used as cmpValues result.
+ /// Stage 2: InlineAsm instances are greater than others. If both left and
+ /// right are InlineAsm instances, the InlineAsm* pointers are cast to
+ /// integers and compared as numbers.
+ /// Stage 3: For all other cases we compare the order in which we meet these
+ /// values in their functions. If the right value was met first during
+ /// scanning, then the left value is greater.
+ /// In other words, we compare serial numbers; for more details
+ /// see the comments for sn_mapL and sn_mapR.
+ int cmpValues(const Value *L, const Value *R) const;
+
+ /// Compare two Instructions for equivalence, similar to
+ /// Instruction::isSameOperationAs.
+ ///
+ /// Stages are listed in "most significant stage first" order:
+ /// On each stage below, we do comparison between some left and right
+ /// operation parts. If parts are non-equal, we assign parts comparison
+ /// result to the operation comparison result and exit from method.
+ /// Otherwise we proceed to the next stage.
+ /// Stages:
+ /// 1. Operations opcodes. Compared as numbers.
+ /// 2. Number of operands.
+ /// 3. Operation types. Compared with cmpType method.
+ /// 4. Compare operation subclass optional data as stream of bytes:
+ /// just convert it to integers and call cmpNumbers.
+ /// 5. Compare operation operand types with cmpType, in
+ /// most-significant-operand-first order.
+ /// 6. Last stage. Check operations for some specific attributes.
+ /// For example, for Load it would be:
+ /// 6.1.Load: volatile (as boolean flag)
+ /// 6.2.Load: alignment (as integer numbers)
+ /// 6.3.Load: ordering (as underlying enum class value)
+ /// 6.4.Load: synch-scope (as integer numbers)
+ /// 6.5.Load: range metadata (as integer ranges)
+ /// At this stage it's better to read the code, since it's no more than 10-15
+ /// lines per particular instruction, and could change over time.
+ ///
+ /// Sets \p needToCmpOperands to true if the operands of the instructions
+ /// still must be compared afterwards. In this case it's already guaranteed
+ /// that both instructions have the same number of operands.
+ int cmpOperations(const Instruction *L, const Instruction *R,
+ bool &needToCmpOperands) const;
+
+ /// cmpTypes - compares two types and
+ /// defines a total ordering among the set of types.
+ ///
+ /// Return values:
+ /// 0 if types are equal,
+ /// -1 if Left is less than Right,
+ /// +1 if Left is greater than Right.
+ ///
+ /// Description:
+ /// Comparison is broken into stages. As in a lexicographical comparison,
+ /// the stage that comes first has higher priority.
+ /// At each explanation stage keep in mind the total ordering properties.
+ ///
+ /// 0. Before comparison we coerce pointer types in address space 0 to
+ /// integer.
+ /// We also don't bother when the left and right types are the same, so
+ /// we just return 0 in this case.
+ ///
+ /// 1. If types are of different kind (different type IDs).
+ /// Return result of type IDs comparison, treating them as numbers.
+ /// 2. If types are integers, check that they have the same width. If they
+ /// are vectors, check that they have the same count and subtype.
+ /// 3. Types have the same ID, so check whether they are one of:
+ /// * Void
+ /// * Float
+ /// * Double
+ /// * X86_FP80
+ /// * FP128
+ /// * PPC_FP128
+ /// * Label
+ /// * Metadata
+ /// We can treat these types as equal whenever their IDs are same.
+ /// 4. If Left and Right are pointers, return result of address space
+ /// comparison (numbers comparison). We can treat pointer types of same
+ /// address space as equal.
+ /// 5. If types are complex.
+ /// Then both Left and Right are to be expanded and their element types will
+ /// be checked in the same way. If we get Res != 0 at some stage, return it.
+ /// Otherwise return 0.
+ /// 6. For all other cases put llvm_unreachable.
+ int cmpTypes(Type *TyL, Type *TyR) const;
+
+ int cmpNumbers(uint64_t L, uint64_t R) const;
+ int cmpAPInts(const APInt &L, const APInt &R) const;
+ int cmpAPFloats(const APFloat &L, const APFloat &R) const;
+ int cmpMem(StringRef L, StringRef R) const;
+
+ // The two functions undergoing comparison.
+ const Function *FnL, *FnR;
+
+private:
+ int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
+ int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
+ int cmpAttrs(const AttributeSet L, const AttributeSet R) const;
+ int cmpRangeMetadata(const MDNode *L, const MDNode *R) const;
+ int cmpOperandBundlesSchema(const Instruction *L, const Instruction *R) const;
+
+ /// Compare two GEPs for equivalent pointer arithmetic.
+ /// Parts to be compared for each comparison stage,
+ /// most significant stage first:
+ /// 1. Address space. As numbers.
+ /// 2. Constant offset, (using GEPOperator::accumulateConstantOffset method).
+ /// 3. Pointer operand type (using cmpType method).
+ /// 4. Number of operands.
+ /// 5. Compare operands, using cmpValues method.
+ int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR) const;
+ int cmpGEPs(const GetElementPtrInst *GEPL,
+ const GetElementPtrInst *GEPR) const {
+ return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
+ }
+
+ /// Assign serial numbers to values from left function, and values from
+ /// right function.
+ /// Explanation:
+ /// When comparing functions we need to compare the values we meet on the
+ /// left and right sides.
+ /// It's easy to sort things out for external values: it should just be
+ /// the same value on the left and right.
+ /// But for local values (those introduced inside the function body)
+ /// we have to ensure they were introduced at exactly the same place,
+ /// and play the same role.
+ /// Let's assign a serial number to each value when we first meet it.
+ /// Values that were met at the same place will get the same serial numbers.
+ /// In this case it would be good to explain a few points about values
+ /// assigned to BBs and other implementation choices (see below).
+ ///
+ /// 1. Safety of BB reordering.
+ /// It's safe to change the order of BasicBlocks in function.
+ /// Relationship with other functions and serial numbering will not be
+ /// changed in this case.
+ /// As follows from FunctionComparator::compare(), we do a CFG walk: we start
+ /// from the entry, and then take each terminator. So it doesn't matter how the
+ /// BBs are actually ordered in the function. And since cmpValues is called
+ /// during this walk, the numbering depends only on how the BBs are located
+ /// inside the CFG. So the answer is yes: we will get the same numbering.
+ ///
+ /// 2. Impossibility to use dominance properties of values.
+ /// If we compare two instruction operands: first is usage of local
+ /// variable AL from function FL, and second is usage of local variable AR
+ /// from FR, we could compare their origins and check whether they are
+ /// defined at the same place.
+ /// But, we are still not able to compare operands of PHI nodes, since those
+ /// could be operands from further BBs we didn't scan yet.
+ /// So it's impossible to use dominance properties in general.
+ mutable DenseMap<const Value*, int> sn_mapL, sn_mapR;
+
+ // The global state we will use
+ GlobalNumberState* GlobalNumbers;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
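
A sketch of MergeFunctions-style usage: a single GlobalNumberState is shared across all comparisons, the cheap hash serves as a prefilter, and compare() provides the authoritative total order (0 means equivalent):

    #include "llvm/Transforms/Utils/FunctionComparator.h"

    bool areEquivalent(llvm::Function &L, llvm::Function &R,
                       llvm::GlobalNumberState &GN) {
      // Different hashes imply different functions; equal hashes must still
      // be confirmed by the full comparison.
      if (llvm::FunctionComparator::functionHash(L) !=
          llvm::FunctionComparator::functionHash(R))
        return false;
      return llvm::FunctionComparator(&L, &R, &GN).compare() == 0;
    }
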
diff --git a/include/llvm/Transforms/Utils/FunctionImportUtils.h b/include/llvm/Transforms/Utils/FunctionImportUtils.h
index 3b94ef60be5d..57b7d0fcd7cc 100644
--- a/include/llvm/Transforms/Utils/FunctionImportUtils.h
+++ b/include/llvm/Transforms/Utils/FunctionImportUtils.h
@@ -41,7 +41,7 @@ class FunctionImportGlobalProcessing {
bool HasExportedFunctions = false;
/// Check if we should promote the given local value to global scope.
- bool doPromoteLocalToGlobal(const GlobalValue *SGV);
+ bool shouldPromoteLocalToGlobal(const GlobalValue *SGV);
/// Helper methods to check if we are importing from or potentially
/// exporting from the current source module.
@@ -54,8 +54,9 @@ class FunctionImportGlobalProcessing {
/// Get the name for SGV that should be used in the linked destination
/// module. Specifically, this handles the case where we need to rename
- /// a local that is being promoted to global scope.
- std::string getName(const GlobalValue *SGV);
+ /// a local that is being promoted to global scope, which it will always
+ /// do when \p DoPromote is true (or when importing a local).
+ std::string getName(const GlobalValue *SGV, bool DoPromote);
/// Process globals so that they can be used in ThinLTO. This includes
/// promoting local variables so that they can be reference externally by
@@ -66,8 +67,9 @@ class FunctionImportGlobalProcessing {
/// Get the new linkage for SGV that should be used in the linked destination
/// module. Specifically, for ThinLTO importing or exporting it may need
- /// to be adjusted.
- GlobalValue::LinkageTypes getLinkage(const GlobalValue *SGV);
+ /// to be adjusted. When \p DoPromote is true then we must adjust the
+ /// linkage for a required promotion of a local to global scope.
+ GlobalValue::LinkageTypes getLinkage(const GlobalValue *SGV, bool DoPromote);
public:
FunctionImportGlobalProcessing(
diff --git a/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h b/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h
new file mode 100644
index 000000000000..bb7fa523cb19
--- /dev/null
+++ b/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h
@@ -0,0 +1,107 @@
+//===-- ImportedFunctionsInliningStats.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Generating inliner statistics for imported functions, mostly useful for
+// ThinLTO.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+#define LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class Module;
+class Function;
+/// \brief Calculate and dump ThinLTO specific inliner stats.
+/// The main statistics are:
+/// (1) Number of inlined imported functions,
+/// (2) Number of imported functions inlined into importing module (indirect),
+/// (3) Number of non imported functions inlined into importing module
+/// (indirect).
+/// The difference between first and the second is that first stat counts
+/// all performed inlines on imported functions, but the second one only the
+/// functions that have been eventually inlined to a function in the importing
+/// module (by a chain of inlines). Because llvm uses a bottom-up inliner, it is
+/// possible to e.g. import functions `A` and `B`, and then inline `B` into `A`;
+/// after this `A` might be too big to be inlined into some other function
+/// that calls it. It calculates this statistic by building a graph, where
+/// the nodes are functions and the edges are performed inlines, and then by
+/// marking the edges starting from non-imported functions.
+///
+/// If `Verbose` is set to true, then it also dumps statistics
+/// per inlined function, sorted by the greatest inline count:
+/// - number of performed inlines
+/// - number of performed inlines into the importing module
+class ImportedFunctionsInliningStatistics {
+private:
+ /// InlineGraphNode represents a node in the graph of inlined functions.
+ struct InlineGraphNode {
+ // Default-constructible and movable.
+ InlineGraphNode() = default;
+ InlineGraphNode(InlineGraphNode &&) = default;
+ InlineGraphNode &operator=(InlineGraphNode &&) = default;
+
+ llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
+ /// Incremented every direct inline.
+ int32_t NumberOfInlines = 0;
+ /// Number of inlines into non imported function (possibly indirect via
+ /// intermediate inlines). Computed based on graph search.
+ int32_t NumberOfRealInlines = 0;
+ bool Imported = false;
+ bool Visited = false;
+ };
+
+public:
+ ImportedFunctionsInliningStatistics() = default;
+ ImportedFunctionsInliningStatistics(
+ const ImportedFunctionsInliningStatistics &) = delete;
+
+ /// Set information like AllFunctions, ImportedFunctions, ModuleName.
+ void setModuleInfo(const Module &M);
+ /// Record inline of @param Callee into @param Caller for statistics.
+ void recordInline(const Function &Caller, const Function &Callee);
+ /// Dump stats computed with InlinerStatistics class.
+ /// If @param Verbose is true then separate statistics for every inlined
+ /// function will be printed.
+ void dump(bool Verbose);
+
+private:
+ /// Creates a new node in NodesMap and sets attributes, or returns the existing one.
+ InlineGraphNode &createInlineGraphNode(const Function &);
+ void calculateRealInlines();
+ void dfs(InlineGraphNode &GraphNode);
+
+ using NodesMapTy =
+ llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
+ using SortedNodesTy =
+ std::vector<const NodesMapTy::MapEntryTy*>;
+ /// Returns vector of elements sorted by
+ /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
+ SortedNodesTy getSortedNodes();
+
+private:
+ /// This map manages the lifetime of all InlineGraphNodes. A unique pointer to
+ /// InlineGraphNode is used since the node pointers are also saved in the
+ /// InlinedCallees vector. If it stored InlineGraphNode by value instead, the
+ /// addresses of the nodes would not be stable.
+ NodesMapTy NodesMap;
+ /// Non-external functions that have some other function inlined into them.
+ std::vector<StringRef> NonImportedCallers;
+ int AllFunctions = 0;
+ int ImportedFunctions = 0;
+ StringRef ModuleName;
+};
+
+} // llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
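
A sketch of the intended call pattern inside an inliner pass (in-tree these calls happen at different points of the pass, not in one function as shown here):

    #include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"

    // Stats would normally be a member of the inliner pass.
    void trackAndReport(llvm::Module &M,
                        llvm::ImportedFunctionsInliningStatistics &Stats,
                        const llvm::Function &Caller,
                        const llvm::Function &Callee) {
      Stats.setModuleInfo(M);             // once, when the pass starts on M
      Stats.recordInline(Caller, Callee); // after each performed inline
      Stats.dump(/*Verbose=*/true);       // once, when the pass finishes
    }
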
diff --git a/include/llvm/Transforms/Utils/LCSSA.h b/include/llvm/Transforms/Utils/LCSSA.h
index f0277d021541..fe717e5f6635 100644
--- a/include/llvm/Transforms/Utils/LCSSA.h
+++ b/include/llvm/Transforms/Utils/LCSSA.h
@@ -37,7 +37,7 @@ namespace llvm {
/// Converts loops into loop-closed SSA form.
class LCSSAPass : public PassInfoMixin<LCSSAPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h b/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
new file mode 100644
index 000000000000..c9df532e5794
--- /dev/null
+++ b/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
@@ -0,0 +1,27 @@
+//===- LibCallsShrinkWrap.h - Shrink Wrap Library Calls -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+#define LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LibCallsShrinkWrapPass : public PassInfoMixin<LibCallsShrinkWrapPass> {
+public:
+ static StringRef name() { return "LibCallsShrinkWrapPass"; }
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index 43b376cf8068..490a765c3fab 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -32,6 +32,7 @@ class BranchInst;
class Instruction;
class CallInst;
class DbgDeclareInst;
+class DbgValueInst;
class StoreInst;
class LoadInst;
class Value;
@@ -48,6 +49,8 @@ class LazyValueInfo;
template<typename T> class SmallVectorImpl;
+typedef SmallVector<DbgValueInst *, 1> DbgValueList;
+
//===----------------------------------------------------------------------===//
// Local constant propagation.
//
@@ -161,10 +164,15 @@ AllocaInst *DemoteRegToStack(Instruction &X,
/// deleted and it returns the pointer to the alloca inserted.
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
-/// If the specified pointer has an alignment that we can determine, return it,
-/// otherwise return 0. If PrefAlign is specified, and it is more than the
-/// alignment of the ultimate object, see if we can increase the alignment of
-/// the ultimate object, making this check succeed.
+/// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
+/// the owning object can be modified and has an alignment less than \p
+/// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment
+/// cannot be increased, the known alignment of the value is returned.
+///
+/// It is not always possible to modify the alignment of the underlying object,
+/// so if alignment is important, a more reliable approach is to simply align
+/// all global variables and allocation instructions to their preferred
+/// alignment from the beginning.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout &DL,
const Instruction *CxtI = nullptr,
@@ -209,7 +217,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
if (OpC->getType()->isVectorTy())
OpC = OpC->getSplatValue();
@@ -250,14 +258,19 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.decl intrinsic.
-bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
+void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
StoreInst *SI, DIBuilder &Builder);
/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.decl intrinsic.
-bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
+void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
LoadInst *LI, DIBuilder &Builder);
+/// Inserts a llvm.dbg.value intrinsic after a phi of an alloca'd value
+/// that has an associated llvm.dbg.decl intrinsic.
+void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
+ PHINode *LI, DIBuilder &Builder);
+
/// Lowers llvm.dbg.declare intrinsics into appropriate set of
/// llvm.dbg.value intrinsics.
bool LowerDbgDeclare(Function &F);
@@ -265,6 +278,9 @@ bool LowerDbgDeclare(Function &F);
/// Finds the llvm.dbg.declare intrinsic corresponding to an alloca, if any.
DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
+/// Finds the llvm.dbg.value intrinsics corresponding to an alloca, if any.
+void FindAllocaDbgValues(DbgValueList &DbgValues, Value *V);
+
/// Replaces llvm.dbg.declare instruction when the address it describes
/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
/// prepended to the expression. If Offset is non-zero, a constant displacement
@@ -296,7 +312,15 @@ unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
/// Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
-unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap);
+unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
+ bool PreserveLCSSA = false);
+
+/// Convert the CallInst to InvokeInst with the specified unwind edge basic
+/// block. This also splits the basic block where CI is located, because
+/// InvokeInst is a terminator instruction. Returns the newly split basic
+/// block.
+BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
+ BasicBlock *UnwindEdge);
/// Replace 'BB's terminator with one that does not have an unwind successor
/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
@@ -316,6 +340,12 @@ bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr);
/// Metadata not listed as known via KnownIDs is removed
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
+/// Combine the metadata of two instructions so that K can replace J. This
+/// specifically handles the case of CSE-like transformations.
+///
+/// Unknown metadata is removed.
+void combineMetadataForCSE(Instruction *K, const Instruction *J);
+
/// Replace each use of 'From' with 'To' if that use is dominated by
/// the given edge. Returns the number of replacements made.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
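
A sketch of the CSE scenario combineMetadataForCSE is meant for: before replacing the later instruction J with the equivalent, earlier instruction K, merge their metadata so K stays correct at both program points, then rewrite uses and delete J:

    #include "llvm/IR/Instruction.h"
    #include "llvm/Transforms/Utils/Local.h"

    void cseReplace(llvm::Instruction *K, llvm::Instruction *J) {
      llvm::combineMetadataForCSE(K, J); // unknown metadata is dropped from K
      J->replaceAllUsesWith(K);
      J->eraseFromParent();
    }
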
diff --git a/include/llvm/Transforms/Utils/LoopSimplify.h b/include/llvm/Transforms/Utils/LoopSimplify.h
index 7cf89eaeb939..f3828bc16e2f 100644
--- a/include/llvm/Transforms/Utils/LoopSimplify.h
+++ b/include/llvm/Transforms/Utils/LoopSimplify.h
@@ -49,7 +49,7 @@ namespace llvm {
/// This pass is responsible for loop canonicalization.
class LoopSimplifyPass : public PassInfoMixin<LoopSimplifyPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Simplify each loop in a loop nest recursively.
diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h
index 89fea10f818a..845069d4260a 100644
--- a/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/include/llvm/Transforms/Utils/LoopUtils.h
@@ -246,7 +246,7 @@ private:
RecurrenceKind Kind;
// If this a min/max recurrence the kind of recurrence.
MinMaxRecurrenceKind MinMaxKind;
- // First occurance of unasfe algebra in the PHI's use-chain.
+ // First occurrence of unsafe algebra in the PHI's use-chain.
Instruction *UnsafeAlgebraInst;
// The type of the recurrence.
Type *RecurrenceType;
@@ -263,13 +263,15 @@ public:
enum InductionKind {
IK_NoInduction, ///< Not an induction variable.
IK_IntInduction, ///< Integer induction variable. Step = C.
- IK_PtrInduction ///< Pointer induction var. Step = C / sizeof(elem).
+ IK_PtrInduction, ///< Pointer induction var. Step = C / sizeof(elem).
+ IK_FpInduction ///< Floating point induction variable.
};
public:
/// Default constructor - creates an invalid induction.
InductionDescriptor()
- : StartValue(nullptr), IK(IK_NoInduction), Step(nullptr) {}
+ : StartValue(nullptr), IK(IK_NoInduction), Step(nullptr),
+ InductionBinOp(nullptr) {}
/// Get the consecutive direction. Returns:
/// 0 - unknown or non-consecutive.
@@ -291,26 +293,58 @@ public:
const SCEV *getStep() const { return Step; }
ConstantInt *getConstIntStepValue() const;
- /// Returns true if \p Phi is an induction. If \p Phi is an induction,
- /// the induction descriptor \p D will contain the data describing this
- /// induction. If by some other means the caller has a better SCEV
+ /// Returns true if \p Phi is an induction in the loop \p L. If \p Phi is an
+ /// induction, the induction descriptor \p D will contain the data describing
+ /// this induction. If by some other means the caller has a better SCEV
/// expression for \p Phi than the one returned by the ScalarEvolution
/// analysis, it can be passed through \p Expr.
- static bool isInductionPHI(PHINode *Phi, ScalarEvolution *SE,
+ static bool isInductionPHI(PHINode *Phi, const Loop* L, ScalarEvolution *SE,
InductionDescriptor &D,
const SCEV *Expr = nullptr);
- /// Returns true if \p Phi is an induction, in the context associated with
- /// the run-time predicate of PSE. If \p Assume is true, this can add further
- /// SCEV predicates to \p PSE in order to prove that \p Phi is an induction.
+ /// Returns true if \p Phi is a floating point induction in the loop \p L.
+ /// If \p Phi is an induction, the induction descriptor \p D will contain
+ /// the data describing this induction.
+ static bool isFPInductionPHI(PHINode *Phi, const Loop* L,
+ ScalarEvolution *SE, InductionDescriptor &D);
+
+ /// Returns true if \p Phi is an induction of loop \p L, in the context associated
+ /// with the run-time predicate of PSE. If \p Assume is true, this can add
+ /// further SCEV predicates to \p PSE in order to prove that \p Phi is an
+ /// induction.
/// If \p Phi is an induction, \p D will contain the data describing this
/// induction.
- static bool isInductionPHI(PHINode *Phi, PredicatedScalarEvolution &PSE,
+ static bool isInductionPHI(PHINode *Phi, const Loop* L,
+ PredicatedScalarEvolution &PSE,
InductionDescriptor &D, bool Assume = false);
+ /// Returns true if the induction type is FP and the binary operator does
+ /// not have the "fast-math" property. Such operation requires a relaxed FP
+ /// mode.
+ bool hasUnsafeAlgebra() {
+ return InductionBinOp &&
+ !cast<FPMathOperator>(InductionBinOp)->hasUnsafeAlgebra();
+ }
+
+ /// Returns the induction operator if it does not have the "fast-math"
+ /// property (and therefore requires unsafe FP mode); returns nullptr otherwise.
+ Instruction *getUnsafeAlgebraInst() {
+ if (!InductionBinOp ||
+ cast<FPMathOperator>(InductionBinOp)->hasUnsafeAlgebra())
+ return nullptr;
+ return InductionBinOp;
+ }
+
+ /// Returns binary opcode of the induction operator.
+ Instruction::BinaryOps getInductionOpcode() const {
+ return InductionBinOp ? InductionBinOp->getOpcode() :
+ Instruction::BinaryOpsEnd;
+ }
+
private:
/// Private constructor - used by \c isInductionPHI.
- InductionDescriptor(Value *Start, InductionKind K, const SCEV *Step);
+ InductionDescriptor(Value *Start, InductionKind K, const SCEV *Step,
+ BinaryOperator *InductionBinOp = nullptr);
/// Start value.
TrackingVH<Value> StartValue;
@@ -318,6 +352,8 @@ private:
InductionKind IK;
/// Step value.
const SCEV *Step;
+ // Instruction that advances induction variable.
+ BinaryOperator *InductionBinOp;
};
BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
@@ -425,12 +461,28 @@ Optional<const MDOperand *> findStringMetadataForLoop(Loop *TheLoop,
void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
unsigned V = 0);
+/// \brief Get a loop's estimated trip count based on branch weight metadata.
+/// Returns 0 when the count is estimated to be 0, or None when a meaningful
+/// estimate cannot be made.
+Optional<unsigned> getLoopEstimatedTripCount(Loop *L);
+
/// Helper to consistently add the set of standard passes to a loop pass's \c
/// AnalysisUsage.
///
/// All loop passes should call this as part of implementing their \c
/// getAnalysisUsage.
void getLoopAnalysisUsage(AnalysisUsage &AU);
+
+/// Returns true if the hoister and sinker can handle this instruction.
+/// If SafetyInfo is null, we are checking for sinking instructions from
+/// preheader to loop body (no speculation).
+/// If SafetyInfo is not null, we are checking for hoisting/sinking
+/// instructions from the loop body to the preheader/exit. Check whether the
+/// instruction can be executed speculatively.
+///
+bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
+ Loop *CurLoop, AliasSetTracker *CurAST,
+ LoopSafetyInfo *SafetyInfo);
}
#endif
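A sketch of the extended InductionDescriptor API, assuming a caller that already has the enclosing Loop and ScalarEvolution; the helper name is illustrative only. It shows the new Loop parameter and the fast-math check for the new FP induction kind.

    // Illustrative only; isVectorizableInduction is not an LLVM API.
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/LoopUtils.h"
    using namespace llvm;

    static bool isVectorizableInduction(PHINode *Phi, Loop *L, ScalarEvolution *SE) {
      InductionDescriptor ID;
      // The loop is now passed explicitly to the induction check.
      if (!InductionDescriptor::isInductionPHI(Phi, L, SE, ID))
        return false;
      // New IK_FpInduction kind: only transform it if the update is fast-math.
      return !ID.hasUnsafeAlgebra();
    }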
diff --git a/include/llvm/Transforms/Utils/LowerInvoke.h b/include/llvm/Transforms/Utils/LowerInvoke.h
new file mode 100644
index 000000000000..12774c7fd1f7
--- /dev/null
+++ b/include/llvm/Transforms/Utils/LowerInvoke.h
@@ -0,0 +1,30 @@
+//===- LowerInvoke.h - Eliminate Invoke instructions ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation is designed for use by code generators which do not yet
+// support stack unwinding. This pass converts 'invoke' instructions to 'call'
+// instructions, so that any exception-handling 'landingpad' blocks become dead
+// code (which can be removed by running the '-simplifycfg' pass afterwards).
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+#define LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LowerInvokePass : public PassInfoMixin<LowerInvokePass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
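A sketch of wiring the new-PM wrapper into a pipeline; the pipeline-building function is hypothetical. Per the file comment, a later -simplifycfg run (not shown) would clean up the dead landingpad blocks.

    // Hypothetical pipeline snippet; only LowerInvokePass comes from this patch.
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Utils/LowerInvoke.h"
    using namespace llvm;

    static void addLowerInvoke(FunctionPassManager &FPM) {
      FPM.addPass(LowerInvokePass()); // rewrites 'invoke' instructions to 'call'
    }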
diff --git a/include/llvm/Transforms/Utils/Mem2Reg.h b/include/llvm/Transforms/Utils/Mem2Reg.h
index f3c80edf544d..456876b520b0 100644
--- a/include/llvm/Transforms/Utils/Mem2Reg.h
+++ b/include/llvm/Transforms/Utils/Mem2Reg.h
@@ -21,7 +21,7 @@
namespace llvm {
class PromotePass : public PassInfoMixin<PromotePass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}
diff --git a/include/llvm/Transforms/Utils/MemorySSA.h b/include/llvm/Transforms/Utils/MemorySSA.h
index befc34cb80fc..408c6a157cd0 100644
--- a/include/llvm/Transforms/Utils/MemorySSA.h
+++ b/include/llvm/Transforms/Utils/MemorySSA.h
@@ -110,6 +110,11 @@ class Instruction;
class MemoryAccess;
class LLVMContext;
class raw_ostream;
+enum {
+ // Used to signify what the default invalid ID is for MemoryAccess's
+ // getID()
+ INVALID_MEMORYACCESS_ID = 0
+};
template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
@@ -157,7 +162,8 @@ protected:
friend class MemoryDef;
friend class MemoryPhi;
- /// \brief Used internally to give IDs to MemoryAccesses for printing
+ /// \brief Used for debugging and tracking things about MemoryAccesses.
+ /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
virtual unsigned getID() const = 0;
MemoryAccess(LLVMContext &C, unsigned Vty, BasicBlock *BB,
@@ -170,25 +176,6 @@ private:
BasicBlock *Block;
};
-template <>
-struct ilist_traits<MemoryAccess> : public ilist_default_traits<MemoryAccess> {
- /// See details of the instruction class for why this trick works
- // FIXME: This downcast is UB. See llvm.org/PR26753.
- LLVM_NO_SANITIZE("object-size")
- MemoryAccess *createSentinel() const {
- return static_cast<MemoryAccess *>(&Sentinel);
- }
-
- static void destroySentinel(MemoryAccess *) {}
-
- MemoryAccess *provideInitialHead() const { return createSentinel(); }
- MemoryAccess *ensureHead(MemoryAccess *) const { return createSentinel(); }
- static void noteHead(MemoryAccess *, MemoryAccess *) {}
-
-private:
- mutable ilist_half_node<MemoryAccess> Sentinel;
-};
-
inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
MA.print(OS);
return OS;
@@ -254,7 +241,7 @@ public:
void *operator new(size_t s) { return User::operator new(s, 1); }
MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
- : MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB) {}
+ : MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB), OptimizedID(0) {}
static inline bool classof(const MemoryUse *) { return true; }
static inline bool classof(const Value *MA) {
@@ -262,6 +249,18 @@ public:
}
void print(raw_ostream &OS) const override;
+ void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
+ if (Optimized)
+ OptimizedID = DMA->getID();
+ MemoryUseOrDef::setDefiningAccess(DMA);
+ }
+ bool isOptimized() const {
+ return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
+ }
+ /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
+ /// be rewalked by the walker if necessary.
+ /// This really should only be called by tests.
+ void resetOptimized() { OptimizedID = INVALID_MEMORYACCESS_ID; }
protected:
friend class MemorySSA;
@@ -269,6 +268,9 @@ protected:
unsigned getID() const override {
llvm_unreachable("MemoryUses do not have IDs");
}
+
+private:
+ unsigned int OptimizedID;
};
template <>
@@ -307,8 +309,6 @@ public:
protected:
friend class MemorySSA;
- // For debugging only. This gets used to give memory accesses pretty numbers
- // when printing them out
unsigned getID() const override { return ID; }
private:
@@ -387,6 +387,14 @@ public:
return block_begin() + getNumOperands();
}
+ iterator_range<block_iterator> blocks() {
+ return make_range(block_begin(), block_end());
+ }
+
+ iterator_range<const_block_iterator> blocks() const {
+ return make_range(block_begin(), block_end());
+ }
+
op_range incoming_values() { return operands(); }
const_op_range incoming_values() const { return operands(); }
@@ -465,8 +473,6 @@ protected:
User::allocHungoffUses(N, /* IsPhi */ true);
}
- /// For debugging only. This gets used to give memory accesses pretty numbers
- /// when printing them out
unsigned getID() const final { return ID; }
private:
@@ -494,7 +500,6 @@ class MemorySSAWalker;
class MemorySSA {
public:
MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
- MemorySSA(MemorySSA &&);
~MemorySSA();
MemorySSAWalker *getWalker();
@@ -503,7 +508,7 @@ public:
/// access associated with it. If passed a basic block gets the memory phi
/// node that exists for that block, if there is one. Otherwise, this will get
/// a MemoryUseOrDef.
- MemoryAccess *getMemoryAccess(const Value *) const;
+ MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
void dump() const;
@@ -530,11 +535,12 @@ public:
///
/// This list is not modifiable by the user.
const AccessList *getBlockAccesses(const BasicBlock *BB) const {
- auto It = PerBlockAccesses.find(BB);
- return It == PerBlockAccesses.end() ? nullptr : It->second.get();
+ return getWritableBlockAccesses(BB);
}
- /// \brief Create an empty MemoryPhi in MemorySSA
+ /// \brief Create an empty MemoryPhi in MemorySSA for a given basic block.
+ /// Only one MemoryPhi for a block exists at a time, so this function will
+ /// assert if you try to create one where it already exists.
MemoryPhi *createMemoryPhi(BasicBlock *BB);
enum InsertionPlace { Beginning, End };
@@ -550,6 +556,8 @@ public:
/// will be placed. The caller is expected to keep ordering the same as
/// instructions.
/// It will return the new MemoryAccess.
+ /// Note: If a MemoryAccess already exists for I, this function will make it
+ /// inaccessible and it *must* have removeMemoryAccess called on it.
MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
const BasicBlock *BB,
InsertionPlace Point);
@@ -561,12 +569,23 @@ public:
/// used to replace an existing memory instruction. It will *not* create PHI
/// nodes, or verify the clobbering definition. The clobbering definition
/// must be non-null.
- MemoryAccess *createMemoryAccessBefore(Instruction *I,
- MemoryAccess *Definition,
- MemoryAccess *InsertPt);
- MemoryAccess *createMemoryAccessAfter(Instruction *I,
- MemoryAccess *Definition,
- MemoryAccess *InsertPt);
+ /// Note: If a MemoryAccess already exists for I, this function will make it
+ /// inaccessible and it *must* have removeMemoryAccess called on it.
+ MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
+ MemoryAccess *Definition,
+ MemoryUseOrDef *InsertPt);
+ MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
+ MemoryAccess *Definition,
+ MemoryAccess *InsertPt);
+
+ // \brief Splice \p What to just before \p Where.
+ //
+ // In order to be efficient, the following conditions must be met:
+ // - \p Where dominates \p What,
+ // - All memory accesses in [\p Where, \p What) are no-alias with \p What.
+ //
+ // TODO: relax the MemoryDef requirement on Where.
+ void spliceMemoryAccessAbove(MemoryDef *Where, MemoryUseOrDef *What);
/// \brief Remove a MemoryAccess from MemorySSA, including updating all
/// definitions and uses.
@@ -580,6 +599,14 @@ public:
/// whether MemoryAccess \p A dominates MemoryAccess \p B.
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
+ /// \brief Given two memory accesses in potentially different blocks,
+ /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
+ bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
+
+ /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
+ /// dominates Use \p B.
+ bool dominates(const MemoryAccess *A, const Use &B) const;
+
/// \brief Verify that MemorySSA is self consistent (IE definitions dominate
/// all uses, uses appear in the right places). This is used by unit tests.
void verifyMemorySSA() const;
@@ -592,9 +619,20 @@ protected:
void verifyDomination(Function &F) const;
void verifyOrdering(Function &F) const;
+ // This is used by the use optimizer class
+ AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
+ auto It = PerBlockAccesses.find(BB);
+ return It == PerBlockAccesses.end() ? nullptr : It->second.get();
+ }
+
private:
class CachingWalker;
+ class OptimizeUses;
+
+ CachingWalker *getWalkerImpl();
void buildMemorySSA();
+ void optimizeUses();
+
void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
@@ -608,10 +646,14 @@ private:
MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
void removeFromLookups(MemoryAccess *);
+ void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
+ const DenseMap<const BasicBlock *, unsigned int> &);
MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *);
void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
SmallPtrSet<BasicBlock *, 16> &Visited);
AccessList *getOrCreateAccessList(const BasicBlock *);
+ void renumberBlock(const BasicBlock *) const;
+
AliasAnalysis *AA;
DominatorTree *DT;
Function &F;
@@ -621,6 +663,12 @@ private:
AccessMap PerBlockAccesses;
std::unique_ptr<MemoryAccess> LiveOnEntryDef;
+ // Domination mappings
+ // Note that the numbering is local to a block, even though the map is
+ // global.
+ mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
+ mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
+
// Memory SSA building info
std::unique_ptr<CachingWalker> Walker;
unsigned NextID;
@@ -641,12 +689,20 @@ public:
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
friend AnalysisInfoMixin<MemorySSAAnalysis>;
- static char PassID;
+ static AnalysisKey Key;
public:
- typedef MemorySSA Result;
+ // Wrap MemorySSA result to ensure address stability of internal MemorySSA
+ // pointers after construction. Use a wrapper class instead of plain
+ // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
+ struct Result {
+ Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
+ MemorySSA &getMSSA() { return *MSSA.get(); }
- MemorySSA run(Function &F, AnalysisManager<Function> &AM);
+ std::unique_ptr<MemorySSA> MSSA;
+ };
+
+ Result run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for \c MemorySSA.
@@ -655,12 +711,12 @@ class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
public:
explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c MemorySSA.
@@ -715,7 +771,7 @@ public:
/// store %a
/// } else {
/// 2 = MemoryDef(liveOnEntry)
- /// store %b
+ /// store %b
/// }
/// 3 = MemoryPhi(2, 1)
/// MemoryUse(3)
@@ -723,7 +779,15 @@ public:
///
/// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
/// in the if (a) branch.
- virtual MemoryAccess *getClobberingMemoryAccess(const Instruction *) = 0;
+ MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
+ MemoryAccess *MA = MSSA->getMemoryAccess(I);
+ assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
+ return getClobberingMemoryAccess(MA);
+ }
+
+ /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
+ /// but takes a MemoryAccess instead of an Instruction.
+ virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
/// \brief Given a potentially clobbering memory access and a new location,
/// calling this will give you the nearest dominating clobbering MemoryAccess
@@ -737,7 +801,7 @@ public:
/// will return that MemoryDef, whereas the above would return the clobber
/// starting from the use side of the memory def.
virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
- MemoryLocation &) = 0;
+ const MemoryLocation &) = 0;
/// \brief Given a memory access, invalidate anything this walker knows about
/// that access.
@@ -746,6 +810,8 @@ public:
/// the walker it uses or returns.
virtual void invalidateInfo(MemoryAccess *) {}
+ virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
+
protected:
friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
// constructor.
@@ -756,9 +822,12 @@ protected:
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
- MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
+ // Keep the overrides below from hiding the Instruction overload of
+ // getClobberingMemoryAccess.
+ using MemorySSAWalker::getClobberingMemoryAccess;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
- MemoryLocation &) override;
+ const MemoryLocation &) override;
};
using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
@@ -837,29 +906,21 @@ inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
- using NodeType = MemoryAccess;
+ using NodeRef = MemoryAccess *;
using ChildIteratorType = memoryaccess_def_iterator;
- static NodeType *getEntryNode(NodeType *N) { return N; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->defs_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->defs_end();
- }
+ static NodeRef getEntryNode(NodeRef N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};
template <> struct GraphTraits<Inverse<MemoryAccess *>> {
- using NodeType = MemoryAccess;
+ using NodeRef = MemoryAccess *;
using ChildIteratorType = MemoryAccess::iterator;
- static NodeType *getEntryNode(NodeType *N) { return N; }
- static inline ChildIteratorType child_begin(NodeType *N) {
- return N->user_begin();
- }
- static inline ChildIteratorType child_end(NodeType *N) {
- return N->user_end();
- }
+ static NodeRef getEntryNode(NodeRef N) { return N; }
+ static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
+ static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
/// \brief Provide an iterator that walks defs, giving both the memory access,
@@ -944,6 +1005,10 @@ inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
+// Returns true if MD may alias MU; returns false otherwise.
+bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
+ AliasAnalysis &AA);
+
} // end namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
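A sketch against the revised MemorySSA interface, assuming new-PM analysis access; the helper name is illustrative. Results now come through the address-stable Result wrapper, getMemoryAccess takes an Instruction and returns a MemoryUseOrDef, and the walker's clobber query is reached via the MemoryAccess overload.

    // Illustrative helper; clobberOf is not part of the patch.
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Utils/MemorySSA.h"
    using namespace llvm;

    static MemoryAccess *clobberOf(Instruction &I, Function &F,
                                   FunctionAnalysisManager &AM) {
      MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
      if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I))
        return MSSA.getWalker()->getClobberingMemoryAccess(MA);
      return nullptr; // I is not a memory-touching instruction
    }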
diff --git a/include/llvm/Transforms/Utils/ModuleUtils.h b/include/llvm/Transforms/Utils/ModuleUtils.h
index 2eb2b1363b0b..27508799f8e0 100644
--- a/include/llvm/Transforms/Utils/ModuleUtils.h
+++ b/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -55,9 +55,31 @@ std::pair<Function *, Function *> createSanitizerCtorAndInitFunctions(
ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
StringRef VersionCheckName = StringRef());
-/// Rename all the anon functions in the module using a hash computed from
+/// Rename all the anon globals in the module using a hash computed from
/// the list of public globals in the module.
-bool nameUnamedFunctions(Module &M);
+bool nameUnamedGlobals(Module &M);
+
+/// \brief Adds global values to the llvm.used list.
+void appendToUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// \brief Adds global values to the llvm.compiler.used list.
+void appendToCompilerUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// Filter out potentially dead comdat functions where other entries keep the
+/// entire comdat group alive.
+///
+/// This is designed for cases where functions appear to become dead but remain
+/// alive due to other live entries in their comdat group.
+///
+/// The \p DeadComdatFunctions container should only have pointers to
+/// `Function`s which are members of a comdat group and are believed to be
+/// dead.
+///
+/// After this routine finishes, the only remaining `Function`s in \p
+/// DeadComdatFunctions are those where every member of the comdat is listed
+/// and thus removing them is safe (provided *all* are removed).
+void filterDeadComdatFunctions(
+ Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions);
} // End llvm namespace
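A sketch of the new llvm.used helpers, assuming an instrumentation pass that wants a global to survive dead-global elimination; the wrapper name is hypothetical.

    // Hypothetical wrapper around the new ModuleUtils helpers.
    #include "llvm/IR/GlobalValue.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/ModuleUtils.h"
    using namespace llvm;

    static void keepAlive(Module &M, GlobalValue *GV) {
      // llvm.used: GV must be preserved even against tools that see the object.
      appendToUsed(M, {GV});
      // For compiler-only preservation, appendToCompilerUsed(M, {GV}) would be
      // used instead.
    }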
diff --git a/include/llvm/Transforms/Utils/NameAnonGlobals.h b/include/llvm/Transforms/Utils/NameAnonGlobals.h
new file mode 100644
index 000000000000..4bec361674bb
--- /dev/null
+++ b/include/llvm/Transforms/Utils/NameAnonGlobals.h
@@ -0,0 +1,31 @@
+//===-- NameAnonGlobals.h - Anonymous Global Naming Pass ----*- C++ -*-=======//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements naming anonymous globals to make sure they can be
+// referred to by ThinLTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+#define LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that provides a name to every anonymous global.
+class NameAnonGlobalPass : public PassInfoMixin<NameAnonGlobalPass> {
+public:
+ NameAnonGlobalPass() {}
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
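A sketch of scheduling the pass in a module pipeline; the function adding it is illustrative. Naming anonymous globals early keeps ThinLTO cross-module references stable.

    // Hypothetical pipeline snippet using the new-PM NameAnonGlobalPass.
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Utils/NameAnonGlobals.h"
    using namespace llvm;

    static void addThinLTOPrelinkPasses(ModulePassManager &MPM) {
      MPM.addPass(NameAnonGlobalPass()); // give every anonymous global a name
    }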
diff --git a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
index b5f4ac82b605..b0448fed9f4d 100644
--- a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
+++ b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -120,9 +120,8 @@ public:
if (Info->NumPreds == 0)
Info->Preds = nullptr;
else
- Info->Preds = static_cast<BBInfo**>
- (Allocator.Allocate(Info->NumPreds * sizeof(BBInfo*),
- AlignOf<BBInfo*>::Alignment));
+ Info->Preds = static_cast<BBInfo **>(Allocator.Allocate(
+ Info->NumPreds * sizeof(BBInfo *), alignof(BBInfo *)));
for (unsigned p = 0; p != Info->NumPreds; ++p) {
BlkT *Pred = Preds[p];
diff --git a/include/llvm/Transforms/Utils/SimplifyInstructions.h b/include/llvm/Transforms/Utils/SimplifyInstructions.h
index ea491dc50587..3f838611626f 100644
--- a/include/llvm/Transforms/Utils/SimplifyInstructions.h
+++ b/include/llvm/Transforms/Utils/SimplifyInstructions.h
@@ -24,7 +24,7 @@ namespace llvm {
/// This pass removes redundant instructions.
class InstSimplifierPass : public PassInfoMixin<InstSimplifierPass> {
public:
- PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm
diff --git a/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 92ee24633950..5e217adf1987 100644
--- a/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -137,6 +137,7 @@ private:
// Integer Library Call Optimizations
Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFls(CallInst *CI, IRBuilder<> &B);
Value *optimizeAbs(CallInst *CI, IRBuilder<> &B);
Value *optimizeIsDigit(CallInst *CI, IRBuilder<> &B);
Value *optimizeIsAscii(CallInst *CI, IRBuilder<> &B);
@@ -158,7 +159,6 @@ private:
SmallVectorImpl<CallInst *> &SinCalls,
SmallVectorImpl<CallInst *> &CosCalls,
SmallVectorImpl<CallInst *> &SinCosCalls);
- void replaceTrigInsts(SmallVectorImpl<CallInst *> &Calls, Value *Res);
Value *optimizePrintFString(CallInst *CI, IRBuilder<> &B);
Value *optimizeSPrintFString(CallInst *CI, IRBuilder<> &B);
Value *optimizeFPrintFString(CallInst *CI, IRBuilder<> &B);
diff --git a/include/llvm/Transforms/Utils/SymbolRewriter.h b/include/llvm/Transforms/Utils/SymbolRewriter.h
index 5ccee98f97e7..ff995173e126 100644
--- a/include/llvm/Transforms/Utils/SymbolRewriter.h
+++ b/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -30,12 +30,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H
-#define LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H
+#ifndef LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+#define LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
-#include "llvm/ADT/ilist.h"
-#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include <list>
namespace llvm {
class MemoryBuffer;
@@ -59,7 +59,7 @@ namespace SymbolRewriter {
/// be rewritten or providing a (posix compatible) regular expression that will
/// select the symbols to rewrite. This descriptor list is passed to the
/// SymbolRewriter pass.
-class RewriteDescriptor : public ilist_node<RewriteDescriptor> {
+class RewriteDescriptor {
RewriteDescriptor(const RewriteDescriptor &) = delete;
const RewriteDescriptor &
@@ -86,7 +86,7 @@ private:
const Type Kind;
};
-typedef iplist<RewriteDescriptor> RewriteDescriptorList;
+typedef std::list<std::unique_ptr<RewriteDescriptor>> RewriteDescriptorList;
class RewriteMapParser {
public:
@@ -110,43 +110,26 @@ private:
};
}
-template <>
-struct ilist_traits<SymbolRewriter::RewriteDescriptor>
- : public ilist_default_traits<SymbolRewriter::RewriteDescriptor> {
- mutable ilist_half_node<SymbolRewriter::RewriteDescriptor> Sentinel;
+ModulePass *createRewriteSymbolsPass();
+ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &);
+class RewriteSymbolPass : public PassInfoMixin<RewriteSymbolPass> {
public:
- // createSentinel is used to get a reference to a node marking the end of
- // the list. Because the sentinel is relative to this instance, use a
- // non-static method.
- SymbolRewriter::RewriteDescriptor *createSentinel() const {
- // since i[p] lists always publicly derive from the corresponding
- // traits, placing a data member in this class will augment the
- // i[p]list. Since the NodeTy is expected to publicly derive from
- // ilist_node<NodeTy>, there is a legal viable downcast from it to
- // NodeTy. We use this trick to superpose i[p]list with a "ghostly"
- // NodeTy, which becomes the sentinel. Dereferencing the sentinel is
- // forbidden (save the ilist_node<NodeTy>) so no one will ever notice
- // the superposition.
- return static_cast<SymbolRewriter::RewriteDescriptor *>(&Sentinel);
+ RewriteSymbolPass() { loadAndParseMapFiles(); }
+ RewriteSymbolPass(SymbolRewriter::RewriteDescriptorList &DL) {
+ Descriptors.splice(Descriptors.begin(), DL);
}
- void destroySentinel(SymbolRewriter::RewriteDescriptor *) {}
- SymbolRewriter::RewriteDescriptor *provideInitialHead() const {
- return createSentinel();
- }
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
- SymbolRewriter::RewriteDescriptor *
- ensureHead(SymbolRewriter::RewriteDescriptor *&) const {
- return createSentinel();
- }
+ // Glue for old PM
+ bool runImpl(Module &M);
- static void noteHead(SymbolRewriter::RewriteDescriptor *,
- SymbolRewriter::RewriteDescriptor *) {}
-};
+private:
+ void loadAndParseMapFiles();
-ModulePass *createRewriteSymbolsPass();
-ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &);
+ SymbolRewriter::RewriteDescriptorList Descriptors;
+};
}
-#endif
+#endif // LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
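A sketch of the reworked descriptor ownership, assuming a caller that has already parsed a rewrite map; the helper is illustrative. RewriteDescriptorList is now a std::list of unique_ptr rather than an ilist, and the pass takes ownership by splicing.

    // Illustrative; addSymbolRewriting is not part of the patch.
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Utils/SymbolRewriter.h"
    using namespace llvm;

    static void addSymbolRewriting(ModulePassManager &MPM,
                                   SymbolRewriter::RewriteDescriptorList &DL) {
      // The constructor splices DL's descriptors into the pass, leaving DL empty.
      MPM.addPass(RewriteSymbolPass(DL));
    }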
diff --git a/include/llvm/Transforms/Utils/UnrollLoop.h b/include/llvm/Transforms/Utils/UnrollLoop.h
index 4d370407591d..2ea28f2d4e13 100644
--- a/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -16,6 +16,9 @@
#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+// Needed because we can't forward-declare the nested struct
+// TargetTransformInfo::UnrollingPreferences
+#include "llvm/Analysis/TargetTransformInfo.h"
namespace llvm {
@@ -27,12 +30,15 @@ class LoopInfo;
class LPPassManager;
class MDNode;
class Pass;
+class OptimizationRemarkEmitter;
class ScalarEvolution;
bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
bool AllowRuntime, bool AllowExpensiveTripCount,
- unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE,
- DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA);
+ bool PreserveCondBr, bool PreserveOnlyFirst,
+ unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI,
+ ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
+ OptimizationRemarkEmitter *ORE, bool PreserveLCSSA);
bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
bool AllowExpensiveTripCount,
@@ -40,6 +46,12 @@ bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
ScalarEvolution *SE, DominatorTree *DT,
bool PreserveLCSSA);
+void computePeelCount(Loop *L, unsigned LoopSize,
+ TargetTransformInfo::UnrollingPreferences &UP);
+
+bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
+ DominatorTree *DT, bool PreserveLCSSA);
+
MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
}
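A sketch of the widened UnrollLoop entry point; the wrapper and the chosen flag values are illustrative only. The new PreserveCondBr, PreserveOnlyFirst, PeelCount, and ORE arguments sit between the existing ones.

    // Illustrative call only; flag values here are arbitrary defaults.
    #include "llvm/Transforms/Utils/UnrollLoop.h"
    using namespace llvm;

    static bool unrollByFour(Loop *L, unsigned TripCount, unsigned TripMultiple,
                             LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
                             AssumptionCache *AC, OptimizationRemarkEmitter *ORE) {
      return UnrollLoop(L, /*Count=*/4, TripCount, /*Force=*/false,
                        /*AllowRuntime=*/true, /*AllowExpensiveTripCount=*/false,
                        /*PreserveCondBr=*/false, /*PreserveOnlyFirst=*/false,
                        TripMultiple, /*PeelCount=*/0, LI, SE, DT, AC, ORE,
                        /*PreserveLCSSA=*/true);
    }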
diff --git a/include/llvm/Transforms/Vectorize/LoopVectorize.h b/include/llvm/Transforms/Vectorize/LoopVectorize.h
index e6d3e8353307..2efc7ca4f8a1 100644
--- a/include/llvm/Transforms/Vectorize/LoopVectorize.h
+++ b/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -58,6 +58,7 @@
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
+#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
@@ -84,6 +85,7 @@ struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
AliasAnalysis *AA;
AssumptionCache *AC;
std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
+ OptimizationRemarkEmitter *ORE;
BlockFrequency ColdEntryFreq;
@@ -94,7 +96,8 @@ struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
TargetTransformInfo &TTI_, DominatorTree &DT_,
BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
- std::function<const LoopAccessInfo &(Loop &)> &GetLAA_);
+ std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
+ OptimizationRemarkEmitter &ORE);
bool processLoop(Loop *L);
};
diff --git a/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
index 3a5b42411d35..d669a8e5b615 100644
--- a/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -80,7 +80,7 @@ private:
/// \returns true if a value was vectorized.
bool tryToVectorizeList(ArrayRef<Value *> VL, slpvectorizer::BoUpSLP &R,
ArrayRef<Value *> BuildVector = None,
- bool allowReorder = false);
+ bool AllowReorder = false);
/// \brief Try to vectorize a chain that may start at the operands of \V;
bool tryToVectorize(BinaryOperator *V, slpvectorizer::BoUpSLP &R);
@@ -92,15 +92,20 @@ private:
/// collected in GEPs.
bool vectorizeGEPIndices(BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+ /// Try to find a horizontal reduction or otherwise vectorize a chain of binary
+ /// operators.
+ bool vectorizeRootInstruction(PHINode *P, Value *V, BasicBlock *BB,
+ slpvectorizer::BoUpSLP &R,
+ TargetTransformInfo *TTI);
+
/// \brief Scan the basic block and look for patterns that are likely to start
/// a vectorization chain.
bool vectorizeChainsInBlock(BasicBlock *BB, slpvectorizer::BoUpSLP &R);
- bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
- slpvectorizer::BoUpSLP &R, unsigned VecRegSize);
+ bool vectorizeStoreChain(ArrayRef<Value *> Chain, slpvectorizer::BoUpSLP &R,
+ unsigned VecRegSize);
- bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
- slpvectorizer::BoUpSLP &R);
+ bool vectorizeStores(ArrayRef<StoreInst *> Stores, slpvectorizer::BoUpSLP &R);
/// The store instructions in a basic block organized by base pointer.
StoreListMap Stores;
diff --git a/include/llvm/module.modulemap b/include/llvm/module.modulemap
index cc1895011dc3..a86bc7e7fcbf 100644
--- a/include/llvm/module.modulemap
+++ b/include/llvm/module.modulemap
@@ -18,9 +18,6 @@ module LLVM_Backend {
umbrella "CodeGen"
module * { export * }
- // FIXME: Why is this excluded?
- exclude header "CodeGen/MachineValueType.h"
-
// Exclude these; they're intended to be included into only a single
// translation unit (or none) and aren't part of this module.
exclude header "CodeGen/CommandFlags.h"
@@ -84,6 +81,13 @@ module LLVM_DebugInfo_PDB_DIA {
module * { export * }
}
+module LLVM_DebugInfo_MSF {
+ requires cplusplus
+
+ umbrella "DebugInfo/MSF"
+ module * { export * }
+}
+
module LLVM_DebugInfo_CodeView {
requires cplusplus
@@ -109,6 +113,14 @@ module LLVM_ExecutionEngine {
exclude header "ExecutionEngine/MCJIT.h"
exclude header "ExecutionEngine/Interpreter.h"
exclude header "ExecutionEngine/OrcMCJITReplacement.h"
+
+ // FIXME: These exclude directives were added as a workaround for
+ // <rdar://problem/29247092> and should be removed once it is fixed.
+ exclude header "ExecutionEngine/Orc/RawByteChannel.h"
+ exclude header "ExecutionEngine/Orc/RPCUtils.h"
+ exclude header "ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h"
+ exclude header "ExecutionEngine/Orc/OrcRemoteTargetClient.h"
+ exclude header "ExecutionEngine/Orc/OrcRemoteTargetServer.h"
}
module LLVM_Pass {
@@ -218,9 +230,6 @@ module LLVM_Transforms {
requires cplusplus
umbrella "Transforms"
module * { export * }
-
- // FIXME: Excluded because it does bad things with the legacy pass manager.
- exclude header "Transforms/IPO/PassManagerBuilder.h"
}
// A module covering ADT/ and Support/. These are intertwined and
@@ -245,9 +254,6 @@ module LLVM_Utils {
// Exclude this; it's fundamentally non-modular.
exclude header "Support/PluginLoader.h"
- // FIXME: Mislayered?
- exclude header "Support/TargetRegistry.h"
-
// These are intended for textual inclusion.
textual header "Support/ARMTargetParser.def"
textual header "Support/AArch64TargetParser.def"
@@ -264,6 +270,7 @@ module LLVM_Utils {
textual header "Support/ELFRelocs/Mips.def"
textual header "Support/ELFRelocs/PowerPC64.def"
textual header "Support/ELFRelocs/PowerPC.def"
+ textual header "Support/ELFRelocs/RISCV.def"
textual header "Support/ELFRelocs/Sparc.def"
textual header "Support/ELFRelocs/SystemZ.def"
textual header "Support/ELFRelocs/x86_64.def"
diff --git a/include/llvm/module.modulemap.build b/include/llvm/module.modulemap.build
index 7150fe93935f..0f6f82af6e12 100644
--- a/include/llvm/module.modulemap.build
+++ b/include/llvm/module.modulemap.build
@@ -3,3 +3,7 @@ module LLVM_Support_DataTypes {
header "Support/DataTypes.h"
export *
}
+module LLVM_Config_ABI_Breaking {
+ header "Config/abi-breaking.h"
+ export *
+}