author     Dimitry Andric <dim@FreeBSD.org>    2017-04-20 21:48:54 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2017-04-20 21:48:54 +0000
commit     6bc11b14146b9a41402d0348438ff4edb1e344cd (patch)
tree       a3128f15d970747b64d8aaaa66d9fe8c176bef8a
parent     554491ffbdcfe51993d5b436a9bbca7aba388dd3 (diff)
parent     583e75cce441388bc562fa225d23499261a0091e (diff)
-rw-r--r--  contrib/llvm/include/llvm-c/Core.h | 10
-rw-r--r--  contrib/llvm/include/llvm-c/Types.h | 14
-rw-r--r--  contrib/llvm/include/llvm/ADT/APInt.h | 201
-rw-r--r--  contrib/llvm/include/llvm/ADT/BitVector.h | 149
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallBitVector.h | 16
-rw-r--r--  contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h | 12
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LoopInfo.h | 23
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h | 73
-rw-r--r--  contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h | 5
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolution.h | 12
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/BitcodeReader.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/BitcodeWriter.h | 14
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h | 14
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h | 3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h | 3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h | 5
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineValueType.h | 548
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ValueTypes.h | 72
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ValueTypes.td | 216
-rw-r--r--  contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h | 11
-rw-r--r--  contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h | 13
-rw-r--r--  contrib/llvm/include/llvm/IR/Argument.h | 12
-rw-r--r--  contrib/llvm/include/llvm/IR/Attributes.h | 3
-rw-r--r--  contrib/llvm/include/llvm/IR/ConstantRange.h | 30
-rw-r--r--  contrib/llvm/include/llvm/IR/DIBuilder.h | 3
-rw-r--r--  contrib/llvm/include/llvm/IR/DebugInfoMetadata.h | 5
-rw-r--r--  contrib/llvm/include/llvm/IR/Instructions.h | 6
-rw-r--r--  contrib/llvm/include/llvm/IR/Metadata.h | 9
-rw-r--r--  contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h | 4
-rw-r--r--  contrib/llvm/include/llvm/IR/PatternMatch.h | 10
-rw-r--r--  contrib/llvm/include/llvm/IR/Use.h | 24
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAsmInfo.h | 8
-rw-r--r--  contrib/llvm/include/llvm/MC/MCStreamer.h | 1
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSubtargetInfo.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Object/Archive.h | 49
-rw-r--r--  contrib/llvm/include/llvm/Object/Binary.h | 22
-rw-r--r--  contrib/llvm/include/llvm/Object/COFF.h | 100
-rw-r--r--  contrib/llvm/include/llvm/Object/IRSymtab.h | 49
-rw-r--r--  contrib/llvm/include/llvm/Object/ObjectFile.h | 58
-rw-r--r--  contrib/llvm/include/llvm/Object/SymbolicFile.h | 28
-rw-r--r--  contrib/llvm/include/llvm/ObjectYAML/DWARFYAML.h | 6
-rw-r--r--  contrib/llvm/include/llvm/PassSupport.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Support/ARMTargetParser.def | 38
-rw-r--r--  contrib/llvm/include/llvm/Support/ArrayRecycler.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Support/BinaryStreamArray.h | 5
-rw-r--r--  contrib/llvm/include/llvm/Support/Dwarf.def | 1096
-rw-r--r--  contrib/llvm/include/llvm/Support/Dwarf.h | 60
-rw-r--r--  contrib/llvm/include/llvm/Support/GenericDomTree.h | 56
-rw-r--r--  contrib/llvm/include/llvm/Support/GraphWriter.h | 7
-rw-r--r--  contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h | 211
-rw-r--r--  contrib/llvm/include/llvm/Support/MathExtras.h | 16
-rw-r--r--  contrib/llvm/include/llvm/Support/Recycler.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Support/Regex.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Support/TargetParser.h | 2
-rw-r--r--  contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLowering.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h | 16
-rw-r--r--  contrib/llvm/include/llvm/XRay/InstrumentationMap.h | 2
-rw-r--r--  contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp | 12
-rw-r--r--  contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp | 133
-rw-r--r--  contrib/llvm/lib/Analysis/CFLGraph.h | 3
-rw-r--r--  contrib/llvm/lib/Analysis/InstructionSimplify.cpp | 105
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryBuiltins.cpp | 17
-rw-r--r--  contrib/llvm/lib/Analysis/MemorySSA.cpp | 3
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolution.cpp | 216
-rw-r--r--  contrib/llvm/lib/Analysis/ValueTracking.cpp | 110
-rw-r--r--  contrib/llvm/lib/AsmParser/LLParser.cpp | 15
-rw-r--r--  contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 316
-rw-r--r--  contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp | 57
-rw-r--r--  contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h | 3
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 415
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp | 9
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp | 25
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp | 23
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 13
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp | 145
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h | 17
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp | 16
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 28
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp | 17
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp | 32
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp | 17
-rw-r--r--  contrib/llvm/lib/CodeGen/InlineSpiller.cpp | 16
-rw-r--r--  contrib/llvm/lib/CodeGen/LowLevelType.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineInstr.cpp | 28
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineVerifier.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocFast.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/SafeStack.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 207
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 7
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 13
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 92
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 26
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 69
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp | 6
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp | 30
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp | 70
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp | 2
-rw-r--r--  contrib/llvm/lib/IR/Attributes.cpp | 112
-rw-r--r--  contrib/llvm/lib/IR/ConstantFold.cpp | 2
-rw-r--r--  contrib/llvm/lib/IR/ConstantRange.cpp | 92
-rw-r--r--  contrib/llvm/lib/IR/Constants.cpp | 28
-rw-r--r--  contrib/llvm/lib/IR/Core.cpp | 33
-rw-r--r--  contrib/llvm/lib/IR/DataLayout.cpp | 12
-rw-r--r--  contrib/llvm/lib/IR/Function.cpp | 23
-rw-r--r--  contrib/llvm/lib/IR/Instructions.cpp | 15
-rw-r--r--  contrib/llvm/lib/MC/MCDwarf.cpp | 21
-rw-r--r--  contrib/llvm/lib/MC/MCParser/AsmParser.cpp | 4
-rw-r--r--  contrib/llvm/lib/Object/Archive.cpp | 51
-rw-r--r--  contrib/llvm/lib/Object/Binary.cpp | 20
-rw-r--r--  contrib/llvm/lib/Object/COFFObjectFile.cpp | 32
-rw-r--r--  contrib/llvm/lib/Object/IRSymtab.cpp | 35
-rw-r--r--  contrib/llvm/lib/Object/ObjectFile.cpp | 14
-rw-r--r--  contrib/llvm/lib/Object/SymbolicFile.cpp | 14
-rw-r--r--  contrib/llvm/lib/Support/APFloat.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/APInt.cpp | 256
-rw-r--r--  contrib/llvm/lib/Support/CommandLine.cpp | 5
-rw-r--r--  contrib/llvm/lib/Support/Dwarf.cpp | 169
-rw-r--r--  contrib/llvm/lib/Support/LowLevelType.cpp | 29
-rw-r--r--  contrib/llvm/lib/Support/Regex.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/TargetParser.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 38
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td | 5
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp | 1
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td | 6
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp | 22
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h | 3
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp | 55
-rw-r--r--  contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 226
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h | 4
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h | 6
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp | 25
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/DSInstructions.td | 2
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp | 1
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 29
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h | 8
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARM.td | 43
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 166
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 23
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMCallingConv.td | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 16
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMFastISel.cpp | 3
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp | 50
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 128
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp | 66
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.td | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrNEON.td | 54
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp | 5
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSubtarget.h | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 11
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp | 180
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp | 5
-rw-r--r--  contrib/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h | 2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/BitTracker.cpp | 10
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/BitTracker.h | 10
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp | 70
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/RDFCopy.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/RDFGraph.h | 3
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp | 47
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/RDFRegisters.h | 13
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td | 74
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp | 25
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt | 28
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FrameLowering.cpp | 9
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 5
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelLowering.cpp | 54
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp | 61
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp | 1
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterInfo.h | 5
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp | 15
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp | 40
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp | 15
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/SampleProfile.cpp | 40
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp | 2
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp | 12
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 8
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 23
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp | 2
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 20
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 38
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp | 77
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 2
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 4
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp | 239
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 4
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 2
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp | 99
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp | 3
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp | 3
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp | 6
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp | 39
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp | 14
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp | 8
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp | 24
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LCSSA.cpp | 31
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/Local.cpp | 8
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp | 106
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 2
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/VNCoercion.cpp | 5
-rw-r--r--  contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 24
-rw-r--r--  contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 12
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Decl.h | 5
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/StmtOpenMP.h | 193
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h | 6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Attr.td | 128
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/AttrSubjectMatchRules.h | 32
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td | 6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td | 37
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td | 35
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h | 13
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def | 3
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/CLCompatOptions.td | 6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Options.td | 9
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/IndexSymbol.h | 3
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Lexer.h | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Token.h | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/Parser.h | 7
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Sema.h | 32
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTContext.cpp | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Decl.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp | 11
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExternalASTMerger.cpp | 39
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp | 98
-rw-r--r--  contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Attributes.cpp | 11
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Targets.cpp | 9
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp | 15
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp | 83
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.h | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp | 39
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp | 8
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp | 38
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp | 18
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp | 13
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp | 46
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp | 36
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.cpp | 23
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.h | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp | 38
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.cpp | 24
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.h | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp | 5
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/smmintrin.h | 239
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xmmintrin.h | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp | 14
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/IndexSymbol.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/IndexTypeSourceInfo.cpp | 5
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/IndexingContext.cpp | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/IndexingContext.h | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Lexer.cpp | 67
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp | 5
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp | 534
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/Parser.cpp | 10
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp | 9
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/Sema.cpp | 67
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp | 213
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp | 85
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp | 5
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp | 28
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp | 13
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp | 88
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp | 7
-rw-r--r--  contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp | 536
-rw-r--r--  contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp | 31
-rw-r--r--  contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h | 6
-rw-r--r--  contrib/llvm/tools/lld/COFF/Driver.cpp | 8
-rw-r--r--  contrib/llvm/tools/lld/COFF/DriverUtils.cpp | 2
-rw-r--r--  contrib/llvm/tools/lld/ELF/Config.h | 1
-rw-r--r--  contrib/llvm/tools/lld/ELF/Driver.cpp | 13
-rw-r--r--  contrib/llvm/tools/lld/ELF/LinkerScript.cpp | 50
-rw-r--r--  contrib/llvm/tools/lld/ELF/LinkerScript.h | 1
-rw-r--r--  contrib/llvm/tools/lld/ELF/Options.td | 3
-rw-r--r--  contrib/llvm/tools/lld/ELF/OutputSections.cpp | 42
-rw-r--r--  contrib/llvm/tools/lld/ELF/OutputSections.h | 5
-rw-r--r--  contrib/llvm/tools/lld/ELF/Writer.cpp | 47
-rw-r--r--  contrib/llvm/tools/lldb/include/lldb/Core/ArchSpec.h | 1
-rw-r--r--  contrib/llvm/tools/lldb/include/lldb/Expression/DiagnosticManager.h | 2
-rw-r--r--  contrib/llvm/tools/lldb/include/lldb/Utility/StringLexer.h | 2
-rw-r--r--  contrib/llvm/tools/lldb/source/Commands/CommandObjectCommands.cpp | 6
-rw-r--r--  contrib/llvm/tools/lldb/source/Commands/CommandObjectFrame.cpp | 90
-rw-r--r--  contrib/llvm/tools/lldb/source/Core/ArchSpec.cpp | 32
-rw-r--r--  contrib/llvm/tools/lldb/source/Core/Scalar.cpp | 2
-rw-r--r--  contrib/llvm/tools/lldb/source/Expression/DiagnosticManager.cpp | 9
-rw-r--r--  contrib/llvm/tools/lldb/source/Interpreter/CommandHistory.cpp | 4
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/LanguageRuntime/RenderScript/RenderScriptRuntime/RenderScriptRuntime.cpp | 69
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp | 117
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.cpp | 26
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.h | 9
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.cpp | 460
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.h | 25
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp | 71
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.h | 7
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterContextPOSIX_mips64.cpp | 7
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterInfos_x86_64.h | 2
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp | 14
-rw-r--r--  contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.h | 4
-rw-r--r--  contrib/llvm/tools/lldb/source/Symbol/ClangASTContext.cpp | 12
-rw-r--r--  contrib/llvm/tools/lldb/source/Utility/StringLexer.cpp | 4
-rw-r--r--  contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp | 7
-rw-r--r--  contrib/llvm/tools/llvm-modextract/llvm-modextract.cpp | 9
-rw-r--r--  contrib/llvm/tools/llvm-xray/xray-extract.cc | 23
-rw-r--r--  contrib/llvm/utils/TableGen/CodeGenTarget.cpp | 39
-rw-r--r--  contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp | 22
-rw-r--r--  lib/clang/include/clang/Basic/Version.inc | 2
-rw-r--r--  lib/clang/include/lld/Config/Version.inc | 2
-rw-r--r--  lib/clang/include/llvm/Support/VCSRevision.h | 2
372 files changed, 9135 insertions(+), 4750 deletions(-)
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
index 7f5c05d21e65..0a1d8faf99b7 100644
--- a/contrib/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -2131,6 +2131,16 @@ LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals,
LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count);
/**
+ * Obtain a Metadata as a Value.
+ */
+LLVMValueRef LLVMMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD);
+
+/**
+ * Obtain a Value as a Metadata.
+ */
+LLVMMetadataRef LLVMValueAsMetadata(LLVMValueRef Val);
+
+/**
* Obtain the underlying string from a MDString value.
*
* @param V Instance to obtain string from.
diff --git a/contrib/llvm/include/llvm-c/Types.h b/contrib/llvm/include/llvm-c/Types.h
index 3d472a6bf47d..d63ea4de933d 100644
--- a/contrib/llvm/include/llvm-c/Types.h
+++ b/contrib/llvm/include/llvm-c/Types.h
@@ -83,6 +83,13 @@ typedef struct LLVMOpaqueValue *LLVMValueRef;
typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef;
/**
+ * Represents an LLVM Metadata.
+ *
+ * This models llvm::Metadata.
+ */
+typedef struct LLVMOpaqueMetadata *LLVMMetadataRef;
+
+/**
* Represents an LLVM basic block builder.
*
* This models llvm::IRBuilder.
@@ -90,6 +97,13 @@ typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef;
typedef struct LLVMOpaqueBuilder *LLVMBuilderRef;
/**
+ * Represents an LLVM debug info builder.
+ *
+ * This models llvm::DIBuilder.
+ */
+typedef struct LLVMOpaqueDIBuilder *LLVMDIBuilderRef;
+
+/**
* Interface used to provide a module to JIT or interpreter.
* This is now just a synonym for llvm::Module, but we have to keep using the
* different type to keep binary compatibility.
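Taken together, the two hunks above expose LLVM's Metadata hierarchy through the C API: Types.h introduces the opaque LLVMMetadataRef handle, and Core.h adds the conversion entry points between the Value and Metadata worlds. A minimal sketch of how they compose (roundTripMetadata is a hypothetical helper; assumes a context and value are already in hand):

#include <llvm-c/Core.h>

// Round-trip a value through the metadata representation using the new
// conversion functions: LLVMValueAsMetadata unwraps a Value into a
// Metadata handle; LLVMMetadataAsValue re-wraps it for APIs that still
// traffic in LLVMValueRef.
static void roundTripMetadata(LLVMContextRef Ctx, LLVMValueRef V) {
  LLVMMetadataRef MD = LLVMValueAsMetadata(V);
  LLVMValueRef Wrapped = LLVMMetadataAsValue(Ctx, MD);
  (void)Wrapped; // e.g. hand back to LLVMMDNodeInContext-style APIs
}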
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index ab23130b137d..ceb623d34531 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -189,17 +189,17 @@ private:
void initSlowCase(const APInt &that);
/// out-of-line slow case for shl
- APInt shlSlowCase(unsigned shiftAmt) const;
+ void shlSlowCase(unsigned ShiftAmt);
+
+ /// out-of-line slow case for lshr.
+ void lshrSlowCase(unsigned ShiftAmt);
/// out-of-line slow case for operator=
- APInt &AssignSlowCase(const APInt &RHS);
+ void AssignSlowCase(const APInt &RHS);
/// out-of-line slow case for operator==
bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY;
- /// out-of-line slow case for operator==
- bool EqualSlowCase(uint64_t Val) const LLVM_READONLY;
-
/// out-of-line slow case for countLeadingZeros
unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
@@ -209,6 +209,12 @@ private:
/// out-of-line slow case for countPopulation
unsigned countPopulationSlowCase() const LLVM_READONLY;
+ /// out-of-line slow case for intersects.
+ bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+ /// out-of-line slow case for isSubsetOf.
+ bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;
+
/// out-of-line slow case for setBits.
void setBitsSlowCase(unsigned loBit, unsigned hiBit);
@@ -216,13 +222,13 @@ private:
void flipAllBitsSlowCase();
/// out-of-line slow case for operator&=.
- APInt& AndAssignSlowCase(const APInt& RHS);
+ void AndAssignSlowCase(const APInt& RHS);
/// out-of-line slow case for operator|=.
- APInt& OrAssignSlowCase(const APInt& RHS);
+ void OrAssignSlowCase(const APInt& RHS);
/// out-of-line slow case for operator^=.
- APInt& XorAssignSlowCase(const APInt& RHS);
+ void XorAssignSlowCase(const APInt& RHS);
public:
/// \name Constructors
@@ -330,6 +336,20 @@ public:
/// This tests the high bit of the APInt to determine if it is unset.
bool isNonNegative() const { return !isNegative(); }
+ /// \brief Determine if sign bit of this APInt is set.
+ ///
+ /// This tests the high bit of this APInt to determine if it is set.
+ ///
+ /// \returns true if this APInt has its sign bit set, false otherwise.
+ bool isSignBitSet() const { return (*this)[BitWidth-1]; }
+
+ /// \brief Determine if sign bit of this APInt is clear.
+ ///
+ /// This tests the high bit of this APInt to determine if it is clear.
+ ///
+ /// \returns true if this APInt has its sign bit clear, false otherwise.
+ bool isSignBitClear() const { return !isSignBitSet(); }
+
/// \brief Determine if this APInt Value is positive.
///
/// This tests if the value of this APInt is positive (> 0). Note
@@ -396,10 +416,10 @@ public:
return countPopulationSlowCase() == 1;
}
- /// \brief Check if the APInt's value is returned by getSignBit.
+ /// \brief Check if the APInt's value is returned by getSignMask.
///
- /// \returns true if this is the value returned by getSignBit.
- bool isSignBit() const { return isMinSignedValue(); }
+ /// \returns true if this is the value returned by getSignMask.
+ bool isSignMask() const { return isMinSignedValue(); }
/// \brief Convert APInt to a boolean value.
///
@@ -409,8 +429,7 @@ public:
/// If this value is smaller than the specified limit, return it, otherwise
/// return the limit value. This causes the value to saturate to the limit.
uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const {
- return (getActiveBits() > 64 || getZExtValue() > Limit) ? Limit
- : getZExtValue();
+ return ugt(Limit) ? Limit : getZExtValue();
}
/// \brief Check if the APInt consists of a repeated bit pattern.
@@ -427,8 +446,9 @@ public:
assert(numBits <= BitWidth && "numBits out of range");
if (isSingleWord())
return VAL == (UINT64_MAX >> (APINT_BITS_PER_WORD - numBits));
- unsigned Ones = countTrailingOnes();
- return (numBits == Ones) && ((Ones + countLeadingZeros()) == BitWidth);
+ unsigned Ones = countTrailingOnesSlowCase();
+ return (numBits == Ones) &&
+ ((Ones + countLeadingZerosSlowCase()) == BitWidth);
}
/// \returns true if this APInt is a non-empty sequence of ones starting at
@@ -437,8 +457,8 @@ public:
bool isMask() const {
if (isSingleWord())
return isMask_64(VAL);
- unsigned Ones = countTrailingOnes();
- return (Ones > 0) && ((Ones + countLeadingZeros()) == BitWidth);
+ unsigned Ones = countTrailingOnesSlowCase();
+ return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth);
}
/// \brief Return true if this APInt value contains a sequence of ones with
@@ -446,8 +466,9 @@ public:
bool isShiftedMask() const {
if (isSingleWord())
return isShiftedMask_64(VAL);
- unsigned Ones = countPopulation();
- return (Ones + countTrailingZeros() + countLeadingZeros()) == BitWidth;
+ unsigned Ones = countPopulationSlowCase();
+ unsigned LeadZ = countLeadingZerosSlowCase();
+ return (Ones + LeadZ + countTrailingZeros()) == BitWidth;
}
/// @}
@@ -476,11 +497,11 @@ public:
return API;
}
- /// \brief Get the SignBit for a specific bit width.
+ /// \brief Get the SignMask for a specific bit width.
///
/// This is just a wrapper function of getSignedMinValue(), and it helps code
- /// readability when we want to get a SignBit.
- static APInt getSignBit(unsigned BitWidth) {
+ /// readability when we want to get a SignMask.
+ static APInt getSignMask(unsigned BitWidth) {
return getSignedMinValue(BitWidth);
}
@@ -674,29 +695,22 @@ public:
return clearUnusedBits();
}
- return AssignSlowCase(RHS);
+ AssignSlowCase(RHS);
+ return *this;
}
/// @brief Move assignment operator.
APInt &operator=(APInt &&that) {
- if (!isSingleWord()) {
- // The MSVC STL shipped in 2013 requires that self move assignment be a
- // no-op. Otherwise algorithms like stable_sort will produce answers
- // where half of the output is left in a moved-from state.
- if (this == &that)
- return *this;
+ assert(this != &that && "Self-move not supported");
+ if (!isSingleWord())
delete[] pVal;
- }
// Use memcpy so that type based alias analysis sees both VAL and pVal
// as modified.
memcpy(&VAL, &that.VAL, sizeof(uint64_t));
- // If 'this == &that', avoid zeroing our own bitwidth by storing to 'that'
- // first.
- unsigned ThatBitWidth = that.BitWidth;
+ BitWidth = that.BitWidth;
that.BitWidth = 0;
- BitWidth = ThatBitWidth;
return *this;
}
@@ -727,11 +741,11 @@ public:
/// \returns *this after ANDing with RHS.
APInt &operator&=(const APInt &RHS) {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
- if (isSingleWord()) {
+ if (isSingleWord())
VAL &= RHS.VAL;
- return *this;
- }
- return AndAssignSlowCase(RHS);
+ else
+ AndAssignSlowCase(RHS);
+ return *this;
}
/// \brief Bitwise AND assignment operator.
@@ -757,11 +771,11 @@ public:
/// \returns *this after ORing with RHS.
APInt &operator|=(const APInt &RHS) {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
- if (isSingleWord()) {
+ if (isSingleWord())
VAL |= RHS.VAL;
- return *this;
- }
- return OrAssignSlowCase(RHS);
+ else
+ OrAssignSlowCase(RHS);
+ return *this;
}
/// \brief Bitwise OR assignment operator.
@@ -787,11 +801,11 @@ public:
/// \returns *this after XORing with RHS.
APInt &operator^=(const APInt &RHS) {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
- if (isSingleWord()) {
+ if (isSingleWord())
VAL ^= RHS.VAL;
- return *this;
- }
- return XorAssignSlowCase(RHS);
+ else
+ XorAssignSlowCase(RHS);
+ return *this;
}
/// \brief Bitwise XOR assignment operator.
@@ -836,9 +850,17 @@ public:
///
/// Shifts *this left by shiftAmt and assigns the result to *this.
///
- /// \returns *this after shifting left by shiftAmt
- APInt &operator<<=(unsigned shiftAmt) {
- *this = shl(shiftAmt);
+ /// \returns *this after shifting left by ShiftAmt
+ APInt &operator<<=(unsigned ShiftAmt) {
+ assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+ if (isSingleWord()) {
+ if (ShiftAmt == BitWidth)
+ VAL = 0;
+ else
+ VAL <<= ShiftAmt;
+ return clearUnusedBits();
+ }
+ shlSlowCase(ShiftAmt);
return *this;
}
@@ -875,20 +897,26 @@ public:
return R;
}
- /// Logical right-shift this APInt by shiftAmt in place.
- void lshrInPlace(unsigned shiftAmt);
+ /// Logical right-shift this APInt by ShiftAmt in place.
+ void lshrInPlace(unsigned ShiftAmt) {
+ assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+ if (isSingleWord()) {
+ if (ShiftAmt == BitWidth)
+ VAL = 0;
+ else
+ VAL >>= ShiftAmt;
+ return;
+ }
+ lshrSlowCase(ShiftAmt);
+ }
/// \brief Left-shift function.
///
/// Left-shift this APInt by shiftAmt.
APInt shl(unsigned shiftAmt) const {
- assert(shiftAmt <= BitWidth && "Invalid shift amount");
- if (isSingleWord()) {
- if (shiftAmt >= BitWidth)
- return APInt(BitWidth, 0); // avoid undefined shift results
- return APInt(BitWidth, VAL << shiftAmt);
- }
- return shlSlowCase(shiftAmt);
+ APInt R(*this);
+ R <<= shiftAmt;
+ return R;
}
/// \brief Rotate left by rotateAmt.
@@ -905,7 +933,14 @@ public:
/// \brief Logical right-shift function.
///
/// Logical right-shift this APInt by shiftAmt.
- APInt lshr(const APInt &shiftAmt) const;
+ APInt lshr(const APInt &ShiftAmt) const {
+ APInt R(*this);
+ R.lshrInPlace(ShiftAmt);
+ return R;
+ }
+
+ /// Logical right-shift this APInt by ShiftAmt in place.
+ void lshrInPlace(const APInt &ShiftAmt);
/// \brief Left-shift function.
///
@@ -1003,9 +1038,7 @@ public:
///
/// \returns true if *this == Val
bool operator==(uint64_t Val) const {
- if (isSingleWord())
- return VAL == Val;
- return EqualSlowCase(Val);
+ return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val;
}
/// \brief Equality comparison.
@@ -1055,7 +1088,8 @@ public:
///
/// \returns true if *this < RHS when considered unsigned.
bool ult(uint64_t RHS) const {
- return getActiveBits() > 64 ? false : getZExtValue() < RHS;
+ // Only need to check active bits if not a single word.
+ return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS;
}
/// \brief Signed less than comparison
@@ -1073,7 +1107,8 @@ public:
///
/// \returns true if *this < RHS when considered signed.
bool slt(int64_t RHS) const {
- return getMinSignedBits() > 64 ? isNegative() : getSExtValue() < RHS;
+ return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative()
+ : getSExtValue() < RHS;
}
/// \brief Unsigned less or equal comparison
@@ -1123,7 +1158,8 @@ public:
///
/// \returns true if *this > RHS when considered unsigned.
bool ugt(uint64_t RHS) const {
- return getActiveBits() > 64 ? true : getZExtValue() > RHS;
+ // Only need to check active bits if not a single word.
+ return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
}
/// \brief Signed greater than comparison
@@ -1141,7 +1177,8 @@ public:
///
/// \returns true if *this > RHS when considered signed.
bool sgt(int64_t RHS) const {
- return getMinSignedBits() > 64 ? !isNegative() : getSExtValue() > RHS;
+ return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative()
+ : getSExtValue() > RHS;
}
/// \brief Unsigned greater or equal comparison
@@ -1179,9 +1216,18 @@ public:
/// This operation tests if there are any pairs of corresponding bits
/// between this APInt and RHS that are both set.
bool intersects(const APInt &RHS) const {
- APInt temp(*this);
- temp &= RHS;
- return temp != 0;
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ return (VAL & RHS.VAL) != 0;
+ return intersectsSlowCase(RHS);
+ }
+
+ /// This operation checks that all bits set in this APInt are also set in RHS.
+ bool isSubsetOf(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ return (VAL & ~RHS.VAL) == 0;
+ return isSubsetOfSlowCase(RHS);
}
/// @}
@@ -1404,8 +1450,7 @@ public:
/// int64_t. Otherwise an assertion will result.
int64_t getSExtValue() const {
if (isSingleWord())
- return int64_t(VAL << (APINT_BITS_PER_WORD - BitWidth)) >>
- (APINT_BITS_PER_WORD - BitWidth);
+ return SignExtend64(VAL, BitWidth);
assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
return int64_t(pVal[0]);
}
@@ -1759,13 +1804,13 @@ public:
WordType *remainder, WordType *scratch,
unsigned parts);
- /// Shift a bignum left COUNT bits. Shifted in bits are zero. There are no
- /// restrictions on COUNT.
- static void tcShiftLeft(WordType *, unsigned parts, unsigned count);
+ /// Shift a bignum left Count bits. Shifted in bits are zero. There are no
+ /// restrictions on Count.
+ static void tcShiftLeft(WordType *, unsigned Words, unsigned Count);
- /// Shift a bignum right COUNT bits. Shifted in bits are zero. There are no
- /// restrictions on COUNT.
- static void tcShiftRight(WordType *, unsigned parts, unsigned count);
+ /// Shift a bignum right Count bits. Shifted in bits are zero. There are no
+ /// restrictions on Count.
+ static void tcShiftRight(WordType *, unsigned Words, unsigned Count);
/// The obvious AND, OR and XOR and complement operations.
static void tcAnd(WordType *, const WordType *, unsigned);
@@ -1959,7 +2004,7 @@ inline const APInt &umax(const APInt &A, const APInt &B) {
/// \brief Compute GCD of two unsigned APInt values.
///
/// This function returns the greatest common divisor of the two APInt values
-/// using Euclid's algorithm.
+/// using Stein's algorithm.
///
/// \returns the greatest common divisor of A and B.
APInt GreatestCommonDivisor(APInt A, APInt B);
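The APInt hunks above share one theme: the single-word fast paths are inlined, compound assignment returns *this directly, shifts gain true in-place forms, and the set-style queries (intersects, the new isSubsetOf) no longer materialize a temporary APInt. A short sketch of the resulting API surface, assuming the headers from this snapshot (apintSketch is a hypothetical driver):

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void apintSketch() {
  APInt A(128, 0xF0);               // 128-bit value, low byte 0xF0
  APInt B(128, 0xFF);
  A <<= 4;                          // in-place left shift, no temporary
  A.lshrInPlace(4);                 // new in-place logical right shift
  bool Overlap = A.intersects(B);   // word-by-word AND, no temporary APInt
  bool Subset = A.isSubsetOf(B);    // true iff (A & ~B) == 0, no allocation
  APInt SignMask = APInt::getSignMask(128); // renamed from getSignBit
  bool Neg = A.isSignBitSet();      // new predicate; same test as isNegative()
  (void)Overlap; (void)Subset; (void)SignMask; (void)Neg;
}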
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
index 8240d01ae977..e48c023ae7df 100644
--- a/contrib/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -14,6 +14,8 @@
#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
@@ -455,6 +457,105 @@ public:
return *this;
}
+ BitVector &operator>>=(unsigned N) {
+ assert(N <= Size);
+ if (LLVM_UNLIKELY(empty() || N == 0))
+ return *this;
+
+ unsigned NumWords = NumBitWords(Size);
+ assert(NumWords >= 1);
+
+ wordShr(N / BITWORD_SIZE);
+
+ unsigned BitDistance = N % BITWORD_SIZE;
+ if (BitDistance == 0)
+ return *this;
+
+ // When the shift size is not a multiple of the word size, then we have
+ // a tricky situation where each word in succession needs to extract some
+ // of the bits from the next word and or them into this word while
+ // shifting this word to make room for the new bits. This has to be done
+ // for every word in the array.
+
+ // Since we're shifting each word right, some bits will fall off the end
+ // of each word to the right, and empty space will be created on the left.
+ // The final word in the array will lose bits permanently, so starting at
+ // the beginning, work forwards shifting each word to the right, and
+ // OR'ing in the bits from the end of the next word to the beginning of
+ // the current word.
+
+ // Example:
+ // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
+ // by 4 bits.
+ // Step 1: Word[0] >>= 4 ; 0x0ABBCCDD
+ // Step 2: Word[0] |= 0x10000000 ; 0x1ABBCCDD
+ // Step 3: Word[1] >>= 4 ; 0x0EEFF001
+ // Step 4: Word[1] |= 0x50000000 ; 0x5EEFF001
+ // Step 5: Word[2] >>= 4 ; 0x02334455
+ // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
+ const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
+ const unsigned LSH = BITWORD_SIZE - BitDistance;
+
+ for (unsigned I = 0; I < NumWords - 1; ++I) {
+ Bits[I] >>= BitDistance;
+ Bits[I] |= (Bits[I + 1] & Mask) << LSH;
+ }
+
+ Bits[NumWords - 1] >>= BitDistance;
+
+ return *this;
+ }
+
+ BitVector &operator<<=(unsigned N) {
+ assert(N <= Size);
+ if (LLVM_UNLIKELY(empty() || N == 0))
+ return *this;
+
+ unsigned NumWords = NumBitWords(Size);
+ assert(NumWords >= 1);
+
+ wordShl(N / BITWORD_SIZE);
+
+ unsigned BitDistance = N % BITWORD_SIZE;
+ if (BitDistance == 0)
+ return *this;
+
+ // When the shift size is not a multiple of the word size, then we have
+ // a tricky situation where each word in succession needs to extract some
+ // of the bits from the previous word and or them into this word while
+ // shifting this word to make room for the new bits. This has to be done
+ // for every word in the array. This is similar to the algorithm outlined
+ // in operator>>=, but backwards.
+
+ // Since we're shifting each word left, some bits will fall off the end
+ // of each word to the left, and empty space will be created on the right.
+ // The first word in the array will lose bits permanently, so starting at
+ // the end, work backwards shifting each word to the left, and OR'ing
+ // in the bits from the end of the next word to the beginning of the
+ // current word.
+
+ // Example:
+ // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
+ // by 4 bits.
+ // Step 1: Word[2] <<= 4 ; 0x23344550
+ // Step 2: Word[2] |= 0x0000000E ; 0x2334455E
+ // Step 3: Word[1] <<= 4 ; 0xEFF00110
+ // Step 4: Word[1] |= 0x0000000A ; 0xEFF0011A
+ // Step 5: Word[0] <<= 4 ; 0xABBCCDD0
+ // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
+ const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
+ const unsigned RSH = BITWORD_SIZE - BitDistance;
+
+ for (int I = NumWords - 1; I > 0; --I) {
+ Bits[I] <<= BitDistance;
+ Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
+ }
+ Bits[0] <<= BitDistance;
+ clear_unused_bits();
+
+ return *this;
+ }
+
// Assignment operator.
const BitVector &operator=(const BitVector &RHS) {
if (this == &RHS) return *this;
@@ -538,6 +639,54 @@ public:
}
private:
+ /// \brief Perform a logical left shift of \p Count words by moving everything
+ /// \p Count words to the right in memory.
+ ///
+ /// While confusing, words are stored from least significant at Bits[0] to
+ /// most significant at Bits[NumWords-1]. A logical shift left, however,
+ /// moves the current least significant bit to a higher logical index, and
+ /// fills the previous least significant bits with 0. Thus, we actually
+ /// need to move the bytes of the memory to the right, not to the left.
+ /// Example:
+ /// Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
+ /// represents a BitVector where 0xBBBBAAAA contain the least significant
+ /// bits. So if we want to shift the BitVector left by 2 words, we need to
+ /// turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
+ /// memmove which moves right, not left.
+ void wordShl(uint32_t Count) {
+ if (Count == 0)
+ return;
+
+ uint32_t NumWords = NumBitWords(Size);
+
+ auto Src = ArrayRef<BitWord>(Bits, NumWords).drop_back(Count);
+ auto Dest = MutableArrayRef<BitWord>(Bits, NumWords).drop_front(Count);
+
+ // Since we always move Word-sized chunks of data with src and dest both
+ // aligned to a word-boundary, we don't need to worry about endianness
+ // here.
+ std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
+ std::memset(Bits, 0, Count * sizeof(BitWord));
+ clear_unused_bits();
+ }
+
+ /// \brief Perform a logical right shift of \p Count words by moving those
+ /// words to the left in memory. See wordShl for more information.
+ ///
+ void wordShr(uint32_t Count) {
+ if (Count == 0)
+ return;
+
+ uint32_t NumWords = NumBitWords(Size);
+
+ auto Src = ArrayRef<BitWord>(Bits, NumWords).drop_front(Count);
+ auto Dest = MutableArrayRef<BitWord>(Bits, NumWords).drop_back(Count);
+ assert(Dest.size() == Src.size());
+
+ std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
+ std::memset(Dest.end(), 0, Count * sizeof(BitWord));
+ }
+
int next_unset_in_word(int WordIndex, BitWord Word) const {
unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
return Result < size() ? Result : -1;
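A small sketch of the new BitVector shift operators, assuming this snapshot's headers. Note that operator<<= moves set bits toward higher indices, matching shifts on the underlying integer words as the wordShl comment describes; SmallBitVector (next file) simply forwards to these operators.

#include "llvm/ADT/BitVector.h"
#include <cassert>
using namespace llvm;

static void bitvectorShiftSketch() {
  BitVector BV(8);   // 8 bits, all clear
  BV.set(0);         // {0}
  BV <<= 3;          // {3}: bit 0 moved to a higher logical index
  BV >>= 1;          // {2}
  assert(BV.test(2) && BV.count() == 1);
}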
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
index edb37da38da1..607e040a606c 100644
--- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -508,6 +508,22 @@ public:
return *this;
}
+ SmallBitVector &operator<<=(unsigned N) {
+ if (isSmall())
+ setSmallBits(getSmallBits() << N);
+ else
+ getPointer()->operator<<=(N);
+ return *this;
+ }
+
+ SmallBitVector &operator>>=(unsigned N) {
+ if (isSmall())
+ setSmallBits(getSmallBits() >> N);
+ else
+ getPointer()->operator>>=(N);
+ return *this;
+ }
+
// Assignment operator.
const SmallBitVector &operator=(const SmallBitVector &RHS) {
if (isSmall()) {
diff --git a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index e3d81fea49ea..3e05e09900a5 100644
--- a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -1164,9 +1164,8 @@ template <class BT> struct BlockEdgesAdder {
void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
const LoopData *OuterLoop) {
const BlockT *BB = BFI.RPOT[Irr.Node.Index];
- for (auto I = Successor::child_begin(BB), E = Successor::child_end(BB);
- I != E; ++I)
- G.addEdge(Irr, BFI.getNode(*I), OuterLoop);
+ for (const auto Succ : children<const BlockT *>(BB))
+ G.addEdge(Irr, BFI.getNode(Succ), OuterLoop);
}
};
}
@@ -1210,10 +1209,9 @@ BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
return false;
} else {
const BlockT *BB = getBlock(Node);
- for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
- SI != SE; ++SI)
- if (!addToDist(Dist, OuterLoop, Node, getNode(*SI),
- getWeightFromBranchProb(BPI->getEdgeProbability(BB, SI))))
+ for (const auto Succ : children<const BlockT *>(BB))
+ if (!addToDist(Dist, OuterLoop, Node, getNode(Succ),
+ getWeightFromBranchProb(BPI->getEdgeProbability(BB, Succ))))
// Irreducible backedge.
return false;
}
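This hunk is the first of several that replace explicit GraphTraits iterator loops with the children<>() range adaptor; the same mechanical rewrite recurs in DominanceFrontierImpl.h, LoopInfo.h, and LoopInfoImpl.h below. A sketch of the pattern on IR basic blocks, assuming the children/Inverse helpers from llvm/ADT/GraphTraits.h (visitNeighbors is a hypothetical function):

#include "llvm/IR/CFG.h" // GraphTraits specializations for BasicBlock
using namespace llvm;

static void visitNeighbors(BasicBlock *BB) {
  // Successors: replaces the child_begin()/child_end() iterator pair.
  for (BasicBlock *Succ : children<BasicBlock *>(BB))
    (void)Succ;
  // Predecessors: Inverse<> flips the edge direction.
  for (BasicBlock *Pred : children<Inverse<BasicBlock *>>(BB))
    (void)Pred;
}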
diff --git a/contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h b/contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h
index 629ae3809045..9f8cacc24f2c 100644
--- a/contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h
@@ -174,12 +174,10 @@ ForwardDominanceFrontierBase<BlockT>::calculate(const DomTreeT &DT,
// Visit each block only once.
if (visited.insert(currentBB).second) {
// Loop over CFG successors to calculate DFlocal[currentNode]
- for (auto SI = BlockTraits::child_begin(currentBB),
- SE = BlockTraits::child_end(currentBB);
- SI != SE; ++SI) {
+ for (const auto Succ : children<BlockT *>(currentBB)) {
// Does Node immediately dominate this successor?
- if (DT[*SI]->getIDom() != currentNode)
- S.insert(*SI);
+ if (DT[Succ]->getIDom() != currentNode)
+ S.insert(Succ);
}
}
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
index 996794b660a9..2fad1737d1c0 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -158,11 +158,8 @@ public:
/// True if terminator in the block can branch to another block that is
/// outside of the current loop.
bool isLoopExiting(const BlockT *BB) const {
- typedef GraphTraits<const BlockT*> BlockTraits;
- for (typename BlockTraits::ChildIteratorType SI =
- BlockTraits::child_begin(BB),
- SE = BlockTraits::child_end(BB); SI != SE; ++SI) {
- if (!contains(*SI))
+ for (const auto Succ : children<const BlockT*>(BB)) {
+ if (!contains(Succ))
return true;
}
return false;
@@ -186,11 +183,8 @@ public:
unsigned NumBackEdges = 0;
BlockT *H = getHeader();
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType I =
- InvBlockTraits::child_begin(H),
- E = InvBlockTraits::child_end(H); I != E; ++I)
- if (contains(*I))
+ for (const auto Pred : children<Inverse<BlockT*> >(H))
+ if (contains(Pred))
++NumBackEdges;
return NumBackEdges;
@@ -249,12 +243,9 @@ public:
/// contains a branch back to the header.
void getLoopLatches(SmallVectorImpl<BlockT *> &LoopLatches) const {
BlockT *H = getHeader();
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType I =
- InvBlockTraits::child_begin(H),
- E = InvBlockTraits::child_end(H); I != E; ++I)
- if (contains(*I))
- LoopLatches.push_back(*I);
+ for (const auto Pred : children<Inverse<BlockT*>>(H))
+ if (contains(Pred))
+ LoopLatches.push_back(Pred);
}
//===--------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
index 761f8721b54f..6dc0422ce0e9 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
@@ -34,14 +34,11 @@ namespace llvm {
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
- typedef GraphTraits<BlockT*> BlockTraits;
- for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
- for (typename BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I)
- if (!contains(*I)) {
+ for (const auto BB : blocks())
+ for (const auto Succ : children<BlockT*>(BB))
+ if (!contains(Succ)) {
// Not in current loop? It must be an exit block.
- ExitingBlocks.push_back(*BI);
+ ExitingBlocks.push_back(BB);
break;
}
}
@@ -63,14 +60,11 @@ BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
- typedef GraphTraits<BlockT*> BlockTraits;
- for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
- for (typename BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I)
- if (!contains(*I))
+ for (const auto BB : blocks())
+ for (const auto Succ : children<BlockT*>(BB))
+ if (!contains(Succ))
// Not in current loop? It must be an exit block.
- ExitBlocks.push_back(*I);
+ ExitBlocks.push_back(Succ);
}
/// getExitBlock - If getExitBlocks would return exactly one block,
@@ -88,14 +82,11 @@ BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
- typedef GraphTraits<BlockT*> BlockTraits;
- for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
- for (typename BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I)
- if (!contains(*I))
+ for (const auto BB : blocks())
+ for (const auto Succ : children<BlockT*>(BB))
+ if (!contains(Succ))
// Not in current loop? It must be an exit block.
- ExitEdges.push_back(Edge(*BI, *I));
+ ExitEdges.emplace_back(BB, Succ);
}
/// getLoopPreheader - If there is a preheader for this loop, return it. A
@@ -134,15 +125,11 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
// Loop over the predecessors of the header node...
BlockT *Header = getHeader();
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(Header),
- PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
- typename InvBlockTraits::NodeRef N = *PI;
- if (!contains(N)) { // If the block is not in the loop...
- if (Out && Out != N)
+ for (const auto Pred : children<Inverse<BlockT*>>(Header)) {
+ if (!contains(Pred)) { // If the block is not in the loop...
+ if (Out && Out != Pred)
return nullptr; // Multiple predecessors outside the loop
- Out = N;
+ Out = Pred;
}
}
@@ -156,17 +143,11 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
BlockT *Header = getHeader();
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(Header);
- typename InvBlockTraits::ChildIteratorType PE =
- InvBlockTraits::child_end(Header);
BlockT *Latch = nullptr;
- for (; PI != PE; ++PI) {
- typename InvBlockTraits::NodeRef N = *PI;
- if (contains(N)) {
+ for (const auto Pred : children<Inverse<BlockT*>>(Header)) {
+ if (contains(Pred)) {
if (Latch) return nullptr;
- Latch = N;
+ Latch = Pred;
}
}
@@ -394,11 +375,9 @@ static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT*> Backedges,
// within this subloop tree itself. Note that a predecessor may directly
// reach another subloop that is not yet discovered to be a subloop of
// this loop, which we must traverse.
- for (typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(PredBB),
- PE = InvBlockTraits::child_end(PredBB); PI != PE; ++PI) {
- if (LI->getLoopFor(*PI) != Subloop)
- ReverseCFGWorklist.push_back(*PI);
+ for (const auto Pred : children<Inverse<BlockT*>>(PredBB)) {
+ if (LI->getLoopFor(Pred) != Subloop)
+ ReverseCFGWorklist.push_back(Pred);
}
}
}
@@ -482,13 +461,7 @@ analyze(const DominatorTreeBase<BlockT> &DomTree) {
SmallVector<BlockT *, 4> Backedges;
// Check each predecessor of the potential loop header.
- typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
- for (typename InvBlockTraits::ChildIteratorType PI =
- InvBlockTraits::child_begin(Header),
- PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
-
- BlockT *Backedge = *PI;
-
+ for (const auto Backedge : children<Inverse<BlockT*>>(Header)) {
// If Header dominates predBB, this is a new loop. Collect the backedges.
if (DomTree.dominates(Header, Backedge)
&& DomTree.isReachableFromEntry(Backedge)) {
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
index c5514316f75f..743faf2b67db 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -54,6 +54,11 @@ bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory similar to malloc or calloc.
+bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
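The new predicate slots between the existing ones: narrower than isAllocLikeFn (which also admits strdup-like functions), wider than isMallocLikeFn or isCallocLikeFn alone. A sketch of a call site, assuming a TargetLibraryInfo is available (allocatesPlainHeapBlock is a hypothetical wrapper):

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;

// True for calls/invokes of malloc-like or calloc-like allocators,
// but not for strdup-like ones.
static bool allocatesPlainHeapBlock(const Value *V,
                                    const TargetLibraryInfo *TLI) {
  return isMallocOrCallocLikeFn(V, TLI);
}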
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index 9a50de540f2b..91aeae0f728f 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1159,8 +1159,20 @@ public:
const SCEV *getConstant(const APInt &Val);
const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
+
+ typedef SmallDenseMap<std::pair<const SCEV *, Type *>, const SCEV *, 8>
+ ExtendCacheTy;
const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getZeroExtendExprCached(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache);
+ const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache);
+
const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getSignExtendExprCached(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache);
+ const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache);
const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
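The Cached/Impl bodies live in ScalarEvolution.cpp; the sketch below only illustrates the memoization pattern the split suggests (probe the cache, fall back to a worker, record the result), using the public getZeroExtendExpr as a stand-in for the Impl function:

#include "llvm/Analysis/ScalarEvolution.h"

using namespace llvm;

static const SCEV *getZExtMemoized(ScalarEvolution &SE, const SCEV *Op,
                                   Type *Ty,
                                   ScalarEvolution::ExtendCacheTy &Cache) {
  auto It = Cache.find({Op, Ty});
  if (It != Cache.end())
    return It->second;                             // Reuse a prior extension.
  const SCEV *ZExt = SE.getZeroExtendExpr(Op, Ty); // Stand-in for ...Impl.
  Cache.insert({{Op, Ty}, ZExt});                  // Memoize for later queries.
  return ZExt;
}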
diff --git a/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h b/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h
index 9e042b17241f..0701ddbb7f1c 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitcodeReader.h
@@ -46,6 +46,9 @@ namespace llvm {
ArrayRef<uint8_t> Buffer;
StringRef ModuleIdentifier;
+ // The string table used to interpret this module.
+ StringRef Strtab;
+
// The bitstream location of the IDENTIFICATION_BLOCK.
uint64_t IdentificationBit;
@@ -70,6 +73,7 @@ namespace llvm {
StringRef getBuffer() const {
return StringRef((const char *)Buffer.begin(), Buffer.size());
}
+ StringRef getStrtab() const { return Strtab; }
StringRef getModuleIdentifier() const { return ModuleIdentifier; }
diff --git a/contrib/llvm/include/llvm/Bitcode/BitcodeWriter.h b/contrib/llvm/include/llvm/Bitcode/BitcodeWriter.h
index 271cb2d81bbb..23b5ae87b278 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitcodeWriter.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitcodeWriter.h
@@ -15,6 +15,7 @@
#define LLVM_BITCODE_BITCODEWRITER_H
#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/MC/StringTableBuilder.h"
#include <string>
namespace llvm {
@@ -26,12 +27,25 @@ namespace llvm {
SmallVectorImpl<char> &Buffer;
std::unique_ptr<BitstreamWriter> Stream;
+ StringTableBuilder StrtabBuilder{StringTableBuilder::RAW};
+ bool WroteStrtab = false;
+
+ void writeBlob(unsigned Block, unsigned Record, StringRef Blob);
+
public:
/// Create a BitcodeWriter that writes to Buffer.
BitcodeWriter(SmallVectorImpl<char> &Buffer);
~BitcodeWriter();
+ /// Write the bitcode file's string table. This must be called exactly once
+ /// after all modules have been written.
+ void writeStrtab();
+
+ /// Copy the string table for another module into this bitcode file. This
+ /// should be called after copying the module itself into the bitcode file.
+ void copyStrtab(StringRef Strtab);
+
/// Write the specified module to the buffer specified at construction time.
///
/// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
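A hedged sketch of the intended calling sequence, assuming a Module *M and the writeModule() overload this header declares:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Bitcode/BitcodeWriter.h"

using namespace llvm;

static void emitBitcode(const Module *M) {
  SmallVector<char, 0> Buffer;
  BitcodeWriter Writer(Buffer);
  Writer.writeModule(M); // Repeat per module sharing the string table.
  Writer.writeStrtab();  // Exactly once, after all modules are written.
}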
diff --git a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index e2d2fbb0f449..03eac80bc1e8 100644
--- a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -22,7 +22,7 @@
namespace llvm {
namespace bitc {
-// The only top-level block type defined is for a module.
+// The only top-level block types are MODULE, IDENTIFICATION and STRTAB.
enum BlockIDs {
// Blocks
MODULE_BLOCK_ID = FIRST_APPLICATION_BLOCKID,
@@ -52,7 +52,9 @@ enum BlockIDs {
OPERAND_BUNDLE_TAGS_BLOCK_ID,
- METADATA_KIND_BLOCK_ID
+ METADATA_KIND_BLOCK_ID,
+
+ STRTAB_BLOCK_ID,
};
/// Identification block contains a string that describes the producer details,
@@ -232,6 +234,10 @@ enum GlobalValueSummarySymtabCodes {
// llvm.type.checked.load intrinsic with all constant integer arguments.
// [typeid, offset, n x arg]
FS_TYPE_CHECKED_LOAD_CONST_VCALL = 15,
+ // Assigns a GUID to a value ID. This normally appears only in combined
+ // summaries, but it can also appear in per-module summaries for PGO data.
+ // [valueid, guid]
+ FS_VALUE_GUID = 16,
};
enum MetadataCodes {
@@ -550,6 +556,10 @@ enum ComdatSelectionKindCodes {
COMDAT_SELECTION_KIND_SAME_SIZE = 5,
};
+enum StrtabCodes {
+ STRTAB_BLOB = 1,
+};
+
} // End bitc namespace
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index d8096aeb215a..911e8756070b 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -62,9 +62,6 @@ protected:
const TargetRegisterInfo &TRI,
const RegisterBankInfo &RBI) const;
- Optional<int64_t> getConstantVRegVal(unsigned VReg,
- const MachineRegisterInfo &MRI) const;
-
bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
const MachineRegisterInfo &MRI) const;
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index 52bf965a3cb3..92bc9736141a 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -60,5 +60,8 @@ void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
const char *PassName, StringRef Msg,
const MachineInstr &MI);
+Optional<int64_t> getConstantVRegVal(unsigned VReg,
+ const MachineRegisterInfo &MRI);
+
} // End namespace llvm.
#endif
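The constant-vreg helper now lives in the generic GlobalISel utilities rather than on InstructionSelector. A hedged sketch of a caller, assuming operand 1 of MI is a register operand:

#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// True if the vreg feeding operand 1 is defined by a G_CONSTANT of zero.
static bool feedsZero(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
  if (Optional<int64_t> Cst =
          getConstantVRegVal(MI.getOperand(1).getReg(), MRI))
    return *Cst == 0;
  return false;
}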
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index ef4226d30fe3..412c55d542ea 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -413,6 +413,11 @@ MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
unsigned Reg, unsigned Offset,
const MDNode *Variable, const MDNode *Expr);
+/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
+MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I,
+ const MachineInstr &Orig, int FrameIndex);
+
inline unsigned getDefRegState(bool B) {
return B ? RegState::Define : 0;
}
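A hedged sketch of the intended spill-path use, assuming the caller just stored the variable's register to frame index FI and still holds the original DBG_VALUE:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

static void rewriteDbgValueAfterSpill(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator InsertPt,
                                      const MachineInstr &OrigDV, int FI) {
  // The clone refers to the stack slot and is inserted before InsertPt.
  MachineInstr *SpillDV = buildDbgValueForSpill(MBB, InsertPt, OrigDV, FI);
  (void)SpillDV;
}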
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineValueType.h b/contrib/llvm/include/llvm/CodeGen/MachineValueType.h
index e4744fd5e260..a90fe96227b9 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineValueType.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineValueType.h
@@ -28,155 +28,246 @@ namespace llvm {
/// type can be represented by an MVT.
class MVT {
public:
- enum SimpleValueType : int8_t {
- // Simple value types less than zero are considered extended value types.
- INVALID_SIMPLE_VALUE_TYPE = -1,
+ enum SimpleValueType : uint8_t {
+ // Simple value types that aren't explicitly part of this enumeration
+ // are considered extended value types.
+ INVALID_SIMPLE_VALUE_TYPE = 0,
// If you change this numbering, you must change the values in
// ValueTypes.td as well!
- Other = 0, // This is a non-standard value
- i1 = 1, // This is a 1 bit integer value
- i8 = 2, // This is an 8 bit integer value
- i16 = 3, // This is a 16 bit integer value
- i32 = 4, // This is a 32 bit integer value
- i64 = 5, // This is a 64 bit integer value
- i128 = 6, // This is a 128 bit integer value
+ Other = 1, // This is a non-standard value
+ i1 = 2, // This is a 1 bit integer value
+ i8 = 3, // This is an 8 bit integer value
+ i16 = 4, // This is a 16 bit integer value
+ i32 = 5, // This is a 32 bit integer value
+ i64 = 6, // This is a 64 bit integer value
+ i128 = 7, // This is a 128 bit integer value
FIRST_INTEGER_VALUETYPE = i1,
LAST_INTEGER_VALUETYPE = i128,
- f16 = 7, // This is a 16 bit floating point value
- f32 = 8, // This is a 32 bit floating point value
- f64 = 9, // This is a 64 bit floating point value
- f80 = 10, // This is a 80 bit floating point value
- f128 = 11, // This is a 128 bit floating point value
- ppcf128 = 12, // This is a PPC 128-bit floating point value
+ f16 = 8, // This is a 16 bit floating point value
+ f32 = 9, // This is a 32 bit floating point value
+ f64 = 10, // This is a 64 bit floating point value
+ f80 = 11, // This is an 80 bit floating point value
+ f128 = 12, // This is a 128 bit floating point value
+ ppcf128 = 13, // This is a PPC 128-bit floating point value
FIRST_FP_VALUETYPE = f16,
LAST_FP_VALUETYPE = ppcf128,
- v2i1 = 13, // 2 x i1
- v4i1 = 14, // 4 x i1
- v8i1 = 15, // 8 x i1
- v16i1 = 16, // 16 x i1
- v32i1 = 17, // 32 x i1
- v64i1 = 18, // 64 x i1
- v512i1 = 19, // 512 x i1
- v1024i1 = 20, // 1024 x i1
-
- v1i8 = 21, // 1 x i8
- v2i8 = 22, // 2 x i8
- v4i8 = 23, // 4 x i8
- v8i8 = 24, // 8 x i8
- v16i8 = 25, // 16 x i8
- v32i8 = 26, // 32 x i8
- v64i8 = 27, // 64 x i8
- v128i8 = 28, //128 x i8
- v256i8 = 29, //256 x i8
-
- v1i16 = 30, // 1 x i16
- v2i16 = 31, // 2 x i16
- v4i16 = 32, // 4 x i16
- v8i16 = 33, // 8 x i16
- v16i16 = 34, // 16 x i16
- v32i16 = 35, // 32 x i16
- v64i16 = 36, // 64 x i16
- v128i16 = 37, //128 x i16
-
- v1i32 = 38, // 1 x i32
- v2i32 = 39, // 2 x i32
- v4i32 = 40, // 4 x i32
- v8i32 = 41, // 8 x i32
- v16i32 = 42, // 16 x i32
- v32i32 = 43, // 32 x i32
- v64i32 = 44, // 64 x i32
-
- v1i64 = 45, // 1 x i64
- v2i64 = 46, // 2 x i64
- v4i64 = 47, // 4 x i64
- v8i64 = 48, // 8 x i64
- v16i64 = 49, // 16 x i64
- v32i64 = 50, // 32 x i64
-
- v1i128 = 51, // 1 x i128
+ v2i1 = 14, // 2 x i1
+ v4i1 = 15, // 4 x i1
+ v8i1 = 16, // 8 x i1
+ v16i1 = 17, // 16 x i1
+ v32i1 = 18, // 32 x i1
+ v64i1 = 19, // 64 x i1
+ v512i1 = 20, // 512 x i1
+ v1024i1 = 21, // 1024 x i1
+
+ v1i8 = 22, // 1 x i8
+ v2i8 = 23, // 2 x i8
+ v4i8 = 24, // 4 x i8
+ v8i8 = 25, // 8 x i8
+ v16i8 = 26, // 16 x i8
+ v32i8 = 27, // 32 x i8
+ v64i8 = 28, // 64 x i8
+ v128i8 = 29, //128 x i8
+ v256i8 = 30, //256 x i8
+
+ v1i16 = 31, // 1 x i16
+ v2i16 = 32, // 2 x i16
+ v4i16 = 33, // 4 x i16
+ v8i16 = 34, // 8 x i16
+ v16i16 = 35, // 16 x i16
+ v32i16 = 36, // 32 x i16
+ v64i16 = 37, // 64 x i16
+ v128i16 = 38, //128 x i16
+
+ v1i32 = 39, // 1 x i32
+ v2i32 = 40, // 2 x i32
+ v4i32 = 41, // 4 x i32
+ v8i32 = 42, // 8 x i32
+ v16i32 = 43, // 16 x i32
+ v32i32 = 44, // 32 x i32
+ v64i32 = 45, // 64 x i32
+
+ v1i64 = 46, // 1 x i64
+ v2i64 = 47, // 2 x i64
+ v4i64 = 48, // 4 x i64
+ v8i64 = 49, // 8 x i64
+ v16i64 = 50, // 16 x i64
+ v32i64 = 51, // 32 x i64
+
+ v1i128 = 52, // 1 x i128
+
+ // Scalable integer types
+ nxv2i1 = 53, // n x 2 x i1
+ nxv4i1 = 54, // n x 4 x i1
+ nxv8i1 = 55, // n x 8 x i1
+ nxv16i1 = 56, // n x 16 x i1
+ nxv32i1 = 57, // n x 32 x i1
+
+ nxv1i8 = 58, // n x 1 x i8
+ nxv2i8 = 59, // n x 2 x i8
+ nxv4i8 = 60, // n x 4 x i8
+ nxv8i8 = 61, // n x 8 x i8
+ nxv16i8 = 62, // n x 16 x i8
+ nxv32i8 = 63, // n x 32 x i8
+
+ nxv1i16 = 64, // n x 1 x i16
+ nxv2i16 = 65, // n x 2 x i16
+ nxv4i16 = 66, // n x 4 x i16
+ nxv8i16 = 67, // n x 8 x i16
+ nxv16i16 = 68, // n x 16 x i16
+ nxv32i16 = 69, // n x 32 x i16
+
+ nxv1i32 = 70, // n x 1 x i32
+ nxv2i32 = 71, // n x 2 x i32
+ nxv4i32 = 72, // n x 4 x i32
+ nxv8i32 = 73, // n x 8 x i32
+ nxv16i32 = 74, // n x 16 x i32
+ nxv32i32 = 75, // n x 32 x i32
+
+ nxv1i64 = 76, // n x 1 x i64
+ nxv2i64 = 77, // n x 2 x i64
+ nxv4i64 = 78, // n x 4 x i64
+ nxv8i64 = 79, // n x 8 x i64
+ nxv16i64 = 80, // n x 16 x i64
+ nxv32i64 = 81, // n x 32 x i64
FIRST_INTEGER_VECTOR_VALUETYPE = v2i1,
- LAST_INTEGER_VECTOR_VALUETYPE = v1i128,
-
- v2f16 = 52, // 2 x f16
- v4f16 = 53, // 4 x f16
- v8f16 = 54, // 8 x f16
- v1f32 = 55, // 1 x f32
- v2f32 = 56, // 2 x f32
- v4f32 = 57, // 4 x f32
- v8f32 = 58, // 8 x f32
- v16f32 = 59, // 16 x f32
- v1f64 = 60, // 1 x f64
- v2f64 = 61, // 2 x f64
- v4f64 = 62, // 4 x f64
- v8f64 = 63, // 8 x f64
+ LAST_INTEGER_VECTOR_VALUETYPE = nxv32i64,
+
+ FIRST_INTEGER_SCALABLE_VALUETYPE = nxv2i1,
+ LAST_INTEGER_SCALABLE_VALUETYPE = nxv32i64,
+
+ v2f16 = 82, // 2 x f16
+ v4f16 = 83, // 4 x f16
+ v8f16 = 84, // 8 x f16
+ v1f32 = 85, // 1 x f32
+ v2f32 = 86, // 2 x f32
+ v4f32 = 87, // 4 x f32
+ v8f32 = 88, // 8 x f32
+ v16f32 = 89, // 16 x f32
+ v1f64 = 90, // 1 x f64
+ v2f64 = 91, // 2 x f64
+ v4f64 = 92, // 4 x f64
+ v8f64 = 93, // 8 x f64
+
+ nxv2f16 = 94, // n x 2 x f16
+ nxv4f16 = 95, // n x 4 x f16
+ nxv8f16 = 96, // n x 8 x f16
+ nxv1f32 = 97, // n x 1 x f32
+ nxv2f32 = 98, // n x 2 x f32
+ nxv4f32 = 99, // n x 4 x f32
+ nxv8f32 = 100, // n x 8 x f32
+ nxv16f32 = 101, // n x 16 x f32
+ nxv1f64 = 102, // n x 1 x f64
+ nxv2f64 = 103, // n x 2 x f64
+ nxv4f64 = 104, // n x 4 x f64
+ nxv8f64 = 105, // n x 8 x f64
FIRST_FP_VECTOR_VALUETYPE = v2f16,
- LAST_FP_VECTOR_VALUETYPE = v8f64,
+ LAST_FP_VECTOR_VALUETYPE = nxv8f64,
+
+ FIRST_FP_SCALABLE_VALUETYPE = nxv2f16,
+ LAST_FP_SCALABLE_VALUETYPE = nxv8f64,
FIRST_VECTOR_VALUETYPE = v2i1,
- LAST_VECTOR_VALUETYPE = v8f64,
+ LAST_VECTOR_VALUETYPE = nxv8f64,
- x86mmx = 64, // This is an X86 MMX value
+ x86mmx = 106, // This is an X86 MMX value
- Glue = 65, // This glues nodes together during pre-RA sched
+ Glue = 107, // This glues nodes together during pre-RA sched
- isVoid = 66, // This has no value
+ isVoid = 108, // This has no value
- Untyped = 67, // This value takes a register, but has
- // unspecified type. The register class
- // will be determined by the opcode.
+ Untyped = 109, // This value takes a register, but has
+ // unspecified type. The register class
+ // will be determined by the opcode.
- FIRST_VALUETYPE = 0, // This is always the beginning of the list.
- LAST_VALUETYPE = 68, // This always remains at the end of the list.
+ FIRST_VALUETYPE = 1, // This is always the beginning of the list.
+ LAST_VALUETYPE = 110, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
// This value must be a multiple of 32.
- MAX_ALLOWED_VALUETYPE = 96,
+ MAX_ALLOWED_VALUETYPE = 128,
// A value of type llvm::TokenTy
- token = 120,
+ token = 248,
// This is MDNode or MDString.
- Metadata = 121,
+ Metadata = 249,
// An int value the size of the pointer of the current
// target to any address space. This must only be used internal to
// tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
- iPTRAny = 122,
+ iPTRAny = 250,
// A vector with any length and element size. This is used
// for intrinsics that have overloadings based on vector types.
// This is only for tblgen's consumption!
- vAny = 123,
+ vAny = 251,
// Any floating-point or vector floating-point value. This is used
// for intrinsics that have overloadings based on floating-point types.
// This is only for tblgen's consumption!
- fAny = 124,
+ fAny = 252,
// An integer or vector integer value of any bit width. This is
// used for intrinsics that have overloadings based on integer bit widths.
// This is only for tblgen's consumption!
- iAny = 125,
+ iAny = 253,
// An int value the size of the pointer of the current
// target. This should only be used internal to tblgen!
- iPTR = 126,
+ iPTR = 254,
// Any type. This is used for intrinsics that have overloadings.
// This is only for tblgen's consumption!
- Any = 127
+ Any = 255
};
SimpleValueType SimpleTy;
+
+ // A class to represent the number of elements in a vector.
+ //
+ // For fixed-length vectors, the total number of elements is exactly 'Min'.
+ // For scalable vectors, the total number of elements is an integer
+ // multiple of 'Min'.
+ class ElementCount {
+ public:
+ unsigned Min;
+ bool Scalable;
+
+ ElementCount(unsigned Min, bool Scalable)
+ : Min(Min), Scalable(Scalable) {}
+
+ ElementCount operator*(unsigned RHS) {
+ return { Min * RHS, Scalable };
+ }
+
+ ElementCount& operator*=(unsigned RHS) {
+ Min *= RHS;
+ return *this;
+ }
+
+ ElementCount operator/(unsigned RHS) {
+ return { Min / RHS, Scalable };
+ }
+
+ ElementCount& operator/=(unsigned RHS) {
+ Min /= RHS;
+ return *this;
+ }
+
+ bool operator==(const ElementCount& RHS) {
+ return Min == RHS.Min && Scalable == RHS.Scalable;
+ }
+ };
+
constexpr MVT() : SimpleTy(INVALID_SIMPLE_VALUE_TYPE) {}
constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {}
@@ -221,6 +312,15 @@ class MVT {
SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
}
+ /// Return true if this is a vector value type whose runtime length is
+ /// machine dependent.
+ bool isScalableVector() const {
+ return ((SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VALUETYPE &&
+ SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VALUETYPE) ||
+ (SimpleTy >= MVT::FIRST_FP_SCALABLE_VALUETYPE &&
+ SimpleTy <= MVT::LAST_FP_SCALABLE_VALUETYPE));
+ }
+
/// Return true if this is a 16-bit vector type.
bool is16BitVector() const {
return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 ||
@@ -318,7 +418,12 @@ class MVT {
case v32i1:
case v64i1:
case v512i1:
- case v1024i1: return i1;
+ case v1024i1:
+ case nxv2i1:
+ case nxv4i1:
+ case nxv8i1:
+ case nxv16i1:
+ case nxv32i1: return i1;
case v1i8:
case v2i8:
case v4i8:
@@ -327,7 +432,13 @@ class MVT {
case v32i8:
case v64i8:
case v128i8:
- case v256i8: return i8;
+ case v256i8:
+ case nxv1i8:
+ case nxv2i8:
+ case nxv4i8:
+ case nxv8i8:
+ case nxv16i8:
+ case nxv32i8: return i8;
case v1i16:
case v2i16:
case v4i16:
@@ -335,33 +446,63 @@ class MVT {
case v16i16:
case v32i16:
case v64i16:
- case v128i16: return i16;
+ case v128i16:
+ case nxv1i16:
+ case nxv2i16:
+ case nxv4i16:
+ case nxv8i16:
+ case nxv16i16:
+ case nxv32i16: return i16;
case v1i32:
case v2i32:
case v4i32:
case v8i32:
case v16i32:
case v32i32:
- case v64i32: return i32;
+ case v64i32:
+ case nxv1i32:
+ case nxv2i32:
+ case nxv4i32:
+ case nxv8i32:
+ case nxv16i32:
+ case nxv32i32: return i32;
case v1i64:
case v2i64:
case v4i64:
case v8i64:
case v16i64:
- case v32i64: return i64;
+ case v32i64:
+ case nxv1i64:
+ case nxv2i64:
+ case nxv4i64:
+ case nxv8i64:
+ case nxv16i64:
+ case nxv32i64: return i64;
case v1i128: return i128;
case v2f16:
case v4f16:
- case v8f16: return f16;
+ case v8f16:
+ case nxv2f16:
+ case nxv4f16:
+ case nxv8f16: return f16;
case v1f32:
case v2f32:
case v4f32:
case v8f32:
- case v16f32: return f32;
+ case v16f32:
+ case nxv1f32:
+ case nxv2f32:
+ case nxv4f32:
+ case nxv8f32:
+ case nxv16f32: return f32;
case v1f64:
case v2f64:
case v4f64:
- case v8f64: return f64;
+ case v8f64:
+ case nxv1f64:
+ case nxv2f64:
+ case nxv4f64:
+ case nxv8f64: return f64;
}
}
@@ -382,13 +523,24 @@ class MVT {
case v32i8:
case v32i16:
case v32i32:
- case v32i64: return 32;
+ case v32i64:
+ case nxv32i1:
+ case nxv32i8:
+ case nxv32i16:
+ case nxv32i32:
+ case nxv32i64: return 32;
case v16i1:
case v16i8:
case v16i16:
case v16i32:
case v16i64:
- case v16f32: return 16;
+ case v16f32:
+ case nxv16i1:
+ case nxv16i8:
+ case nxv16i16:
+ case nxv16i32:
+ case nxv16i64:
+ case nxv16f32: return 16;
case v8i1:
case v8i8:
case v8i16:
@@ -396,7 +548,15 @@ class MVT {
case v8i64:
case v8f16:
case v8f32:
- case v8f64: return 8;
+ case v8f64:
+ case nxv8i1:
+ case nxv8i8:
+ case nxv8i16:
+ case nxv8i32:
+ case nxv8i64:
+ case nxv8f16:
+ case nxv8f32:
+ case nxv8f64: return 8;
case v4i1:
case v4i8:
case v4i16:
@@ -404,7 +564,15 @@ class MVT {
case v4i64:
case v4f16:
case v4f32:
- case v4f64: return 4;
+ case v4f64:
+ case nxv4i1:
+ case nxv4i8:
+ case nxv4i16:
+ case nxv4i32:
+ case nxv4i64:
+ case nxv4f16:
+ case nxv4f32:
+ case nxv4f64: return 4;
case v2i1:
case v2i8:
case v2i16:
@@ -412,17 +580,35 @@ class MVT {
case v2i64:
case v2f16:
case v2f32:
- case v2f64: return 2;
+ case v2f64:
+ case nxv2i1:
+ case nxv2i8:
+ case nxv2i16:
+ case nxv2i32:
+ case nxv2i64:
+ case nxv2f16:
+ case nxv2f32:
+ case nxv2f64: return 2;
case v1i8:
case v1i16:
case v1i32:
case v1i64:
case v1i128:
case v1f32:
- case v1f64: return 1;
+ case v1f64:
+ case nxv1i8:
+ case nxv1i16:
+ case nxv1i32:
+ case nxv1i64:
+ case nxv1f32:
+ case nxv1f64: return 1;
}
}
+ MVT::ElementCount getVectorElementCount() const {
+ return { getVectorNumElements(), isScalableVector() };
+ }
+
unsigned getSizeInBits() const {
switch (SimpleTy) {
default:
@@ -443,16 +629,23 @@ class MVT {
case Metadata:
llvm_unreachable("Value type is metadata.");
case i1 : return 1;
- case v2i1: return 2;
- case v4i1: return 4;
+ case v2i1:
+ case nxv2i1: return 2;
+ case v4i1:
+ case nxv4i1: return 4;
case i8 :
case v1i8:
- case v8i1: return 8;
+ case v8i1:
+ case nxv1i8:
+ case nxv8i1: return 8;
case i16 :
case f16:
case v16i1:
case v2i8:
- case v1i16: return 16;
+ case v1i16:
+ case nxv16i1:
+ case nxv2i8:
+ case nxv1i16: return 16;
case f32 :
case i32 :
case v32i1:
@@ -460,7 +653,13 @@ class MVT {
case v2i16:
case v2f16:
case v1f32:
- case v1i32: return 32;
+ case v1i32:
+ case nxv32i1:
+ case nxv4i8:
+ case nxv2i16:
+ case nxv1i32:
+ case nxv2f16:
+ case nxv1f32: return 32;
case x86mmx:
case f64 :
case i64 :
@@ -471,7 +670,14 @@ class MVT {
case v1i64:
case v4f16:
case v2f32:
- case v1f64: return 64;
+ case v1f64:
+ case nxv8i8:
+ case nxv4i16:
+ case nxv2i32:
+ case nxv1i64:
+ case nxv4f16:
+ case nxv2f32:
+ case nxv1f64: return 64;
case f80 : return 80;
case f128:
case ppcf128:
@@ -483,29 +689,50 @@ class MVT {
case v1i128:
case v8f16:
case v4f32:
- case v2f64: return 128;
+ case v2f64:
+ case nxv16i8:
+ case nxv8i16:
+ case nxv4i32:
+ case nxv2i64:
+ case nxv8f16:
+ case nxv4f32:
+ case nxv2f64: return 128;
case v32i8:
case v16i16:
case v8i32:
case v4i64:
case v8f32:
- case v4f64: return 256;
+ case v4f64:
+ case nxv32i8:
+ case nxv16i16:
+ case nxv8i32:
+ case nxv4i64:
+ case nxv8f32:
+ case nxv4f64: return 256;
case v512i1:
case v64i8:
case v32i16:
case v16i32:
case v8i64:
case v16f32:
- case v8f64: return 512;
+ case v8f64:
+ case nxv32i16:
+ case nxv16i32:
+ case nxv8i64:
+ case nxv16f32:
+ case nxv8f64: return 512;
case v1024i1:
case v128i8:
case v64i16:
case v32i32:
- case v16i64: return 1024;
+ case v16i64:
+ case nxv32i32:
+ case nxv16i64: return 1024;
case v256i8:
case v128i16:
case v64i32:
- case v32i64: return 2048;
+ case v32i64:
+ case nxv32i64: return 2048;
}
}
@@ -659,6 +886,83 @@ class MVT {
return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
}
+ static MVT getScalableVectorVT(MVT VT, unsigned NumElements) {
+ switch(VT.SimpleTy) {
+ default:
+ break;
+ case MVT::i1:
+ if (NumElements == 2) return MVT::nxv2i1;
+ if (NumElements == 4) return MVT::nxv4i1;
+ if (NumElements == 8) return MVT::nxv8i1;
+ if (NumElements == 16) return MVT::nxv16i1;
+ if (NumElements == 32) return MVT::nxv32i1;
+ break;
+ case MVT::i8:
+ if (NumElements == 1) return MVT::nxv1i8;
+ if (NumElements == 2) return MVT::nxv2i8;
+ if (NumElements == 4) return MVT::nxv4i8;
+ if (NumElements == 8) return MVT::nxv8i8;
+ if (NumElements == 16) return MVT::nxv16i8;
+ if (NumElements == 32) return MVT::nxv32i8;
+ break;
+ case MVT::i16:
+ if (NumElements == 1) return MVT::nxv1i16;
+ if (NumElements == 2) return MVT::nxv2i16;
+ if (NumElements == 4) return MVT::nxv4i16;
+ if (NumElements == 8) return MVT::nxv8i16;
+ if (NumElements == 16) return MVT::nxv16i16;
+ if (NumElements == 32) return MVT::nxv32i16;
+ break;
+ case MVT::i32:
+ if (NumElements == 1) return MVT::nxv1i32;
+ if (NumElements == 2) return MVT::nxv2i32;
+ if (NumElements == 4) return MVT::nxv4i32;
+ if (NumElements == 8) return MVT::nxv8i32;
+ if (NumElements == 16) return MVT::nxv16i32;
+ if (NumElements == 32) return MVT::nxv32i32;
+ break;
+ case MVT::i64:
+ if (NumElements == 1) return MVT::nxv1i64;
+ if (NumElements == 2) return MVT::nxv2i64;
+ if (NumElements == 4) return MVT::nxv4i64;
+ if (NumElements == 8) return MVT::nxv8i64;
+ if (NumElements == 16) return MVT::nxv16i64;
+ if (NumElements == 32) return MVT::nxv32i64;
+ break;
+ case MVT::f16:
+ if (NumElements == 2) return MVT::nxv2f16;
+ if (NumElements == 4) return MVT::nxv4f16;
+ if (NumElements == 8) return MVT::nxv8f16;
+ break;
+ case MVT::f32:
+ if (NumElements == 1) return MVT::nxv1f32;
+ if (NumElements == 2) return MVT::nxv2f32;
+ if (NumElements == 4) return MVT::nxv4f32;
+ if (NumElements == 8) return MVT::nxv8f32;
+ if (NumElements == 16) return MVT::nxv16f32;
+ break;
+ case MVT::f64:
+ if (NumElements == 1) return MVT::nxv1f64;
+ if (NumElements == 2) return MVT::nxv2f64;
+ if (NumElements == 4) return MVT::nxv4f64;
+ if (NumElements == 8) return MVT::nxv8f64;
+ break;
+ }
+ return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
+ }
+
+ static MVT getVectorVT(MVT VT, unsigned NumElements, bool IsScalable) {
+ if (IsScalable)
+ return getScalableVectorVT(VT, NumElements);
+ return getVectorVT(VT, NumElements);
+ }
+
+ static MVT getVectorVT(MVT VT, MVT::ElementCount EC) {
+ if (EC.Scalable)
+ return getScalableVectorVT(VT, EC.Min);
+ return getVectorVT(VT, EC.Min);
+ }
+
/// Return the value type corresponding to the specified type. This returns
/// all pointers as iPTR. If HandleUnknown is true, unknown types are
/// returned as Other, otherwise they are invalid.
@@ -709,6 +1013,14 @@ class MVT {
MVT::FIRST_FP_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FP_VECTOR_VALUETYPE + 1));
}
+ static mvt_range integer_scalable_vector_valuetypes() {
+ return mvt_range(MVT::FIRST_INTEGER_SCALABLE_VALUETYPE,
+ (MVT::SimpleValueType)(MVT::LAST_INTEGER_SCALABLE_VALUETYPE + 1));
+ }
+ static mvt_range fp_scalable_vector_valuetypes() {
+ return mvt_range(MVT::FIRST_FP_SCALABLE_VALUETYPE,
+ (MVT::SimpleValueType)(MVT::LAST_FP_SCALABLE_VALUETYPE + 1));
+ }
/// @}
};
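Illustrative checks against the renumbered enum; a sketch assuming only the declarations above:

#include "llvm/CodeGen/MachineValueType.h"
#include <cassert>

using namespace llvm;

static void scalableMVTSketch() {
  // Build "n x 4 x i32" through the new overload.
  MVT VT = MVT::getVectorVT(MVT::i32, 4, /*IsScalable=*/true);
  assert(VT == MVT::nxv4i32 && VT.isScalableVector());

  // ElementCount carries the minimum lane count plus the scalable flag.
  MVT::ElementCount EC = VT.getVectorElementCount();
  assert(EC.Min == 4 && EC.Scalable);

  // Round-trip: the ElementCount overload rebuilds the same type.
  assert(MVT::getVectorVT(MVT::i32, EC) == MVT::nxv4i32);
}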
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
index 0a3063663cef..b404b4ca701f 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -44,7 +44,7 @@ namespace llvm {
bool operator!=(EVT VT) const {
if (V.SimpleTy != VT.V.SimpleTy)
return true;
- if (V.SimpleTy < 0)
+ if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
return LLVMTy != VT.LLVMTy;
return false;
}
@@ -60,31 +60,48 @@ namespace llvm {
/// bits.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
MVT M = MVT::getIntegerVT(BitWidth);
- if (M.SimpleTy >= 0)
+ if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
return M;
return getExtendedIntegerVT(Context, BitWidth);
}
/// Returns the EVT that represents a vector NumElements in length, where
/// each element is of type VT.
- static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements) {
- MVT M = MVT::getVectorVT(VT.V, NumElements);
- if (M.SimpleTy >= 0)
+ static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements,
+ bool IsScalable = false) {
+ MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable);
+ if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
return M;
+
+ assert(!IsScalable && "We don't support extended scalable types yet");
return getExtendedVectorVT(Context, VT, NumElements);
}
+ /// Returns the EVT that represents a vector EC.Min elements in length,
+ /// where each element is of type VT.
+ static EVT getVectorVT(LLVMContext &Context, EVT VT, MVT::ElementCount EC) {
+ MVT M = MVT::getVectorVT(VT.V, EC);
+ if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
+ return M;
+ assert(!EC.Scalable && "We don't support extended scalable types yet");
+ return getExtendedVectorVT(Context, VT, EC.Min);
+ }
+
/// Return a vector with the same number of elements as this vector, but
/// with the element type converted to an integer type with the same
/// bitwidth.
EVT changeVectorElementTypeToInteger() const {
- if (!isSimple())
+ if (!isSimple()) {
+ assert(!isScalableVector() &&
+ "We don't support extended scalable types yet");
return changeExtendedVectorElementTypeToInteger();
+ }
MVT EltTy = getSimpleVT().getVectorElementType();
unsigned BitWidth = EltTy.getSizeInBits();
MVT IntTy = MVT::getIntegerVT(BitWidth);
- MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements());
- assert(VecTy.SimpleTy >= 0 &&
+ MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements(),
+ isScalableVector());
+ assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
"Simple vector VT not representable by simple integer vector VT!");
return VecTy;
}
@@ -104,7 +121,7 @@ namespace llvm {
/// Test if the given EVT is simple (as opposed to being extended).
bool isSimple() const {
- return V.SimpleTy >= 0;
+ return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE;
}
/// Test if the given EVT is extended (as opposed to being simple).
@@ -132,6 +149,17 @@ namespace llvm {
return isSimple() ? V.isVector() : isExtendedVector();
}
+ /// Return true if this is a vector type whose runtime length is
+ /// machine dependent.
+ bool isScalableVector() const {
+ // FIXME: We don't support extended scalable types yet, because the
+ // matching IR type doesn't exist. Once it has been added, this can
+ // be changed to call isExtendedScalableVector.
+ if (!isSimple())
+ return false;
+ return V.isScalableVector();
+ }
+
/// Return true if this is a 16-bit vector type.
bool is16BitVector() const {
return isSimple() ? V.is16BitVector() : isExtended16BitVector();
@@ -247,6 +275,17 @@ namespace llvm {
return getExtendedVectorNumElements();
}
+ // Given a (possibly scalable) vector type, return the ElementCount.
+ MVT::ElementCount getVectorElementCount() const {
+ assert(isVector() && "Invalid vector type!");
+ if (isSimple())
+ return V.getVectorElementCount();
+
+ assert(!isScalableVector() &&
+ "We don't support extended scalable types yet");
+ return {getExtendedVectorNumElements(), false};
+ }
+
/// Return the size of the specified value type in bits.
unsigned getSizeInBits() const {
if (isSimple())
@@ -301,7 +340,17 @@ namespace llvm {
EVT widenIntegerVectorElementType(LLVMContext &Context) const {
EVT EltVT = getVectorElementType();
EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
- return EVT::getVectorVT(Context, EltVT, getVectorNumElements());
+ return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
+ }
+
+ // Return a VT for a vector type with the same element type but
+ // half the number of elements. The type returned may be an
+ // extended type.
+ EVT getHalfNumVectorElementsVT(LLVMContext &Context) const {
+ EVT EltVT = getVectorElementType();
+ auto EltCnt = getVectorElementCount();
+ assert(!(EltCnt.Min & 1) && "Splitting vector, but not in half!");
+ return EVT::getVectorVT(Context, EltVT, EltCnt / 2);
}
/// Returns true if the given vector is a power of 2.
@@ -316,7 +365,8 @@ namespace llvm {
if (!isPow2VectorType()) {
unsigned NElts = getVectorNumElements();
unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
- return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts);
+ return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts,
+ isScalableVector());
}
else {
return *this;
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
index f7b1661d7451..cd8434475451 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -19,101 +19,147 @@ class ValueType<int size, int value> {
int Value = value;
}
-def OtherVT: ValueType<0 , 0>; // "Other" value
-def i1 : ValueType<1 , 1>; // One bit boolean value
-def i8 : ValueType<8 , 2>; // 8-bit integer value
-def i16 : ValueType<16 , 3>; // 16-bit integer value
-def i32 : ValueType<32 , 4>; // 32-bit integer value
-def i64 : ValueType<64 , 5>; // 64-bit integer value
-def i128 : ValueType<128, 6>; // 128-bit integer value
-def f16 : ValueType<16 , 7>; // 16-bit floating point value
-def f32 : ValueType<32 , 8>; // 32-bit floating point value
-def f64 : ValueType<64 , 9>; // 64-bit floating point value
-def f80 : ValueType<80 , 10>; // 80-bit floating point value
-def f128 : ValueType<128, 11>; // 128-bit floating point value
-def ppcf128: ValueType<128, 12>; // PPC 128-bit floating point value
-
-def v2i1 : ValueType<2 , 13>; // 2 x i1 vector value
-def v4i1 : ValueType<4 , 14>; // 4 x i1 vector value
-def v8i1 : ValueType<8 , 15>; // 8 x i1 vector value
-def v16i1 : ValueType<16, 16>; // 16 x i1 vector value
-def v32i1 : ValueType<32 , 17>; // 32 x i1 vector value
-def v64i1 : ValueType<64 , 18>; // 64 x i1 vector value
-def v512i1 : ValueType<512, 19>; // 512 x i1 vector value
-def v1024i1: ValueType<1024,20>; //1024 x i1 vector value
-
-def v1i8 : ValueType<16, 21>; // 1 x i8 vector value
-def v2i8 : ValueType<16 , 22>; // 2 x i8 vector value
-def v4i8 : ValueType<32 , 23>; // 4 x i8 vector value
-def v8i8 : ValueType<64 , 24>; // 8 x i8 vector value
-def v16i8 : ValueType<128, 25>; // 16 x i8 vector value
-def v32i8 : ValueType<256, 26>; // 32 x i8 vector value
-def v64i8 : ValueType<512, 27>; // 64 x i8 vector value
-def v128i8 : ValueType<1024,28>; //128 x i8 vector value
-def v256i8 : ValueType<2048,29>; //256 x i8 vector value
-
-def v1i16 : ValueType<16 , 30>; // 1 x i16 vector value
-def v2i16 : ValueType<32 , 31>; // 2 x i16 vector value
-def v4i16 : ValueType<64 , 32>; // 4 x i16 vector value
-def v8i16 : ValueType<128, 33>; // 8 x i16 vector value
-def v16i16 : ValueType<256, 34>; // 16 x i16 vector value
-def v32i16 : ValueType<512, 35>; // 32 x i16 vector value
-def v64i16 : ValueType<1024,36>; // 64 x i16 vector value
-def v128i16: ValueType<2048,37>; //128 x i16 vector value
-
-def v1i32 : ValueType<32 , 38>; // 1 x i32 vector value
-def v2i32 : ValueType<64 , 39>; // 2 x i32 vector value
-def v4i32 : ValueType<128, 40>; // 4 x i32 vector value
-def v8i32 : ValueType<256, 41>; // 8 x i32 vector value
-def v16i32 : ValueType<512, 42>; // 16 x i32 vector value
-def v32i32 : ValueType<1024,43>; // 32 x i32 vector value
-def v64i32 : ValueType<2048,44>; // 32 x i32 vector value
-
-def v1i64 : ValueType<64 , 45>; // 1 x i64 vector value
-def v2i64 : ValueType<128, 46>; // 2 x i64 vector value
-def v4i64 : ValueType<256, 47>; // 4 x i64 vector value
-def v8i64 : ValueType<512, 48>; // 8 x i64 vector value
-def v16i64 : ValueType<1024,49>; // 16 x i64 vector value
-def v32i64 : ValueType<2048,50>; // 32 x i64 vector value
-
-def v1i128 : ValueType<128, 51>; // 1 x i128 vector value
-
-def v2f16 : ValueType<32 , 52>; // 2 x f16 vector value
-def v4f16 : ValueType<64 , 53>; // 4 x f16 vector value
-def v8f16 : ValueType<128, 54>; // 8 x f16 vector value
-def v1f32 : ValueType<32 , 55>; // 1 x f32 vector value
-def v2f32 : ValueType<64 , 56>; // 2 x f32 vector value
-def v4f32 : ValueType<128, 57>; // 4 x f32 vector value
-def v8f32 : ValueType<256, 58>; // 8 x f32 vector value
-def v16f32 : ValueType<512, 59>; // 16 x f32 vector value
-def v1f64 : ValueType<64, 60>; // 1 x f64 vector value
-def v2f64 : ValueType<128, 61>; // 2 x f64 vector value
-def v4f64 : ValueType<256, 62>; // 4 x f64 vector value
-def v8f64 : ValueType<512, 63>; // 8 x f64 vector value
-
-
-def x86mmx : ValueType<64 , 64>; // X86 MMX value
-def FlagVT : ValueType<0 , 65>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 66>; // Produces no value
-def untyped: ValueType<8 , 67>; // Produces an untyped value
-def token : ValueType<0 , 120>; // TokenTy
-def MetadataVT: ValueType<0, 121>; // Metadata
+def OtherVT: ValueType<0 , 1>; // "Other" value
+def i1 : ValueType<1 , 2>; // One bit boolean value
+def i8 : ValueType<8 , 3>; // 8-bit integer value
+def i16 : ValueType<16 , 4>; // 16-bit integer value
+def i32 : ValueType<32 , 5>; // 32-bit integer value
+def i64 : ValueType<64 , 6>; // 64-bit integer value
+def i128 : ValueType<128, 7>; // 128-bit integer value
+def f16 : ValueType<16 , 8>; // 16-bit floating point value
+def f32 : ValueType<32 , 9>; // 32-bit floating point value
+def f64 : ValueType<64 , 10>; // 64-bit floating point value
+def f80 : ValueType<80 , 11>; // 80-bit floating point value
+def f128 : ValueType<128, 12>; // 128-bit floating point value
+def ppcf128: ValueType<128, 13>; // PPC 128-bit floating point value
+
+def v2i1 : ValueType<2 , 14>; // 2 x i1 vector value
+def v4i1 : ValueType<4 , 15>; // 4 x i1 vector value
+def v8i1 : ValueType<8 , 16>; // 8 x i1 vector value
+def v16i1 : ValueType<16, 17>; // 16 x i1 vector value
+def v32i1 : ValueType<32 , 18>; // 32 x i1 vector value
+def v64i1 : ValueType<64 , 19>; // 64 x i1 vector value
+def v512i1 : ValueType<512, 20>; // 512 x i1 vector value
+def v1024i1: ValueType<1024,21>; //1024 x i1 vector value
+
+def v1i8 : ValueType<16, 22>; // 1 x i8 vector value
+def v2i8 : ValueType<16 , 23>; // 2 x i8 vector value
+def v4i8 : ValueType<32 , 24>; // 4 x i8 vector value
+def v8i8 : ValueType<64 , 25>; // 8 x i8 vector value
+def v16i8 : ValueType<128, 26>; // 16 x i8 vector value
+def v32i8 : ValueType<256, 27>; // 32 x i8 vector value
+def v64i8 : ValueType<512, 28>; // 64 x i8 vector value
+def v128i8 : ValueType<1024,29>; //128 x i8 vector value
+def v256i8 : ValueType<2048,30>; //256 x i8 vector value
+
+def v1i16 : ValueType<16 , 31>; // 1 x i16 vector value
+def v2i16 : ValueType<32 , 32>; // 2 x i16 vector value
+def v4i16 : ValueType<64 , 33>; // 4 x i16 vector value
+def v8i16 : ValueType<128, 34>; // 8 x i16 vector value
+def v16i16 : ValueType<256, 35>; // 16 x i16 vector value
+def v32i16 : ValueType<512, 36>; // 32 x i16 vector value
+def v64i16 : ValueType<1024,37>; // 64 x i16 vector value
+def v128i16: ValueType<2048,38>; //128 x i16 vector value
+
+def v1i32 : ValueType<32 , 39>; // 1 x i32 vector value
+def v2i32 : ValueType<64 , 40>; // 2 x i32 vector value
+def v4i32 : ValueType<128, 41>; // 4 x i32 vector value
+def v8i32 : ValueType<256, 42>; // 8 x i32 vector value
+def v16i32 : ValueType<512, 43>; // 16 x i32 vector value
+def v32i32 : ValueType<1024,44>; // 32 x i32 vector value
+def v64i32 : ValueType<2048,45>; // 64 x i32 vector value
+
+def v1i64 : ValueType<64 , 46>; // 1 x i64 vector value
+def v2i64 : ValueType<128, 47>; // 2 x i64 vector value
+def v4i64 : ValueType<256, 48>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 49>; // 8 x i64 vector value
+def v16i64 : ValueType<1024,50>; // 16 x i64 vector value
+def v32i64 : ValueType<2048,51>; // 32 x i64 vector value
+
+def v1i128 : ValueType<128, 52>; // 1 x i128 vector value
+
+def nxv2i1 : ValueType<2, 53>; // n x 2 x i1 vector value
+def nxv4i1 : ValueType<4, 54>; // n x 4 x i1 vector value
+def nxv8i1 : ValueType<8, 55>; // n x 8 x i1 vector value
+def nxv16i1 : ValueType<16, 56>; // n x 16 x i1 vector value
+def nxv32i1 : ValueType<32, 57>; // n x 32 x i1 vector value
+
+def nxv1i8 : ValueType<8, 58>; // n x 1 x i8 vector value
+def nxv2i8 : ValueType<16, 59>; // n x 2 x i8 vector value
+def nxv4i8 : ValueType<32, 60>; // n x 4 x i8 vector value
+def nxv8i8 : ValueType<64, 61>; // n x 8 x i8 vector value
+def nxv16i8 : ValueType<128, 62>; // n x 16 x i8 vector value
+def nxv32i8 : ValueType<256, 63>; // n x 32 x i8 vector value
+
+def nxv1i16 : ValueType<16, 64>; // n x 1 x i16 vector value
+def nxv2i16 : ValueType<32, 65>; // n x 2 x i16 vector value
+def nxv4i16 : ValueType<64, 66>; // n x 4 x i16 vector value
+def nxv8i16 : ValueType<128, 67>; // n x 8 x i16 vector value
+def nxv16i16: ValueType<256, 68>; // n x 16 x i16 vector value
+def nxv32i16: ValueType<512, 69>; // n x 32 x i16 vector value
+
+def nxv1i32 : ValueType<32, 70>; // n x 1 x i32 vector value
+def nxv2i32 : ValueType<64, 71>; // n x 2 x i32 vector value
+def nxv4i32 : ValueType<128, 72>; // n x 4 x i32 vector value
+def nxv8i32 : ValueType<256, 73>; // n x 8 x i32 vector value
+def nxv16i32: ValueType<512, 74>; // n x 16 x i32 vector value
+def nxv32i32: ValueType<1024,75>; // n x 32 x i32 vector value
+
+def nxv1i64 : ValueType<64, 76>; // n x 1 x i64 vector value
+def nxv2i64 : ValueType<128, 77>; // n x 2 x i64 vector value
+def nxv4i64 : ValueType<256, 78>; // n x 4 x i64 vector value
+def nxv8i64 : ValueType<512, 79>; // n x 8 x i64 vector value
+def nxv16i64: ValueType<1024,80>; // n x 16 x i64 vector value
+def nxv32i64: ValueType<2048,81>; // n x 32 x i64 vector value
+
+def v2f16 : ValueType<32 , 82>; // 2 x f16 vector value
+def v4f16 : ValueType<64 , 83>; // 4 x f16 vector value
+def v8f16 : ValueType<128, 84>; // 8 x f16 vector value
+def v1f32 : ValueType<32 , 85>; // 1 x f32 vector value
+def v2f32 : ValueType<64 , 86>; // 2 x f32 vector value
+def v4f32 : ValueType<128, 87>; // 4 x f32 vector value
+def v8f32 : ValueType<256, 88>; // 8 x f32 vector value
+def v16f32 : ValueType<512, 89>; // 16 x f32 vector value
+def v1f64 : ValueType<64, 90>; // 1 x f64 vector value
+def v2f64 : ValueType<128, 91>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 92>; // 4 x f64 vector value
+def v8f64 : ValueType<512, 93>; // 8 x f64 vector value
+
+def nxv2f16 : ValueType<32 , 94>; // n x 2 x f16 vector value
+def nxv4f16 : ValueType<64 , 95>; // n x 4 x f16 vector value
+def nxv8f16 : ValueType<128, 96>; // n x 8 x f16 vector value
+def nxv1f32 : ValueType<32 , 97>; // n x 1 x f32 vector value
+def nxv2f32 : ValueType<64 , 98>; // n x 2 x f32 vector value
+def nxv4f32 : ValueType<128, 99>; // n x 4 x f32 vector value
+def nxv8f32 : ValueType<256, 100>; // n x 8 x f32 vector value
+def nxv16f32 : ValueType<512, 101>; // n x 16 x f32 vector value
+def nxv1f64 : ValueType<64, 102>; // n x 1 x f64 vector value
+def nxv2f64 : ValueType<128, 103>; // n x 2 x f64 vector value
+def nxv4f64 : ValueType<256, 104>; // n x 4 x f64 vector value
+def nxv8f64 : ValueType<512, 105>; // n x 8 x f64 vector value
+
+def x86mmx : ValueType<64 , 106>; // X86 MMX value
+def FlagVT : ValueType<0 , 107>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 108>; // Produces no value
+def untyped: ValueType<8 , 109>; // Produces an untyped value
+def token : ValueType<0 , 248>; // TokenTy
+def MetadataVT: ValueType<0, 249>; // Metadata
// Pseudo valuetype mapped to the current pointer size to any address space.
// Should only be used in TableGen.
-def iPTRAny : ValueType<0, 122>;
+def iPTRAny : ValueType<0, 250>;
// Pseudo valuetype to represent "vector of any size"
-def vAny : ValueType<0 , 123>;
+def vAny : ValueType<0 , 251>;
// Pseudo valuetype to represent "float of any format"
-def fAny : ValueType<0 , 124>;
+def fAny : ValueType<0 , 252>;
// Pseudo valuetype to represent "integer of any bit width"
-def iAny : ValueType<0 , 125>;
+def iAny : ValueType<0 , 253>;
// Pseudo valuetype mapped to the current pointer size.
-def iPTR : ValueType<0 , 126>;
+def iPTR : ValueType<0 , 254>;
// Pseudo valuetype to represent "any type of any size".
-def Any : ValueType<0 , 127>;
+def Any : ValueType<0 , 255>;
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
index 33e24fe3adc9..ee06125ea278 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
@@ -247,16 +247,11 @@ public:
/// DW_AT_call_line attribute in this DIE.
/// \param CallColumn filled in with non-zero if successful, zero if there is
/// no DW_AT_call_column attribute in this DIE.
+ /// \param CallDiscriminator filled in with non-zero if successful, zero if
+ /// there is no DW_AT_GNU_discriminator attribute in this DIE.
void getCallerFrame(uint32_t &CallFile, uint32_t &CallLine,
- uint32_t &CallColumn) const;
+ uint32_t &CallColumn, uint32_t &CallDiscriminator) const;
- /// Get inlined chain for a given address, rooted at the current DIE.
- /// Returns empty chain if address is not contained in address range
- /// of current DIE.
- void
- getInlinedChainForAddress(const uint64_t Address,
- SmallVectorImpl<DWARFDie> &InlinedChain) const;
-
class attribute_iterator;
/// Get an iterator range to all attributes in the current DIE only.
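A hedged usage sketch for the widened signature; Die is assumed to be a DW_TAG_inlined_subroutine entry:

#include "llvm/DebugInfo/DWARF/DWARFDie.h"

using namespace llvm;

static uint32_t callDiscriminator(const DWARFDie &Die) {
  uint32_t File = 0, Line = 0, Column = 0, Discriminator = 0;
  Die.getCallerFrame(File, Line, Column, Discriminator);
  // Each out-parameter stays zero when its DW_AT_call_* (or
  // DW_AT_GNU_discriminator) attribute is absent.
  return Discriminator;
}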
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index 40eb4434bd61..023a0f7b9fb2 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -31,6 +31,7 @@
#include <cstdint>
#include <memory>
#include <vector>
+#include <map>
namespace llvm {
@@ -134,6 +135,11 @@ class DWARFUnit {
uint64_t BaseAddr;
// The compile unit debug information entry items.
std::vector<DWARFDebugInfoEntry> DieArray;
+
+ // Map from a range's start address to its end address and corresponding DIE.
+ // IntervalMap does not support range removal, so we use
+ // std::map::upper_bound for address range lookup.
+ std::map<uint64_t, std::pair<uint64_t, DWARFDie>> AddrDieMap;
typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>
die_iterator_range;
@@ -183,6 +189,9 @@ public:
AddrOffsetSectionBase = Base;
}
+ // Recursively update the address-to-DIE map.
+ void updateAddressDieMap(DWARFDie Die);
+
void setRangesSection(StringRef RS, uint32_t Base) {
RangeSection = RS;
RangeSectionBase = Base;
@@ -339,10 +348,10 @@ private:
/// it was actually constructed.
bool parseDWO();
- /// getSubprogramForAddress - Returns subprogram DIE with address range
+ /// getSubroutineForAddress - Returns the subroutine DIE with address range
/// encompassing the provided address. The pointer is alive as long as parsed
/// compile unit DIEs are not cleared.
- DWARFDie getSubprogramForAddress(uint64_t Address);
+ DWARFDie getSubroutineForAddress(uint64_t Address);
};
} // end namespace llvm
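A sketch (not the class method itself) of the upper_bound scheme the AddrDieMap comment describes: find the last range starting at or below Address, then check Address against that range's end:

#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include <cstdint>
#include <map>
#include <utility>

using namespace llvm;

static DWARFDie lookupDie(
    const std::map<uint64_t, std::pair<uint64_t, DWARFDie>> &AddrDieMap,
    uint64_t Address) {
  auto It = AddrDieMap.upper_bound(Address); // First range starting > Address.
  if (It == AddrDieMap.begin())
    return DWARFDie();                       // Address is below every range.
  --It;                                      // Last range starting <= Address.
  if (Address < It->second.first)            // Inside [start, end)?
    return It->second.second;
  return DWARFDie();
}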
diff --git a/contrib/llvm/include/llvm/IR/Argument.h b/contrib/llvm/include/llvm/IR/Argument.h
index 6fc1dd2f285a..5c05f19abc1f 100644
--- a/contrib/llvm/include/llvm/IR/Argument.h
+++ b/contrib/llvm/include/llvm/IR/Argument.h
@@ -108,18 +108,16 @@ public:
bool hasSExtAttr() const;
/// Add attributes to an argument.
- void addAttr(AttributeList AS);
+ void addAttrs(AttrBuilder &B);
- void addAttr(Attribute::AttrKind Kind) {
- addAttr(AttributeList::get(getContext(), getArgNo() + 1, Kind));
- }
+ void addAttr(Attribute::AttrKind Kind);
+
+ void addAttr(Attribute Attr);
/// Remove attributes from an argument.
void removeAttr(AttributeList AS);
- void removeAttr(Attribute::AttrKind Kind) {
- removeAttr(AttributeList::get(getContext(), getArgNo() + 1, Kind));
- }
+ void removeAttr(Attribute::AttrKind Kind);
/// Check if an argument has a given attribute.
bool hasAttribute(Attribute::AttrKind Kind) const;
diff --git a/contrib/llvm/include/llvm/IR/Attributes.h b/contrib/llvm/include/llvm/IR/Attributes.h
index 121f57a433ac..b13f197d25fd 100644
--- a/contrib/llvm/include/llvm/IR/Attributes.h
+++ b/contrib/llvm/include/llvm/IR/Attributes.h
@@ -357,9 +357,6 @@ public:
AttributeList Attrs) const;
AttributeList addAttributes(LLVMContext &C, unsigned Index,
- AttributeSet AS) const;
-
- AttributeList addAttributes(LLVMContext &C, unsigned Index,
const AttrBuilder &B) const;
/// \brief Remove the specified attribute at the specified index from this
diff --git a/contrib/llvm/include/llvm/IR/ConstantRange.h b/contrib/llvm/include/llvm/IR/ConstantRange.h
index 6d704666933f..47004e82cc19 100644
--- a/contrib/llvm/include/llvm/IR/ConstantRange.h
+++ b/contrib/llvm/include/llvm/IR/ConstantRange.h
@@ -41,17 +41,14 @@ namespace llvm {
class MDNode;
/// This class represents a range of values.
-///
class ConstantRange {
APInt Lower, Upper;
public:
/// Initialize a full (the default) or empty set for the specified bit width.
- ///
explicit ConstantRange(uint32_t BitWidth, bool isFullSet = true);
/// Initialize a range to hold the single specified value.
- ///
ConstantRange(APInt Value);
/// @brief Initialize a range of values explicitly. This will assert out if
@@ -119,46 +116,36 @@ public:
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const;
/// Return the lower value for this range.
- ///
const APInt &getLower() const { return Lower; }
/// Return the upper value for this range.
- ///
const APInt &getUpper() const { return Upper; }
/// Get the bit width of this ConstantRange.
- ///
uint32_t getBitWidth() const { return Lower.getBitWidth(); }
/// Return true if this set contains all of the elements possible
/// for this data-type.
- ///
bool isFullSet() const;
/// Return true if this set contains no members.
- ///
bool isEmptySet() const;
/// Return true if this set wraps around the top of the range.
/// For example: [100, 8).
- ///
bool isWrappedSet() const;
/// Return true if this set wraps around the INT_MIN of
/// its bitwidth. For example: i8 [120, 140).
- ///
bool isSignWrappedSet() const;
/// Return true if the specified value is in the set.
- ///
bool contains(const APInt &Val) const;
/// Return true if the other range is a subset of this one.
- ///
bool contains(const ConstantRange &CR) const;
/// If this set contains a single element, return it, otherwise return null.
- ///
const APInt *getSingleElement() const {
if (Upper == Lower + 1)
return &Lower;
@@ -174,35 +161,27 @@ public:
}
/// Return true if this set contains exactly one member.
- ///
bool isSingleElement() const { return getSingleElement() != nullptr; }
/// Return the number of elements in this set.
- ///
APInt getSetSize() const;
/// Compare set size of this range with the range CR.
- ///
bool isSizeStrictlySmallerThanOf(const ConstantRange &CR) const;
/// Return the largest unsigned value contained in the ConstantRange.
- ///
APInt getUnsignedMax() const;
/// Return the smallest unsigned value contained in the ConstantRange.
- ///
APInt getUnsignedMin() const;
/// Return the largest signed value contained in the ConstantRange.
- ///
APInt getSignedMax() const;
/// Return the smallest signed value contained in the ConstantRange.
- ///
APInt getSignedMin() const;
/// Return true if this range is equal to another range.
- ///
bool operator==(const ConstantRange &CR) const {
return Lower == CR.Lower && Upper == CR.Upper;
}
@@ -213,8 +192,8 @@ public:
/// Subtract the specified constant from the endpoints of this constant range.
ConstantRange subtract(const APInt &CI) const;
- /// \brief Subtract the specified range from this range (aka relative
- /// complement of the sets).
+ /// Subtract the specified range from this range (aka relative complement of
+ /// the sets).
ConstantRange difference(const ConstantRange &CR) const;
/// Return the range that results from the intersection of
@@ -223,7 +202,6 @@ public:
/// smallest possible set size that does so. Because there may be two
/// intersections with the same set size, A.intersectWith(B) might not
/// be equal to B.intersectWith(A).
- ///
ConstantRange intersectWith(const ConstantRange &CR) const;
/// Return the range that results from the union of this range
@@ -231,7 +209,6 @@ public:
/// elements of both sets, but may contain more. For example, [3, 9) union
/// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included
/// in either set before.
- ///
ConstantRange unionWith(const ConstantRange &CR) const;
/// Return a new range representing the possible values resulting
@@ -331,15 +308,12 @@ public:
ConstantRange lshr(const ConstantRange &Other) const;
/// Return a new range that is the logical not of the current set.
- ///
ConstantRange inverse() const;
/// Print out the bounds to a stream.
- ///
void print(raw_ostream &OS) const;
/// Allow printing from a debugger easily.
- ///
void dump() const;
};
diff --git a/contrib/llvm/include/llvm/IR/DIBuilder.h b/contrib/llvm/include/llvm/IR/DIBuilder.h
index 69bd5c847a8d..a4b2a02d5050 100644
--- a/contrib/llvm/include/llvm/IR/DIBuilder.h
+++ b/contrib/llvm/include/llvm/IR/DIBuilder.h
@@ -778,6 +778,9 @@ namespace llvm {
}
};
+ // Create wrappers for C Binding types (see CBindingWrapping.h).
+ DEFINE_ISA_CONVERSION_FUNCTIONS(DIBuilder, LLVMDIBuilderRef)
+
} // end namespace llvm
#endif // LLVM_IR_DIBUILDER_H
diff --git a/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h b/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
index 8a924b40143a..8041e35e0e0a 100644
--- a/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -2232,6 +2232,9 @@ public:
expr_op_iterator expr_op_end() const {
return expr_op_iterator(elements_end());
}
+ iterator_range<expr_op_iterator> expr_ops() const {
+ return {expr_op_begin(), expr_op_end()};
+ }
/// @}
bool isValid() const;
@@ -2240,7 +2243,7 @@ public:
return MD->getMetadataID() == DIExpressionKind;
}
- /// Is the first element a DW_OP_deref?.
+ /// Return whether the first element is a DW_OP_deref.
bool startsWithDeref() const {
return getNumElements() > 0 && getElement(0) == dwarf::DW_OP_deref;
}
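Illustrative use of the new expr_ops() range; Expr is assumed to be a valid DIExpression pointer:

#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

// Count dereference operators without manual expr_op_begin/end plumbing.
static unsigned countDerefs(const DIExpression *Expr) {
  unsigned N = 0;
  for (auto Op : Expr->expr_ops()) // One ExprOperand per operator.
    if (Op.getOp() == dwarf::DW_OP_deref)
      ++N;
  return N;
}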
diff --git a/contrib/llvm/include/llvm/IR/Instructions.h b/contrib/llvm/include/llvm/IR/Instructions.h
index 34dafebe0fc5..d23c1ddf9257 100644
--- a/contrib/llvm/include/llvm/IR/Instructions.h
+++ b/contrib/llvm/include/llvm/IR/Instructions.h
@@ -273,10 +273,11 @@ public:
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
+ Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
/// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return getPointerOperand()->getType()->getPointerAddressSpace();
+ return getPointerOperandType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -397,10 +398,11 @@ public:
Value *getPointerOperand() { return getOperand(1); }
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
+ Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
/// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return getPointerOperand()->getType()->getPointerAddressSpace();
+ return getPointerOperandType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
diff --git a/contrib/llvm/include/llvm/IR/Metadata.h b/contrib/llvm/include/llvm/IR/Metadata.h
index fd79355bff1a..8f24a6a1d69d 100644
--- a/contrib/llvm/include/llvm/IR/Metadata.h
+++ b/contrib/llvm/include/llvm/IR/Metadata.h
@@ -30,6 +30,7 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
@@ -133,6 +134,14 @@ public:
/// @}
};
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef)
+
+// Specialized opaque metadata conversions.
+inline Metadata **unwrap(LLVMMetadataRef *MDs) {
+ return reinterpret_cast<Metadata**>(MDs);
+}
+
#define HANDLE_METADATA(CLASS) class CLASS;
#include "llvm/IR/Metadata.def"
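A sketch of what the conversion macro provides in practice; MD is an assumed Metadata pointer:

#include "llvm/IR/Metadata.h"

using namespace llvm;

static void roundTrip(Metadata *MD) {
  LLVMMetadataRef Ref = wrap(MD);   // C++ pointer to opaque C API handle.
  Metadata *Back = unwrap(Ref);     // ...and back again.
  LLVMMetadataRef Handles[2] = {Ref, Ref};
  Metadata **MDs = unwrap(Handles); // The array overload added above.
  (void)Back;
  (void)MDs;
}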
diff --git a/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h b/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
index 09f6c1897009..9c0a4159cad2 100644
--- a/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -160,7 +160,6 @@ private:
std::vector<ValueInfo> RefEdgeList;
protected:
- /// GlobalValueSummary constructor.
GlobalValueSummary(SummaryKind K, GVFlags Flags, std::vector<ValueInfo> Refs)
: Kind(K), Flags(Flags), OriginalName(0), RefEdgeList(std::move(Refs)) {}
@@ -221,7 +220,6 @@ class AliasSummary : public GlobalValueSummary {
GlobalValueSummary *AliaseeSummary;
public:
- /// Summary constructors.
AliasSummary(GVFlags Flags, std::vector<ValueInfo> Refs)
: GlobalValueSummary(AliasKind, Flags, std::move(Refs)) {}
@@ -297,7 +295,6 @@ private:
std::unique_ptr<TypeIdInfo> TIdInfo;
public:
- /// Summary constructors.
FunctionSummary(GVFlags Flags, unsigned NumInsts, std::vector<ValueInfo> Refs,
std::vector<EdgeTy> CGEdges,
std::vector<GlobalValue::GUID> TypeTests,
@@ -418,7 +415,6 @@ template <> struct DenseMapInfo<FunctionSummary::ConstVCall> {
class GlobalVarSummary : public GlobalValueSummary {
public:
- /// Summary constructors.
GlobalVarSummary(GVFlags Flags, std::vector<ValueInfo> Refs)
: GlobalValueSummary(GlobalVarKind, Flags, std::move(Refs)) {}
diff --git a/contrib/llvm/include/llvm/IR/PatternMatch.h b/contrib/llvm/include/llvm/IR/PatternMatch.h
index 40f9c21f646b..31a76b4ed6c3 100644
--- a/contrib/llvm/include/llvm/IR/PatternMatch.h
+++ b/contrib/llvm/include/llvm/IR/PatternMatch.h
@@ -267,15 +267,15 @@ inline cst_pred_ty<is_all_ones> m_AllOnes() {
}
inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; }
-struct is_sign_bit {
- bool isValue(const APInt &C) { return C.isSignBit(); }
+struct is_sign_mask {
+ bool isValue(const APInt &C) { return C.isSignMask(); }
};
/// \brief Match an integer or vector with only the sign bit(s) set.
-inline cst_pred_ty<is_sign_bit> m_SignBit() {
- return cst_pred_ty<is_sign_bit>();
+inline cst_pred_ty<is_sign_mask> m_SignMask() {
+ return cst_pred_ty<is_sign_mask>();
}
-inline api_pred_ty<is_sign_bit> m_SignBit(const APInt *&V) { return V; }
+inline api_pred_ty<is_sign_mask> m_SignMask(const APInt *&V) { return V; }
struct is_power2 {
bool isValue(const APInt &C) { return C.isPowerOf2(); }
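A sketch of the renamed matcher recognizing "xor X, SignMask", i.e. a sign flip; V is an assumed Value pointer:

#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static bool isSignFlip(Value *V, Value *&X) {
  const APInt *C;
  // m_SignMask binds the matched constant (only the sign bit set) to C.
  return match(V, m_Xor(m_Value(X), m_SignMask(C)));
}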
diff --git a/contrib/llvm/include/llvm/IR/Use.h b/contrib/llvm/include/llvm/IR/Use.h
index 05b68ccbb38e..6b56546f4421 100644
--- a/contrib/llvm/include/llvm/IR/Use.h
+++ b/contrib/llvm/include/llvm/IR/Use.h
@@ -61,9 +61,29 @@ public:
/// that also works with less standard-compliant compilers
void swap(Use &RHS);
+ /// Pointer traits for the UserRef PointerIntPair. This ensures we always
+ /// use the LSB regardless of pointer alignment on different targets.
+ struct UserRefPointerTraits {
+ static inline void *getAsVoidPointer(User *P) { return P; }
+ static inline User *getFromVoidPointer(void *P) {
+ return (User *)P;
+ }
+ enum { NumLowBitsAvailable = 1 };
+ };
+
// A type for the word following an array of hung-off Uses in memory, which is
// a pointer back to their User with the bottom bit set.
- typedef PointerIntPair<User *, 1, unsigned> UserRef;
+ typedef PointerIntPair<User *, 1, unsigned, UserRefPointerTraits> UserRef;
+
+ /// Pointer traits for the Prev PointerIntPair. This ensures we always use
+ /// the two LSBs regardless of pointer alignment on different targets.
+ struct PrevPointerTraits {
+ static inline void *getAsVoidPointer(Use **P) { return P; }
+ static inline Use **getFromVoidPointer(void *P) {
+ return (Use **)P;
+ }
+ enum { NumLowBitsAvailable = 2 };
+ };
private:
/// Destructor - Only for zap()
@@ -115,7 +135,7 @@ private:
Value *Val;
Use *Next;
- PointerIntPair<Use **, 2, PrevPtrTag> Prev;
+ PointerIntPair<Use **, 2, PrevPtrTag, PrevPointerTraits> Prev;
void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); }
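Without explicit traits, PointerIntPair derives the number of spare low bits from the pointee's alignment, which can differ across targets; the new traits pin NumLowBitsAvailable so the in-memory tagging layout is identical everywhere. A minimal sketch of the mechanism (SomeUser is an assumed User*):

    PointerIntPair<User *, 1, unsigned, Use::UserRefPointerTraits> UR;
    UR.setPointer(SomeUser);
    UR.setInt(1);   // the tag lives in the pointer's least significant bit
    assert(UR.getPointer() == SomeUser && UR.getInt() == 1);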
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfo.h b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
index bd2717de9960..869706c45483 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
@@ -65,8 +65,8 @@ protected:
// Properties to be set by the target writer, used to configure asm printer.
//
- /// Pointer size in bytes. Default is 4.
- unsigned PointerSize = 4;
+ /// Code pointer size in bytes. Default is 4.
+ unsigned CodePointerSize = 4;
/// Size of the stack slot reserved for callee-saved registers, in bytes.
/// Default is same as pointer size.
@@ -384,8 +384,8 @@ public:
explicit MCAsmInfo();
virtual ~MCAsmInfo();
- /// Get the pointer size in bytes.
- unsigned getPointerSize() const { return PointerSize; }
+ /// Get the code pointer size in bytes.
+ unsigned getCodePointerSize() const { return CodePointerSize; }
/// Get the callee-saved register stack slot
/// size in bytes.
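The rename makes explicit that this is the size of a code pointer, which can differ from the data pointer size on targets with distinct code and data address spaces. Call sites update mechanically (MAI is an assumed MCAsmInfo reference):

    unsigned Bytes = MAI.getCodePointerSize();  // was MAI.getPointerSize()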
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
index e466b368ed34..eb301031ba3f 100644
--- a/contrib/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -128,6 +128,7 @@ public:
virtual void emitArch(unsigned Arch);
virtual void emitArchExtension(unsigned ArchExt);
virtual void emitObjectArch(unsigned Arch);
+ void emitTargetAttributes(const MCSubtargetInfo &STI);
virtual void finishAttributeSection();
virtual void emitInst(uint32_t Inst, char Suffix = '\0');
diff --git a/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
index 6229db3bbcb2..bb16463588c3 100644
--- a/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
@@ -86,6 +86,10 @@ public:
FeatureBits = FeatureBits_;
}
+ bool hasFeature(unsigned Feature) const {
+ return FeatureBits[Feature];
+ }
+
protected:
/// Initialize the scheduling model and feature bits.
///
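hasFeature() exposes a single-bit query on the subtarget's feature set; the index comes from the target's TableGen-generated feature enum. Sketch (STI is an assumed MCSubtargetInfo; the ARM feature name is illustrative):

    if (STI.hasFeature(ARM::FeatureCRC))
      ; // e.g. emit CRC-dependent build attributes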
diff --git a/contrib/llvm/include/llvm/Object/Archive.h b/contrib/llvm/include/llvm/Object/Archive.h
index d423957d9b79..807508107c56 100644
--- a/contrib/llvm/include/llvm/Object/Archive.h
+++ b/contrib/llvm/include/llvm/Object/Archive.h
@@ -14,15 +14,20 @@
#ifndef LLVM_OBJECT_ARCHIVE_H
#define LLVM_OBJECT_ARCHIVE_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
namespace llvm {
namespace object {
@@ -32,25 +37,28 @@ class Archive;
class ArchiveMemberHeader {
public:
friend class Archive;
+
ArchiveMemberHeader(Archive const *Parent, const char *RawHeaderPtr,
uint64_t Size, Error *Err);
// ArchiveMemberHeader() = default;
/// Get the name without looking up long names.
- Expected<llvm::StringRef> getRawName() const;
+ Expected<StringRef> getRawName() const;
/// Get the name looking up long names.
- Expected<llvm::StringRef> getName(uint64_t Size) const;
+ Expected<StringRef> getName(uint64_t Size) const;
/// Members are not larger than 4GB.
Expected<uint32_t> getSize() const;
Expected<sys::fs::perms> getAccessMode() const;
Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const;
- llvm::StringRef getRawLastModified() const {
+
+ StringRef getRawLastModified() const {
return StringRef(ArMemHdr->LastModified,
sizeof(ArMemHdr->LastModified)).rtrim(' ');
}
+
Expected<unsigned> getUID() const;
Expected<unsigned> getGID() const;
@@ -75,11 +83,13 @@ private:
class Archive : public Binary {
virtual void anchor();
+
public:
class Child {
friend Archive;
- const Archive *Parent;
friend ArchiveMemberHeader;
+
+ const Archive *Parent;
ArchiveMemberHeader Header;
/// \brief Includes header but not padding byte.
StringRef Data;
@@ -103,17 +113,22 @@ public:
Expected<StringRef> getName() const;
Expected<std::string> getFullName() const;
Expected<StringRef> getRawName() const { return Header.getRawName(); }
+
Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const {
return Header.getLastModified();
}
+
StringRef getRawLastModified() const {
return Header.getRawLastModified();
}
+
Expected<unsigned> getUID() const { return Header.getUID(); }
Expected<unsigned> getGID() const { return Header.getGID(); }
+
Expected<sys::fs::perms> getAccessMode() const {
return Header.getAccessMode();
}
+
/// \return the size of the archive member without the header or padding.
Expected<uint64_t> getSize() const;
/// \return the size in the archive header for this member.
@@ -130,11 +145,12 @@ public:
class child_iterator {
Child C;
- Error *E;
+ Error *E = nullptr;
public:
- child_iterator() : C(Child(nullptr, nullptr, nullptr)), E(nullptr) {}
+ child_iterator() : C(Child(nullptr, nullptr, nullptr)) {}
child_iterator(const Child &C, Error *E) : C(C), E(E) {}
+
const Child *operator->() const { return &C; }
const Child &operator*() const { return C; }
@@ -171,14 +187,15 @@ public:
uint32_t StringIndex; // Extra index to the string.
public:
- bool operator ==(const Symbol &other) const {
- return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex);
- }
-
Symbol(const Archive *p, uint32_t symi, uint32_t stri)
: Parent(p)
, SymbolIndex(symi)
, StringIndex(stri) {}
+
+ bool operator ==(const Symbol &other) const {
+ return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex);
+ }
+
StringRef getName() const;
Expected<Child> getMember() const;
Symbol getNext() const;
@@ -186,8 +203,10 @@ public:
class symbol_iterator {
Symbol symbol;
+
public:
symbol_iterator(const Symbol &s) : symbol(s) {}
+
const Symbol *operator->() const { return &symbol; }
const Symbol &operator*() const { return symbol; }
@@ -264,7 +283,7 @@ private:
mutable std::vector<std::unique_ptr<MemoryBuffer>> ThinBuffers;
};
-}
-}
+} // end namespace object
+} // end namespace llvm
-#endif
+#endif // LLVM_OBJECT_ARCHIVE_H
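Beyond the include and layout cleanup, child_iterator keeps its Error* convention: iterating a malformed archive stops and reports through the caller-supplied Error, which must be checked after the loop. Usage sketch (A is an assumed Archive&):

    Error Err = Error::success();
    for (const Archive::Child &C : A.children(Err)) {
      Expected<StringRef> Name = C.getName();
      if (!Name) {
        consumeError(Name.takeError());   // or propagate upward
        continue;
      }
      // ... use *Name ...
    }
    if (Err)
      report_fatal_error(std::move(Err)); // iteration itself failed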
diff --git a/contrib/llvm/include/llvm/Object/Binary.h b/contrib/llvm/include/llvm/Object/Binary.h
index bdbe94301dc7..06788326ff57 100644
--- a/contrib/llvm/include/llvm/Object/Binary.h
+++ b/contrib/llvm/include/llvm/Object/Binary.h
@@ -15,10 +15,11 @@
#define LLVM_OBJECT_BINARY_H
#include "llvm/ADT/Triple.h"
-#include "llvm/Object/Error.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <memory>
+#include <utility>
namespace llvm {
@@ -29,9 +30,6 @@ namespace object {
class Binary {
private:
- Binary() = delete;
- Binary(const Binary &other) = delete;
-
unsigned int TypeID;
protected:
@@ -80,6 +78,8 @@ protected:
}
public:
+ Binary() = delete;
+ Binary(const Binary &other) = delete;
virtual ~Binary();
StringRef getData() const;
@@ -173,7 +173,7 @@ OwningBinary<T>::OwningBinary(std::unique_ptr<T> Bin,
std::unique_ptr<MemoryBuffer> Buf)
: Bin(std::move(Bin)), Buf(std::move(Buf)) {}
-template <typename T> OwningBinary<T>::OwningBinary() {}
+template <typename T> OwningBinary<T>::OwningBinary() = default;
template <typename T>
OwningBinary<T>::OwningBinary(OwningBinary &&Other)
@@ -201,7 +201,9 @@ template <typename T> const T* OwningBinary<T>::getBinary() const {
}
Expected<OwningBinary<Binary>> createBinary(StringRef Path);
-}
-}
-#endif
+} // end namespace object
+
+} // end namespace llvm
+
+#endif // LLVM_OBJECT_BINARY_H
diff --git a/contrib/llvm/include/llvm/Object/COFF.h b/contrib/llvm/include/llvm/Object/COFF.h
index 696042d29dab..e0bb8f1cf3dd 100644
--- a/contrib/llvm/include/llvm/Object/COFF.h
+++ b/contrib/llvm/include/llvm/Object/COFF.h
@@ -14,28 +14,39 @@
#ifndef LLVM_OBJECT_COFF_H
#define LLVM_OBJECT_COFF_H
-#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/CodeView/CVDebugRecord.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <system_error>
namespace llvm {
+
template <typename T> class ArrayRef;
namespace object {
-class ImportDirectoryEntryRef;
+
+class BaseRelocRef;
class DelayImportDirectoryEntryRef;
class ExportDirectoryEntryRef;
+class ImportDirectoryEntryRef;
class ImportedSymbolRef;
-class BaseRelocRef;
-typedef content_iterator<ImportDirectoryEntryRef> import_directory_iterator;
-typedef content_iterator<DelayImportDirectoryEntryRef>
- delay_import_directory_iterator;
-typedef content_iterator<ExportDirectoryEntryRef> export_directory_iterator;
-typedef content_iterator<ImportedSymbolRef> imported_symbol_iterator;
-typedef content_iterator<BaseRelocRef> base_reloc_iterator;
+
+using import_directory_iterator = content_iterator<ImportDirectoryEntryRef>;
+using delay_import_directory_iterator =
+ content_iterator<DelayImportDirectoryEntryRef>;
+using export_directory_iterator = content_iterator<ExportDirectoryEntryRef>;
+using imported_symbol_iterator = content_iterator<ImportedSymbolRef>;
+using base_reloc_iterator = content_iterator<BaseRelocRef>;
/// The DOS compatible header at the front of all PE/COFF executables.
struct dos_header {
@@ -190,10 +201,10 @@ struct import_lookup_table_entry {
}
};
-typedef import_lookup_table_entry<support::little32_t>
- import_lookup_table_entry32;
-typedef import_lookup_table_entry<support::little64_t>
- import_lookup_table_entry64;
+using import_lookup_table_entry32 =
+ import_lookup_table_entry<support::little32_t>;
+using import_lookup_table_entry64 =
+ import_lookup_table_entry<support::little64_t>;
struct delay_import_directory_table_entry {
// dumpbin reports this field as "Characteristics" instead of "Attributes".
@@ -226,8 +237,8 @@ union export_address_table_entry {
support::ulittle32_t ForwarderRVA;
};
-typedef support::ulittle32_t export_name_pointer_table_entry;
-typedef support::ulittle16_t export_ordinal_table_entry;
+using export_name_pointer_table_entry = support::ulittle32_t;
+using export_ordinal_table_entry = support::ulittle16_t;
struct StringTableOffset {
support::ulittle32_t Zeroes;
@@ -250,8 +261,8 @@ struct coff_symbol {
uint8_t NumberOfAuxSymbols;
};
-typedef coff_symbol<support::ulittle16_t> coff_symbol16;
-typedef coff_symbol<support::ulittle32_t> coff_symbol32;
+using coff_symbol16 = coff_symbol<support::ulittle16_t>;
+using coff_symbol32 = coff_symbol<support::ulittle32_t>;
// Contains only common parts of coff_symbol16 and coff_symbol32.
struct coff_symbol_generic {
@@ -264,9 +275,9 @@ struct coff_symbol_generic {
class COFFSymbolRef {
public:
- COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS), CS32(nullptr) {}
- COFFSymbolRef(const coff_symbol32 *CS) : CS16(nullptr), CS32(CS) {}
- COFFSymbolRef() : CS16(nullptr), CS32(nullptr) {}
+ COFFSymbolRef() = default;
+ COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS) {}
+ COFFSymbolRef(const coff_symbol32 *CS) : CS32(CS) {}
const void *getRawPtr() const {
return CS16 ? static_cast<const void *>(CS16) : CS32;
@@ -396,8 +407,8 @@ public:
private:
bool isSet() const { return CS16 || CS32; }
- const coff_symbol16 *CS16;
- const coff_symbol32 *CS32;
+ const coff_symbol16 *CS16 = nullptr;
+ const coff_symbol32 *CS32 = nullptr;
};
struct coff_section {
@@ -418,6 +429,7 @@ struct coff_section {
return (Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL) &&
NumberOfRelocations == UINT16_MAX;
}
+
uint32_t getAlignment() const {
// The IMAGE_SCN_TYPE_NO_PAD bit is a legacy way of getting to
// IMAGE_SCN_ALIGN_1BYTES.
@@ -508,6 +520,7 @@ struct coff_import_header {
support::ulittle32_t SizeOfData;
support::ulittle16_t OrdinalHint;
support::ulittle16_t TypeInfo;
+
int getType() const { return TypeInfo & 0x3; }
int getNameType() const { return (TypeInfo >> 2) & 0x7; }
};
@@ -518,6 +531,7 @@ struct coff_import_directory_table_entry {
support::ulittle32_t ForwarderChain;
support::ulittle32_t NameRVA;
support::ulittle32_t ImportAddressTableRVA;
+
bool isNull() const {
return ImportLookupTableRVA == 0 && TimeDateStamp == 0 &&
ForwarderChain == 0 && NameRVA == 0 && ImportAddressTableRVA == 0;
@@ -532,6 +546,7 @@ struct coff_tls_directory {
IntTy AddressOfCallBacks;
support::ulittle32_t SizeOfZeroFill;
support::ulittle32_t Characteristics;
+
uint32_t getAlignment() const {
// Bit [20:24] contains section alignment.
uint32_t Shift = (Characteristics & 0x00F00000) >> 20;
@@ -541,8 +556,8 @@ struct coff_tls_directory {
}
};
-typedef coff_tls_directory<support::little32_t> coff_tls_directory32;
-typedef coff_tls_directory<support::little64_t> coff_tls_directory64;
+using coff_tls_directory32 = coff_tls_directory<support::little32_t>;
+using coff_tls_directory64 = coff_tls_directory<support::little64_t>;
struct coff_load_configuration32 {
support::ulittle32_t Characteristics;
@@ -603,6 +618,7 @@ struct coff_base_reloc_block_header {
struct coff_base_reloc_block_entry {
support::ulittle16_t Data;
+
int getType() const { return Data >> 12; }
int getOffset() const { return Data & ((1 << 12) - 1); }
};
@@ -652,6 +668,7 @@ public:
return reinterpret_cast<uintptr_t>(SymbolTable32);
return uintptr_t(0);
}
+
uint16_t getMachine() const {
if (COFFHeader)
return COFFHeader->Machine;
@@ -659,6 +676,7 @@ public:
return COFFBigObjHeader->Machine;
llvm_unreachable("no COFF header!");
}
+
uint16_t getSizeOfOptionalHeader() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0
@@ -668,6 +686,7 @@ public:
return 0;
llvm_unreachable("no COFF header!");
}
+
uint16_t getCharacteristics() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0 : COFFHeader->Characteristics;
@@ -677,6 +696,7 @@ public:
return 0;
llvm_unreachable("no COFF header!");
}
+
uint32_t getTimeDateStamp() const {
if (COFFHeader)
return COFFHeader->TimeDateStamp;
@@ -684,6 +704,7 @@ public:
return COFFBigObjHeader->TimeDateStamp;
llvm_unreachable("no COFF header!");
}
+
uint32_t getNumberOfSections() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSections;
@@ -691,6 +712,7 @@ public:
return COFFBigObjHeader->NumberOfSections;
llvm_unreachable("no COFF header!");
}
+
uint32_t getPointerToSymbolTable() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0
@@ -699,6 +721,7 @@ public:
return COFFBigObjHeader->PointerToSymbolTable;
llvm_unreachable("no COFF header!");
}
+
uint32_t getRawNumberOfSymbols() const {
if (COFFHeader)
return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSymbols;
@@ -706,11 +729,13 @@ public:
return COFFBigObjHeader->NumberOfSymbols;
llvm_unreachable("no COFF header!");
}
+
uint32_t getNumberOfSymbols() const {
if (!SymbolTable16 && !SymbolTable32)
return 0;
return getRawNumberOfSymbols();
}
+
protected:
void moveSymbolNext(DataRefImpl &Symb) const override;
Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
@@ -746,6 +771,7 @@ protected:
public:
COFFObjectFile(MemoryBufferRef Object, std::error_code &EC);
+
basic_symbol_iterator symbol_begin() const override;
basic_symbol_iterator symbol_end() const override;
section_iterator section_begin() const override;
@@ -797,6 +823,7 @@ public:
std::error_code getDataDirectory(uint32_t index,
const data_directory *&Res) const;
std::error_code getSection(int32_t index, const coff_section *&Res) const;
+
template <typename coff_symbol_type>
std::error_code getSymbol(uint32_t Index,
const coff_symbol_type *&Res) const {
@@ -821,6 +848,7 @@ public:
}
return object_error::parse_failed;
}
+
template <typename T>
std::error_code getAuxSymbol(uint32_t index, const T *&Res) const {
ErrorOr<COFFSymbolRef> s = getSymbol(index);
@@ -829,6 +857,7 @@ public:
Res = reinterpret_cast<const T *>(s->getRawPtr());
return std::error_code();
}
+
std::error_code getSymbolName(COFFSymbolRef Symbol, StringRef &Res) const;
std::error_code getSymbolName(const coff_symbol_generic *Symbol,
StringRef &Res) const;
@@ -885,7 +914,7 @@ public:
// The iterator for the import directory table.
class ImportDirectoryEntryRef {
public:
- ImportDirectoryEntryRef() : OwningObject(nullptr) {}
+ ImportDirectoryEntryRef() = default;
ImportDirectoryEntryRef(const coff_import_directory_table_entry *Table,
uint32_t I, const COFFObjectFile *Owner)
: ImportTable(Table), Index(I), OwningObject(Owner) {}
@@ -911,12 +940,12 @@ public:
private:
const coff_import_directory_table_entry *ImportTable;
uint32_t Index;
- const COFFObjectFile *OwningObject;
+ const COFFObjectFile *OwningObject = nullptr;
};
class DelayImportDirectoryEntryRef {
public:
- DelayImportDirectoryEntryRef() : OwningObject(nullptr) {}
+ DelayImportDirectoryEntryRef() = default;
DelayImportDirectoryEntryRef(const delay_import_directory_table_entry *T,
uint32_t I, const COFFObjectFile *Owner)
: Table(T), Index(I), OwningObject(Owner) {}
@@ -936,13 +965,13 @@ public:
private:
const delay_import_directory_table_entry *Table;
uint32_t Index;
- const COFFObjectFile *OwningObject;
+ const COFFObjectFile *OwningObject = nullptr;
};
// The iterator for the export directory table entry.
class ExportDirectoryEntryRef {
public:
- ExportDirectoryEntryRef() : OwningObject(nullptr) {}
+ ExportDirectoryEntryRef() = default;
ExportDirectoryEntryRef(const export_directory_table_entry *Table, uint32_t I,
const COFFObjectFile *Owner)
: ExportTable(Table), Index(I), OwningObject(Owner) {}
@@ -962,12 +991,12 @@ public:
private:
const export_directory_table_entry *ExportTable;
uint32_t Index;
- const COFFObjectFile *OwningObject;
+ const COFFObjectFile *OwningObject = nullptr;
};
class ImportedSymbolRef {
public:
- ImportedSymbolRef() : OwningObject(nullptr) {}
+ ImportedSymbolRef() = default;
ImportedSymbolRef(const import_lookup_table_entry32 *Entry, uint32_t I,
const COFFObjectFile *Owner)
: Entry32(Entry), Entry64(nullptr), Index(I), OwningObject(Owner) {}
@@ -987,12 +1016,12 @@ private:
const import_lookup_table_entry32 *Entry32;
const import_lookup_table_entry64 *Entry64;
uint32_t Index;
- const COFFObjectFile *OwningObject;
+ const COFFObjectFile *OwningObject = nullptr;
};
class BaseRelocRef {
public:
- BaseRelocRef() : OwningObject(nullptr) {}
+ BaseRelocRef() = default;
BaseRelocRef(const coff_base_reloc_block_header *Header,
const COFFObjectFile *Owner)
: Header(Header), Index(0), OwningObject(Owner) {}
@@ -1006,7 +1035,7 @@ public:
private:
const coff_base_reloc_block_header *Header;
uint32_t Index;
- const COFFObjectFile *OwningObject;
+ const COFFObjectFile *OwningObject = nullptr;
};
// Corresponds to `_FPO_DATA` structure in the PE/COFF spec.
@@ -1034,6 +1063,7 @@ struct FpoData {
};
} // end namespace object
+
} // end namespace llvm
-#endif
+#endif // LLVM_OBJECT_COFF_H
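The typedef-to-using conversions are purely syntactic; the alias form reads left-to-right and extends naturally to alias templates. These declare the same type:

    typedef content_iterator<BaseRelocRef> base_reloc_iterator;   // old
    using base_reloc_iterator = content_iterator<BaseRelocRef>;   // new

Likewise, the in-class initializers (e.g. OwningObject = nullptr) let the default constructors collapse to = default without changing the constructed state.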
diff --git a/contrib/llvm/include/llvm/Object/IRSymtab.h b/contrib/llvm/include/llvm/Object/IRSymtab.h
index cde6f3b0f651..be0f02aa7f17 100644
--- a/contrib/llvm/include/llvm/Object/IRSymtab.h
+++ b/contrib/llvm/include/llvm/Object/IRSymtab.h
@@ -41,9 +41,9 @@ typedef support::ulittle32_t Word;
/// A reference to a string in the string table.
struct Str {
- Word Offset;
+ Word Offset, Size;
StringRef get(StringRef Strtab) const {
- return Strtab.data() + Offset;
+ return {Strtab.data() + Offset, Size};
}
};
@@ -59,6 +59,9 @@ template <typename T> struct Range {
/// table.
struct Module {
Word Begin, End;
+
+ /// The index of the first Uncommon for this Module.
+ Word UncBegin;
};
/// This is equivalent to an IR comdat.
@@ -82,7 +85,8 @@ struct Symbol {
Word Flags;
enum FlagBits {
FB_visibility, // 2 bits
- FB_undefined = FB_visibility + 2,
+ FB_has_uncommon = FB_visibility + 2,
+ FB_undefined,
FB_weak,
FB_common,
FB_indirect,
@@ -94,10 +98,6 @@ struct Symbol {
FB_unnamed_addr,
FB_executable,
};
-
- /// The index into the Uncommon table, or -1 if this symbol does not have an
- /// Uncommon.
- Word UncommonIndex;
};
/// This data structure contains rarely used symbol fields and is optionally
@@ -249,15 +249,9 @@ public:
/// Reader::module_symbols().
class Reader::SymbolRef : public Symbol {
const storage::Symbol *SymI, *SymE;
+ const storage::Uncommon *UncI;
const Reader *R;
-public:
- SymbolRef(const storage::Symbol *SymI, const storage::Symbol *SymE,
- const Reader *R)
- : SymI(SymI), SymE(SymE), R(R) {
- read();
- }
-
void read() {
if (SymI == SymE)
return;
@@ -267,16 +261,24 @@ public:
ComdatIndex = SymI->ComdatIndex;
Flags = SymI->Flags;
- uint32_t UncI = SymI->UncommonIndex;
- if (UncI != -1u) {
- const storage::Uncommon &Unc = R->Uncommons[UncI];
- CommonSize = Unc.CommonSize;
- CommonAlign = Unc.CommonAlign;
- COFFWeakExternFallbackName = R->str(Unc.COFFWeakExternFallbackName);
+ if (Flags & (1 << storage::Symbol::FB_has_uncommon)) {
+ CommonSize = UncI->CommonSize;
+ CommonAlign = UncI->CommonAlign;
+ COFFWeakExternFallbackName = R->str(UncI->COFFWeakExternFallbackName);
}
}
+
+public:
+ SymbolRef(const storage::Symbol *SymI, const storage::Symbol *SymE,
+ const storage::Uncommon *UncI, const Reader *R)
+ : SymI(SymI), SymE(SymE), UncI(UncI), R(R) {
+ read();
+ }
+
void moveNext() {
++SymI;
+ if (Flags & (1 << storage::Symbol::FB_has_uncommon))
+ ++UncI;
read();
}
@@ -284,15 +286,16 @@ public:
};
inline Reader::symbol_range Reader::symbols() const {
- return {SymbolRef(Symbols.begin(), Symbols.end(), this),
- SymbolRef(Symbols.end(), Symbols.end(), this)};
+ return {SymbolRef(Symbols.begin(), Symbols.end(), Uncommons.begin(), this),
+ SymbolRef(Symbols.end(), Symbols.end(), nullptr, this)};
}
inline Reader::symbol_range Reader::module_symbols(unsigned I) const {
const storage::Module &M = Modules[I];
const storage::Symbol *MBegin = Symbols.begin() + M.Begin,
*MEnd = Symbols.begin() + M.End;
- return {SymbolRef(MBegin, MEnd, this), SymbolRef(MEnd, MEnd, this)};
+ return {SymbolRef(MBegin, MEnd, Uncommons.begin() + M.UncBegin, this),
+ SymbolRef(MEnd, MEnd, nullptr, this)};
}
}
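The Uncommon table is now addressed positionally: a symbol sets FB_has_uncommon when it owns the next Uncommon record, and each module stores only the index of its first record (Module::UncBegin), replacing the per-symbol UncommonIndex word. A model of the reader's lockstep walk (a sketch; ModuleSymbols and consume() are hypothetical):

    const storage::Uncommon *UncI = Uncommons.begin() + M.UncBegin;
    for (const storage::Symbol &Sym : ModuleSymbols) {
      if (Sym.Flags & (1 << storage::Symbol::FB_has_uncommon))
        consume(*UncI++);   // this symbol's CommonSize/CommonAlign/etc.
    }

Str also gains an explicit Size field, so string-table references no longer depend on NUL termination.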
diff --git a/contrib/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm/include/llvm/Object/ObjectFile.h
index b689dc2ac03a..9a7bc618ffd0 100644
--- a/contrib/llvm/include/llvm/Object/ObjectFile.h
+++ b/contrib/llvm/include/llvm/Object/ObjectFile.h
@@ -14,39 +14,46 @@
#ifndef LLVM_OBJECT_OBJECTFILE_H
#define LLVM_OBJECT_OBJECTFILE_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/Error.h"
#include "llvm/Object/SymbolicFile.h"
-#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
-#include <cstring>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <system_error>
namespace llvm {
+
class ARMAttributeParser;
namespace object {
-class ObjectFile;
class COFFObjectFile;
class MachOObjectFile;
-class WasmObjectFile;
-
+class ObjectFile;
+class SectionRef;
class SymbolRef;
class symbol_iterator;
-class SectionRef;
-typedef content_iterator<SectionRef> section_iterator;
+class WasmObjectFile;
+
+using section_iterator = content_iterator<SectionRef>;
/// This is a value type class that represents a single relocation in the list
/// of relocations in the object file.
class RelocationRef {
DataRefImpl RelocationPimpl;
- const ObjectFile *OwningObject;
+ const ObjectFile *OwningObject = nullptr;
public:
- RelocationRef() : OwningObject(nullptr) { }
-
+ RelocationRef() = default;
RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);
bool operator==(const RelocationRef &Other) const;
@@ -65,18 +72,19 @@ public:
DataRefImpl getRawDataRefImpl() const;
const ObjectFile *getObject() const;
};
-typedef content_iterator<RelocationRef> relocation_iterator;
+
+using relocation_iterator = content_iterator<RelocationRef>;
/// This is a value type class that represents a single section in the list of
/// sections in the object file.
class SectionRef {
friend class SymbolRef;
+
DataRefImpl SectionPimpl;
- const ObjectFile *OwningObject;
+ const ObjectFile *OwningObject = nullptr;
public:
- SectionRef() : OwningObject(nullptr) { }
-
+ SectionRef() = default;
SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
bool operator==(const SectionRef &Other) const;
@@ -119,8 +127,6 @@ class SymbolRef : public BasicSymbolRef {
friend class SectionRef;
public:
- SymbolRef() : BasicSymbolRef() {}
-
enum Type {
ST_Unknown, // Type not specified
ST_Data,
@@ -130,6 +136,7 @@ public:
ST_Other
};
+ SymbolRef() = default;
SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
SymbolRef(const BasicSymbolRef &B) : BasicSymbolRef(B) {
assert(isa<ObjectFile>(BasicSymbolRef::getObject()));
@@ -179,8 +186,6 @@ public:
/// to create.
class ObjectFile : public SymbolicFile {
virtual void anchor();
- ObjectFile() = delete;
- ObjectFile(const ObjectFile &other) = delete;
protected:
ObjectFile(unsigned int Type, MemoryBufferRef Source);
@@ -198,6 +203,7 @@ protected:
// Implementations assume that the DataRefImpl is valid and has not been
// modified externally. It's UB otherwise.
friend class SymbolRef;
+
virtual Expected<StringRef> getSymbolName(DataRefImpl Symb) const = 0;
std::error_code printSymbolName(raw_ostream &OS,
DataRefImpl Symb) const override;
@@ -211,6 +217,7 @@ protected:
// Same as above for SectionRef.
friend class SectionRef;
+
virtual void moveSectionNext(DataRefImpl &Sec) const = 0;
virtual std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const = 0;
@@ -242,12 +249,15 @@ protected:
uint64_t getSymbolValue(DataRefImpl Symb) const;
public:
+ ObjectFile() = delete;
+ ObjectFile(const ObjectFile &other) = delete;
+
uint64_t getCommonSymbolSize(DataRefImpl Symb) const {
assert(getSymbolFlags(Symb) & SymbolRef::SF_Common);
return getCommonSymbolSizeImpl(Symb);
}
- typedef iterator_range<symbol_iterator> symbol_iterator_range;
+ using symbol_iterator_range = iterator_range<symbol_iterator>;
symbol_iterator_range symbols() const {
return symbol_iterator_range(symbol_begin(), symbol_end());
}
@@ -255,7 +265,7 @@ public:
virtual section_iterator section_begin() const = 0;
virtual section_iterator section_end() const = 0;
- typedef iterator_range<section_iterator> section_iterator_range;
+ using section_iterator_range = iterator_range<section_iterator>;
section_iterator_range sections() const {
return section_iterator_range(section_begin(), section_end());
}
@@ -297,7 +307,6 @@ public:
return createObjectFile(Object, sys::fs::file_magic::unknown);
}
-
static inline bool classof(const Binary *v) {
return v->isObject();
}
@@ -354,7 +363,6 @@ inline const ObjectFile *SymbolRef::getObject() const {
return cast<ObjectFile>(O);
}
-
/// SectionRef
inline SectionRef::SectionRef(DataRefImpl SectionP,
const ObjectFile *Owner)
@@ -479,8 +487,8 @@ inline const ObjectFile *RelocationRef::getObject() const {
return OwningObject;
}
-
} // end namespace object
+
} // end namespace llvm
-#endif
+#endif // LLVM_OBJECT_OBJECTFILE_H
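The iterator typedefs become using-aliases, but the ranges behave as before; typical traversal still looks like this (Obj is an assumed ObjectFile&):

    for (const SectionRef &Sec : Obj.sections()) {
      StringRef Name;
      if (!Sec.getName(Name))   // std::error_code: false means success
        ; // ... use Name ...
    }
    for (const SymbolRef &Sym : Obj.symbols()) {
      Expected<StringRef> Name = Sym.getName();
      // ...
    }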
diff --git a/contrib/llvm/include/llvm/Object/SymbolicFile.h b/contrib/llvm/include/llvm/Object/SymbolicFile.h
index ef0f96f7834a..f4be4bfdb1a3 100644
--- a/contrib/llvm/include/llvm/Object/SymbolicFile.h
+++ b/contrib/llvm/include/llvm/Object/SymbolicFile.h
@@ -14,10 +14,19 @@
#ifndef LLVM_OBJECT_SYMBOLICFILE_H
#define LLVM_OBJECT_SYMBOLICFILE_H
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Binary.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
#include <cinttypes>
-#include <utility>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <memory>
+#include <system_error>
namespace llvm {
namespace object {
@@ -29,6 +38,7 @@ union DataRefImpl {
uint32_t a, b;
} d;
uintptr_t p;
+
DataRefImpl() { std::memset(this, 0, sizeof(DataRefImpl)); }
};
@@ -87,7 +97,7 @@ class SymbolicFile;
/// symbols in the object file.
class BasicSymbolRef {
DataRefImpl SymbolPimpl;
- const SymbolicFile *OwningObject;
+ const SymbolicFile *OwningObject = nullptr;
public:
enum Flags : unsigned {
@@ -108,7 +118,7 @@ public:
// (IR only)
};
- BasicSymbolRef() : OwningObject(nullptr) { }
+ BasicSymbolRef() = default;
BasicSymbolRef(DataRefImpl SymbolP, const SymbolicFile *Owner);
bool operator==(const BasicSymbolRef &Other) const;
@@ -125,12 +135,12 @@ public:
const SymbolicFile *getObject() const;
};
-typedef content_iterator<BasicSymbolRef> basic_symbol_iterator;
+using basic_symbol_iterator = content_iterator<BasicSymbolRef>;
class SymbolicFile : public Binary {
public:
- ~SymbolicFile() override;
SymbolicFile(unsigned int Type, MemoryBufferRef Source);
+ ~SymbolicFile() override;
// virtual interface.
virtual void moveSymbolNext(DataRefImpl &Symb) const = 0;
@@ -145,7 +155,7 @@ public:
virtual basic_symbol_iterator symbol_end() const = 0;
// convenience wrappers.
- typedef iterator_range<basic_symbol_iterator> basic_symbol_iterator_range;
+ using basic_symbol_iterator_range = iterator_range<basic_symbol_iterator>;
basic_symbol_iterator_range symbols() const {
return basic_symbol_iterator_range(symbol_begin(), symbol_end());
}
@@ -199,7 +209,7 @@ inline const SymbolicFile *BasicSymbolRef::getObject() const {
return OwningObject;
}
-}
-}
+} // end namespace object
+} // end namespace llvm
-#endif
+#endif // LLVM_OBJECT_SYMBOLICFILE_H
diff --git a/contrib/llvm/include/llvm/ObjectYAML/DWARFYAML.h b/contrib/llvm/include/llvm/ObjectYAML/DWARFYAML.h
index ec34de1f0881..3f39cfc7bb3d 100644
--- a/contrib/llvm/include/llvm/ObjectYAML/DWARFYAML.h
+++ b/contrib/llvm/include/llvm/ObjectYAML/DWARFYAML.h
@@ -236,7 +236,7 @@ template <> struct MappingTraits<DWARFYAML::InitialLength> {
static void mapping(IO &IO, DWARFYAML::InitialLength &DWARF);
};
-#define HANDLE_DW_TAG(unused, name) \
+#define HANDLE_DW_TAG(unused, name, unused2, unused3) \
io.enumCase(value, "DW_TAG_" #name, dwarf::DW_TAG_##name);
template <> struct ScalarEnumerationTraits<dwarf::Tag> {
@@ -266,7 +266,7 @@ template <> struct ScalarEnumerationTraits<dwarf::LineNumberExtendedOps> {
}
};
-#define HANDLE_DW_AT(unused, name) \
+#define HANDLE_DW_AT(unused, name, unused2, unused3) \
io.enumCase(value, "DW_AT_" #name, dwarf::DW_AT_##name);
template <> struct ScalarEnumerationTraits<dwarf::Attribute> {
@@ -276,7 +276,7 @@ template <> struct ScalarEnumerationTraits<dwarf::Attribute> {
}
};
-#define HANDLE_DW_FORM(unused, name) \
+#define HANDLE_DW_FORM(unused, name, unused2, unused3) \
io.enumCase(value, "DW_FORM_" #name, dwarf::DW_FORM_##name);
template <> struct ScalarEnumerationTraits<dwarf::Form> {
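These handler macros track the new four-parameter signatures in Dwarf.def (ID and name plus the version and vendor columns added below); consumers that only need the name simply absorb the extra parameters. The general x-macro pattern, as a sketch (handleTag() is a hypothetical callback):

    #define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \
      handleTag(ID, #NAME);
    #include "llvm/Support/Dwarf.def"
    // Dwarf.def #undefs its handler macros at the end, so no cleanup here.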
diff --git a/contrib/llvm/include/llvm/PassSupport.h b/contrib/llvm/include/llvm/PassSupport.h
index 852d79fbd443..50e6b498fb46 100644
--- a/contrib/llvm/include/llvm/PassSupport.h
+++ b/contrib/llvm/include/llvm/PassSupport.h
@@ -93,11 +93,7 @@ template <typename PassName> Pass *callTargetMachineCtor(TargetMachine *TM) {
/// static RegisterPass<YourPassClassName> tmp("passopt", "My Pass Name");
///
/// This statement will cause your pass to be created by calling the default
-/// constructor exposed by the pass. If you have a different constructor that
-/// must be called, create a global constructor function (which takes the
-/// arguments you need and returns a Pass*) and register your pass like this:
-///
-/// static RegisterPass<PassClassName> tmp("passopt", "My Name");
+/// constructor exposed by the pass.
///
template <typename passName> struct RegisterPass : public PassInfo {
// Register Pass using default constructor...
diff --git a/contrib/llvm/include/llvm/Support/ARMTargetParser.def b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
index 18bf9af43226..32dc57a0fedf 100644
--- a/contrib/llvm/include/llvm/Support/ARMTargetParser.def
+++ b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
@@ -78,33 +78,33 @@ ARM_ARCH("armv7-a", AK_ARMV7A, "7-A", "v7", ARMBuildAttrs::CPUArch::v7,
FK_NEON, ARM::AEK_DSP)
ARM_ARCH("armv7ve", AK_ARMV7VE, "7VE", "v7ve", ARMBuildAttrs::CPUArch::v7,
FK_NEON, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT |
- ARM::AEK_HWDIVARM | ARM::AEK_HWDIV | ARM::AEK_DSP))
+ ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv7-r", AK_ARMV7R, "7-R", "v7r", ARMBuildAttrs::CPUArch::v7,
- FK_NONE, (ARM::AEK_HWDIV | ARM::AEK_DSP))
+ FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv7-m", AK_ARMV7M, "7-M", "v7m", ARMBuildAttrs::CPUArch::v7,
- FK_NONE, ARM::AEK_HWDIV)
+ FK_NONE, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv7e-m", AK_ARMV7EM, "7E-M", "v7em", ARMBuildAttrs::CPUArch::v7E_M,
- FK_NONE, (ARM::AEK_HWDIV | ARM::AEK_DSP))
+ FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
FK_CRYPTO_NEON_FP_ARMV8,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC))
+ ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC))
+ ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8.2-a", AK_ARMV8_2A, "8.2-A", "v8.2a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
+ ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
ARM_ARCH("armv8-r", AK_ARMV8R, "8-R", "v8r", ARMBuildAttrs::CPUArch::v8_R,
FK_NEON_FP_ARMV8,
- (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIV |
+ (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB |
ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8-m.base", AK_ARMV8MBaseline, "8-M.Baseline", "v8m.base",
- ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIV)
+ ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv8-m.main", AK_ARMV8MMainline, "8-M.Mainline", "v8m.main",
- ARMBuildAttrs::CPUArch::v8_M_Main, FK_FPV5_D16, ARM::AEK_HWDIV)
+ ARMBuildAttrs::CPUArch::v8_M_Main, FK_FPV5_D16, ARM::AEK_HWDIVTHUMB)
// Non-standard Arch names.
ARM_ARCH("iwmmxt", AK_IWMMXT, "iwmmxt", "", ARMBuildAttrs::CPUArch::v5TE,
FK_NONE, ARM::AEK_NONE)
@@ -128,7 +128,7 @@ ARM_ARCH_EXT_NAME("crc", ARM::AEK_CRC, "+crc", "-crc")
ARM_ARCH_EXT_NAME("crypto", ARM::AEK_CRYPTO, "+crypto","-crypto")
ARM_ARCH_EXT_NAME("dsp", ARM::AEK_DSP, "+dsp", "-dsp")
ARM_ARCH_EXT_NAME("fp", ARM::AEK_FP, nullptr, nullptr)
-ARM_ARCH_EXT_NAME("idiv", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV), nullptr, nullptr)
+ARM_ARCH_EXT_NAME("idiv", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB), nullptr, nullptr)
ARM_ARCH_EXT_NAME("mp", ARM::AEK_MP, nullptr, nullptr)
ARM_ARCH_EXT_NAME("simd", ARM::AEK_SIMD, nullptr, nullptr)
ARM_ARCH_EXT_NAME("sec", ARM::AEK_SEC, nullptr, nullptr)
@@ -147,9 +147,9 @@ ARM_ARCH_EXT_NAME("xscale", ARM::AEK_XSCALE, nullptr, nullptr)
#endif
ARM_HW_DIV_NAME("invalid", ARM::AEK_INVALID)
ARM_HW_DIV_NAME("none", ARM::AEK_NONE)
-ARM_HW_DIV_NAME("thumb", ARM::AEK_HWDIV)
+ARM_HW_DIV_NAME("thumb", ARM::AEK_HWDIVTHUMB)
ARM_HW_DIV_NAME("arm", ARM::AEK_HWDIVARM)
-ARM_HW_DIV_NAME("arm,thumb", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV))
+ARM_HW_DIV_NAME("arm,thumb", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
#undef ARM_HW_DIV_NAME
#ifndef ARM_CPU_NAME
@@ -205,20 +205,20 @@ ARM_CPU_NAME("cortex-a5", AK_ARMV7A, FK_NEON_VFPV4, false,
(ARM::AEK_SEC | ARM::AEK_MP))
ARM_CPU_NAME("cortex-a7", AK_ARMV7A, FK_NEON_VFPV4, false,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV))
+ ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a8", AK_ARMV7A, FK_NEON, true, ARM::AEK_SEC)
ARM_CPU_NAME("cortex-a9", AK_ARMV7A, FK_NEON_FP16, false, (ARM::AEK_SEC | ARM::AEK_MP))
ARM_CPU_NAME("cortex-a12", AK_ARMV7A, FK_NEON_VFPV4, false,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV))
+ ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a15", AK_ARMV7A, FK_NEON_VFPV4, false,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV))
+ ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a17", AK_ARMV7A, FK_NEON_VFPV4, false,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
- ARM::AEK_HWDIV))
+ ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("krait", AK_ARMV7A, FK_NEON_VFPV4, false,
- (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV))
+ (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-r4", AK_ARMV7R, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-r4f", AK_ARMV7R, FK_VFPV3_D16, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-r5", AK_ARMV7R, FK_VFPV3_D16, false,
@@ -249,7 +249,7 @@ ARM_CPU_NAME("kryo", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("iwmmxt", AK_IWMMXT, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("xscale", AK_XSCALE, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("swift", AK_ARMV7S, FK_NEON_VFPV4, true,
- (ARM::AEK_HWDIVARM | ARM::AEK_HWDIV))
+ (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
// Invalid CPU
ARM_CPU_NAME("invalid", AK_INVALID, FK_INVALID, true, ARM::AEK_INVALID)
#undef ARM_CPU_NAME
diff --git a/contrib/llvm/include/llvm/Support/ArrayRecycler.h b/contrib/llvm/include/llvm/Support/ArrayRecycler.h
index 4698f12b3bbc..68696be6bf3d 100644
--- a/contrib/llvm/include/llvm/Support/ArrayRecycler.h
+++ b/contrib/llvm/include/llvm/Support/ArrayRecycler.h
@@ -47,7 +47,9 @@ template <class T, size_t Align = alignof(T)> class ArrayRecycler {
FreeList *Entry = Bucket[Idx];
if (!Entry)
return nullptr;
+ __asan_unpoison_memory_region(Entry, Capacity::get(Idx).getSize());
Bucket[Idx] = Entry->Next;
+ __msan_allocated_memory(Entry, Capacity::get(Idx).getSize());
return reinterpret_cast<T*>(Entry);
}
@@ -59,6 +61,7 @@ template <class T, size_t Align = alignof(T)> class ArrayRecycler {
Bucket.resize(size_t(Idx) + 1);
Entry->Next = Bucket[Idx];
Bucket[Idx] = Entry;
+ __asan_poison_memory_region(Ptr, Capacity::get(Idx).getSize());
}
public:
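The recycler now poisons free-list entries for AddressSanitizer, so touching memory parked on the list reports as a use-after-free; on reuse the block is unpoisoned and marked uninitialized for MemorySanitizer. In LLVM these names are mapped to no-ops when the corresponding sanitizer is disabled. A standalone sketch of the same pattern (assumes a build with -fsanitize=address; the interface functions are the real ASan API):

    #include <sanitizer/asan_interface.h>
    #include <cstddef>
    void park(void *P, std::size_t N)   { __asan_poison_memory_region(P, N); }
    void unpark(void *P, std::size_t N) { __asan_unpoison_memory_region(P, N); }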
diff --git a/contrib/llvm/include/llvm/Support/BinaryStreamArray.h b/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
index 3b1301d3cc0b..21b2474660f2 100644
--- a/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
+++ b/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
@@ -162,6 +162,11 @@ public:
return ThisValue;
}
+ ValueType &operator*() {
+ assert(Array && !HasError);
+ return ThisValue;
+ }
+
IterType &operator+=(unsigned N) {
for (unsigned I = 0; I < N; ++I) {
// We are done with the current record, discard it so that we are
diff --git a/contrib/llvm/include/llvm/Support/Dwarf.def b/contrib/llvm/include/llvm/Support/Dwarf.def
index fdbd8ea70116..3df3300de466 100644
--- a/contrib/llvm/include/llvm/Support/Dwarf.def
+++ b/contrib/llvm/include/llvm/Support/Dwarf.def
@@ -25,27 +25,27 @@
#endif
#ifndef HANDLE_DW_TAG
-#define HANDLE_DW_TAG(ID, NAME)
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR)
#endif
#ifndef HANDLE_DW_AT
-#define HANDLE_DW_AT(ID, NAME)
+#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR)
#endif
#ifndef HANDLE_DW_FORM
-#define HANDLE_DW_FORM(ID, NAME)
+#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR)
#endif
#ifndef HANDLE_DW_OP
-#define HANDLE_DW_OP(ID, NAME)
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR)
#endif
#ifndef HANDLE_DW_LANG
-#define HANDLE_DW_LANG(ID, NAME)
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR)
#endif
#ifndef HANDLE_DW_ATE
-#define HANDLE_DW_ATE(ID, NAME)
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR)
#endif
#ifndef HANDLE_DW_VIRTUALITY
@@ -92,591 +92,591 @@
#define HANDLE_DW_UT(ID, NAME)
#endif
-HANDLE_DW_TAG(0x0000, null)
-HANDLE_DW_TAG(0x0001, array_type)
-HANDLE_DW_TAG(0x0002, class_type)
-HANDLE_DW_TAG(0x0003, entry_point)
-HANDLE_DW_TAG(0x0004, enumeration_type)
-HANDLE_DW_TAG(0x0005, formal_parameter)
-HANDLE_DW_TAG(0x0008, imported_declaration)
-HANDLE_DW_TAG(0x000a, label)
-HANDLE_DW_TAG(0x000b, lexical_block)
-HANDLE_DW_TAG(0x000d, member)
-HANDLE_DW_TAG(0x000f, pointer_type)
-HANDLE_DW_TAG(0x0010, reference_type)
-HANDLE_DW_TAG(0x0011, compile_unit)
-HANDLE_DW_TAG(0x0012, string_type)
-HANDLE_DW_TAG(0x0013, structure_type)
-HANDLE_DW_TAG(0x0015, subroutine_type)
-HANDLE_DW_TAG(0x0016, typedef)
-HANDLE_DW_TAG(0x0017, union_type)
-HANDLE_DW_TAG(0x0018, unspecified_parameters)
-HANDLE_DW_TAG(0x0019, variant)
-HANDLE_DW_TAG(0x001a, common_block)
-HANDLE_DW_TAG(0x001b, common_inclusion)
-HANDLE_DW_TAG(0x001c, inheritance)
-HANDLE_DW_TAG(0x001d, inlined_subroutine)
-HANDLE_DW_TAG(0x001e, module)
-HANDLE_DW_TAG(0x001f, ptr_to_member_type)
-HANDLE_DW_TAG(0x0020, set_type)
-HANDLE_DW_TAG(0x0021, subrange_type)
-HANDLE_DW_TAG(0x0022, with_stmt)
-HANDLE_DW_TAG(0x0023, access_declaration)
-HANDLE_DW_TAG(0x0024, base_type)
-HANDLE_DW_TAG(0x0025, catch_block)
-HANDLE_DW_TAG(0x0026, const_type)
-HANDLE_DW_TAG(0x0027, constant)
-HANDLE_DW_TAG(0x0028, enumerator)
-HANDLE_DW_TAG(0x0029, file_type)
-HANDLE_DW_TAG(0x002a, friend)
-HANDLE_DW_TAG(0x002b, namelist)
-HANDLE_DW_TAG(0x002c, namelist_item)
-HANDLE_DW_TAG(0x002d, packed_type)
-HANDLE_DW_TAG(0x002e, subprogram)
-HANDLE_DW_TAG(0x002f, template_type_parameter)
-HANDLE_DW_TAG(0x0030, template_value_parameter)
-HANDLE_DW_TAG(0x0031, thrown_type)
-HANDLE_DW_TAG(0x0032, try_block)
-HANDLE_DW_TAG(0x0033, variant_part)
-HANDLE_DW_TAG(0x0034, variable)
-HANDLE_DW_TAG(0x0035, volatile_type)
+HANDLE_DW_TAG(0x0000, null, 2, DWARF)
+HANDLE_DW_TAG(0x0001, array_type, 2, DWARF)
+HANDLE_DW_TAG(0x0002, class_type, 2, DWARF)
+HANDLE_DW_TAG(0x0003, entry_point, 2, DWARF)
+HANDLE_DW_TAG(0x0004, enumeration_type, 2, DWARF)
+HANDLE_DW_TAG(0x0005, formal_parameter, 2, DWARF)
+HANDLE_DW_TAG(0x0008, imported_declaration, 2, DWARF)
+HANDLE_DW_TAG(0x000a, label, 2, DWARF)
+HANDLE_DW_TAG(0x000b, lexical_block, 2, DWARF)
+HANDLE_DW_TAG(0x000d, member, 2, DWARF)
+HANDLE_DW_TAG(0x000f, pointer_type, 2, DWARF)
+HANDLE_DW_TAG(0x0010, reference_type, 2, DWARF)
+HANDLE_DW_TAG(0x0011, compile_unit, 2, DWARF)
+HANDLE_DW_TAG(0x0012, string_type, 2, DWARF)
+HANDLE_DW_TAG(0x0013, structure_type, 2, DWARF)
+HANDLE_DW_TAG(0x0015, subroutine_type, 2, DWARF)
+HANDLE_DW_TAG(0x0016, typedef, 2, DWARF)
+HANDLE_DW_TAG(0x0017, union_type, 2, DWARF)
+HANDLE_DW_TAG(0x0018, unspecified_parameters, 2, DWARF)
+HANDLE_DW_TAG(0x0019, variant, 2, DWARF)
+HANDLE_DW_TAG(0x001a, common_block, 2, DWARF)
+HANDLE_DW_TAG(0x001b, common_inclusion, 2, DWARF)
+HANDLE_DW_TAG(0x001c, inheritance, 2, DWARF)
+HANDLE_DW_TAG(0x001d, inlined_subroutine, 2, DWARF)
+HANDLE_DW_TAG(0x001e, module, 2, DWARF)
+HANDLE_DW_TAG(0x001f, ptr_to_member_type, 2, DWARF)
+HANDLE_DW_TAG(0x0020, set_type, 2, DWARF)
+HANDLE_DW_TAG(0x0021, subrange_type, 2, DWARF)
+HANDLE_DW_TAG(0x0022, with_stmt, 2, DWARF)
+HANDLE_DW_TAG(0x0023, access_declaration, 2, DWARF)
+HANDLE_DW_TAG(0x0024, base_type, 2, DWARF)
+HANDLE_DW_TAG(0x0025, catch_block, 2, DWARF)
+HANDLE_DW_TAG(0x0026, const_type, 2, DWARF)
+HANDLE_DW_TAG(0x0027, constant, 2, DWARF)
+HANDLE_DW_TAG(0x0028, enumerator, 2, DWARF)
+HANDLE_DW_TAG(0x0029, file_type, 2, DWARF)
+HANDLE_DW_TAG(0x002a, friend, 2, DWARF)
+HANDLE_DW_TAG(0x002b, namelist, 2, DWARF)
+HANDLE_DW_TAG(0x002c, namelist_item, 2, DWARF)
+HANDLE_DW_TAG(0x002d, packed_type, 2, DWARF)
+HANDLE_DW_TAG(0x002e, subprogram, 2, DWARF)
+HANDLE_DW_TAG(0x002f, template_type_parameter, 2, DWARF)
+HANDLE_DW_TAG(0x0030, template_value_parameter, 2, DWARF)
+HANDLE_DW_TAG(0x0031, thrown_type, 2, DWARF)
+HANDLE_DW_TAG(0x0032, try_block, 2, DWARF)
+HANDLE_DW_TAG(0x0033, variant_part, 2, DWARF)
+HANDLE_DW_TAG(0x0034, variable, 2, DWARF)
+HANDLE_DW_TAG(0x0035, volatile_type, 2, DWARF)
// New in DWARF v3:
-HANDLE_DW_TAG(0x0036, dwarf_procedure)
-HANDLE_DW_TAG(0x0037, restrict_type)
-HANDLE_DW_TAG(0x0038, interface_type)
-HANDLE_DW_TAG(0x0039, namespace)
-HANDLE_DW_TAG(0x003a, imported_module)
-HANDLE_DW_TAG(0x003b, unspecified_type)
-HANDLE_DW_TAG(0x003c, partial_unit)
-HANDLE_DW_TAG(0x003d, imported_unit)
-HANDLE_DW_TAG(0x003f, condition)
-HANDLE_DW_TAG(0x0040, shared_type)
+HANDLE_DW_TAG(0x0036, dwarf_procedure, 3, DWARF)
+HANDLE_DW_TAG(0x0037, restrict_type, 3, DWARF)
+HANDLE_DW_TAG(0x0038, interface_type, 3, DWARF)
+HANDLE_DW_TAG(0x0039, namespace, 3, DWARF)
+HANDLE_DW_TAG(0x003a, imported_module, 3, DWARF)
+HANDLE_DW_TAG(0x003b, unspecified_type, 3, DWARF)
+HANDLE_DW_TAG(0x003c, partial_unit, 3, DWARF)
+HANDLE_DW_TAG(0x003d, imported_unit, 3, DWARF)
+HANDLE_DW_TAG(0x003f, condition, 3, DWARF)
+HANDLE_DW_TAG(0x0040, shared_type, 3, DWARF)
// New in DWARF v4:
-HANDLE_DW_TAG(0x0041, type_unit)
-HANDLE_DW_TAG(0x0042, rvalue_reference_type)
-HANDLE_DW_TAG(0x0043, template_alias)
+HANDLE_DW_TAG(0x0041, type_unit, 4, DWARF)
+HANDLE_DW_TAG(0x0042, rvalue_reference_type, 4, DWARF)
+HANDLE_DW_TAG(0x0043, template_alias, 4, DWARF)
// New in DWARF v5:
-HANDLE_DW_TAG(0x0044, coarray_type)
-HANDLE_DW_TAG(0x0045, generic_subrange)
-HANDLE_DW_TAG(0x0046, dynamic_type)
-HANDLE_DW_TAG(0x0047, atomic_type)
-HANDLE_DW_TAG(0x0048, call_site)
-HANDLE_DW_TAG(0x0049, call_site_parameter)
-HANDLE_DW_TAG(0x004a, skeleton_unit)
-HANDLE_DW_TAG(0x004b, immutable_type)
+HANDLE_DW_TAG(0x0044, coarray_type, 5, DWARF)
+HANDLE_DW_TAG(0x0045, generic_subrange, 5, DWARF)
+HANDLE_DW_TAG(0x0046, dynamic_type, 5, DWARF)
+HANDLE_DW_TAG(0x0047, atomic_type, 5, DWARF)
+HANDLE_DW_TAG(0x0048, call_site, 5, DWARF)
+HANDLE_DW_TAG(0x0049, call_site_parameter, 5, DWARF)
+HANDLE_DW_TAG(0x004a, skeleton_unit, 5, DWARF)
+HANDLE_DW_TAG(0x004b, immutable_type, 5, DWARF)
// Vendor extensions:
-HANDLE_DW_TAG(0x4081, MIPS_loop)
-HANDLE_DW_TAG(0x4101, format_label)
-HANDLE_DW_TAG(0x4102, function_template)
-HANDLE_DW_TAG(0x4103, class_template)
-HANDLE_DW_TAG(0x4106, GNU_template_template_param)
-HANDLE_DW_TAG(0x4107, GNU_template_parameter_pack)
-HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack)
-HANDLE_DW_TAG(0x4200, APPLE_property)
-HANDLE_DW_TAG(0xb000, BORLAND_property)
-HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string)
-HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array)
-HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set)
-HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant)
+HANDLE_DW_TAG(0x4081, MIPS_loop, 0, MIPS)
+HANDLE_DW_TAG(0x4101, format_label, 0, GNU)
+HANDLE_DW_TAG(0x4102, function_template, 0, GNU)
+HANDLE_DW_TAG(0x4103, class_template, 0, GNU)
+HANDLE_DW_TAG(0x4106, GNU_template_template_param, 0, GNU)
+HANDLE_DW_TAG(0x4107, GNU_template_parameter_pack, 0, GNU)
+HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack, 0, GNU)
+HANDLE_DW_TAG(0x4200, APPLE_property, 0, APPLE)
+HANDLE_DW_TAG(0xb000, BORLAND_property, 0, BORLAND)
+HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string, 0, BORLAND)
+HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array, 0, BORLAND)
+HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set, 0, BORLAND)
+HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant, 0, BORLAND)
// Attributes.
-HANDLE_DW_AT(0x01, sibling)
-HANDLE_DW_AT(0x02, location)
-HANDLE_DW_AT(0x03, name)
-HANDLE_DW_AT(0x09, ordering)
-HANDLE_DW_AT(0x0b, byte_size)
-HANDLE_DW_AT(0x0c, bit_offset)
-HANDLE_DW_AT(0x0d, bit_size)
-HANDLE_DW_AT(0x10, stmt_list)
-HANDLE_DW_AT(0x11, low_pc)
-HANDLE_DW_AT(0x12, high_pc)
-HANDLE_DW_AT(0x13, language)
-HANDLE_DW_AT(0x15, discr)
-HANDLE_DW_AT(0x16, discr_value)
-HANDLE_DW_AT(0x17, visibility)
-HANDLE_DW_AT(0x18, import)
-HANDLE_DW_AT(0x19, string_length)
-HANDLE_DW_AT(0x1a, common_reference)
-HANDLE_DW_AT(0x1b, comp_dir)
-HANDLE_DW_AT(0x1c, const_value)
-HANDLE_DW_AT(0x1d, containing_type)
-HANDLE_DW_AT(0x1e, default_value)
-HANDLE_DW_AT(0x20, inline)
-HANDLE_DW_AT(0x21, is_optional)
-HANDLE_DW_AT(0x22, lower_bound)
-HANDLE_DW_AT(0x25, producer)
-HANDLE_DW_AT(0x27, prototyped)
-HANDLE_DW_AT(0x2a, return_addr)
-HANDLE_DW_AT(0x2c, start_scope)
-HANDLE_DW_AT(0x2e, bit_stride)
-HANDLE_DW_AT(0x2f, upper_bound)
-HANDLE_DW_AT(0x31, abstract_origin)
-HANDLE_DW_AT(0x32, accessibility)
-HANDLE_DW_AT(0x33, address_class)
-HANDLE_DW_AT(0x34, artificial)
-HANDLE_DW_AT(0x35, base_types)
-HANDLE_DW_AT(0x36, calling_convention)
-HANDLE_DW_AT(0x37, count)
-HANDLE_DW_AT(0x38, data_member_location)
-HANDLE_DW_AT(0x39, decl_column)
-HANDLE_DW_AT(0x3a, decl_file)
-HANDLE_DW_AT(0x3b, decl_line)
-HANDLE_DW_AT(0x3c, declaration)
-HANDLE_DW_AT(0x3d, discr_list)
-HANDLE_DW_AT(0x3e, encoding)
-HANDLE_DW_AT(0x3f, external)
-HANDLE_DW_AT(0x40, frame_base)
-HANDLE_DW_AT(0x41, friend)
-HANDLE_DW_AT(0x42, identifier_case)
-HANDLE_DW_AT(0x43, macro_info)
-HANDLE_DW_AT(0x44, namelist_item)
-HANDLE_DW_AT(0x45, priority)
-HANDLE_DW_AT(0x46, segment)
-HANDLE_DW_AT(0x47, specification)
-HANDLE_DW_AT(0x48, static_link)
-HANDLE_DW_AT(0x49, type)
-HANDLE_DW_AT(0x4a, use_location)
-HANDLE_DW_AT(0x4b, variable_parameter)
-HANDLE_DW_AT(0x4c, virtuality)
-HANDLE_DW_AT(0x4d, vtable_elem_location)
+HANDLE_DW_AT(0x01, sibling, 2, DWARF)
+HANDLE_DW_AT(0x02, location, 2, DWARF)
+HANDLE_DW_AT(0x03, name, 2, DWARF)
+HANDLE_DW_AT(0x09, ordering, 2, DWARF)
+HANDLE_DW_AT(0x0b, byte_size, 2, DWARF)
+HANDLE_DW_AT(0x0c, bit_offset, 2, DWARF)
+HANDLE_DW_AT(0x0d, bit_size, 2, DWARF)
+HANDLE_DW_AT(0x10, stmt_list, 2, DWARF)
+HANDLE_DW_AT(0x11, low_pc, 2, DWARF)
+HANDLE_DW_AT(0x12, high_pc, 2, DWARF)
+HANDLE_DW_AT(0x13, language, 2, DWARF)
+HANDLE_DW_AT(0x15, discr, 2, DWARF)
+HANDLE_DW_AT(0x16, discr_value, 2, DWARF)
+HANDLE_DW_AT(0x17, visibility, 2, DWARF)
+HANDLE_DW_AT(0x18, import, 2, DWARF)
+HANDLE_DW_AT(0x19, string_length, 2, DWARF)
+HANDLE_DW_AT(0x1a, common_reference, 2, DWARF)
+HANDLE_DW_AT(0x1b, comp_dir, 2, DWARF)
+HANDLE_DW_AT(0x1c, const_value, 2, DWARF)
+HANDLE_DW_AT(0x1d, containing_type, 2, DWARF)
+HANDLE_DW_AT(0x1e, default_value, 2, DWARF)
+HANDLE_DW_AT(0x20, inline, 2, DWARF)
+HANDLE_DW_AT(0x21, is_optional, 2, DWARF)
+HANDLE_DW_AT(0x22, lower_bound, 2, DWARF)
+HANDLE_DW_AT(0x25, producer, 2, DWARF)
+HANDLE_DW_AT(0x27, prototyped, 2, DWARF)
+HANDLE_DW_AT(0x2a, return_addr, 2, DWARF)
+HANDLE_DW_AT(0x2c, start_scope, 2, DWARF)
+HANDLE_DW_AT(0x2e, bit_stride, 2, DWARF)
+HANDLE_DW_AT(0x2f, upper_bound, 2, DWARF)
+HANDLE_DW_AT(0x31, abstract_origin, 2, DWARF)
+HANDLE_DW_AT(0x32, accessibility, 2, DWARF)
+HANDLE_DW_AT(0x33, address_class, 2, DWARF)
+HANDLE_DW_AT(0x34, artificial, 2, DWARF)
+HANDLE_DW_AT(0x35, base_types, 2, DWARF)
+HANDLE_DW_AT(0x36, calling_convention, 2, DWARF)
+HANDLE_DW_AT(0x37, count, 2, DWARF)
+HANDLE_DW_AT(0x38, data_member_location, 2, DWARF)
+HANDLE_DW_AT(0x39, decl_column, 2, DWARF)
+HANDLE_DW_AT(0x3a, decl_file, 2, DWARF)
+HANDLE_DW_AT(0x3b, decl_line, 2, DWARF)
+HANDLE_DW_AT(0x3c, declaration, 2, DWARF)
+HANDLE_DW_AT(0x3d, discr_list, 2, DWARF)
+HANDLE_DW_AT(0x3e, encoding, 2, DWARF)
+HANDLE_DW_AT(0x3f, external, 2, DWARF)
+HANDLE_DW_AT(0x40, frame_base, 2, DWARF)
+HANDLE_DW_AT(0x41, friend, 2, DWARF)
+HANDLE_DW_AT(0x42, identifier_case, 2, DWARF)
+HANDLE_DW_AT(0x43, macro_info, 2, DWARF)
+HANDLE_DW_AT(0x44, namelist_item, 2, DWARF)
+HANDLE_DW_AT(0x45, priority, 2, DWARF)
+HANDLE_DW_AT(0x46, segment, 2, DWARF)
+HANDLE_DW_AT(0x47, specification, 2, DWARF)
+HANDLE_DW_AT(0x48, static_link, 2, DWARF)
+HANDLE_DW_AT(0x49, type, 2, DWARF)
+HANDLE_DW_AT(0x4a, use_location, 2, DWARF)
+HANDLE_DW_AT(0x4b, variable_parameter, 2, DWARF)
+HANDLE_DW_AT(0x4c, virtuality, 2, DWARF)
+HANDLE_DW_AT(0x4d, vtable_elem_location, 2, DWARF)
// New in DWARF v3:
-HANDLE_DW_AT(0x4e, allocated)
-HANDLE_DW_AT(0x4f, associated)
-HANDLE_DW_AT(0x50, data_location)
-HANDLE_DW_AT(0x51, byte_stride)
-HANDLE_DW_AT(0x52, entry_pc)
-HANDLE_DW_AT(0x53, use_UTF8)
-HANDLE_DW_AT(0x54, extension)
-HANDLE_DW_AT(0x55, ranges)
-HANDLE_DW_AT(0x56, trampoline)
-HANDLE_DW_AT(0x57, call_column)
-HANDLE_DW_AT(0x58, call_file)
-HANDLE_DW_AT(0x59, call_line)
-HANDLE_DW_AT(0x5a, description)
-HANDLE_DW_AT(0x5b, binary_scale)
-HANDLE_DW_AT(0x5c, decimal_scale)
-HANDLE_DW_AT(0x5d, small)
-HANDLE_DW_AT(0x5e, decimal_sign)
-HANDLE_DW_AT(0x5f, digit_count)
-HANDLE_DW_AT(0x60, picture_string)
-HANDLE_DW_AT(0x61, mutable)
-HANDLE_DW_AT(0x62, threads_scaled)
-HANDLE_DW_AT(0x63, explicit)
-HANDLE_DW_AT(0x64, object_pointer)
-HANDLE_DW_AT(0x65, endianity)
-HANDLE_DW_AT(0x66, elemental)
-HANDLE_DW_AT(0x67, pure)
-HANDLE_DW_AT(0x68, recursive)
+HANDLE_DW_AT(0x4e, allocated, 3, DWARF)
+HANDLE_DW_AT(0x4f, associated, 3, DWARF)
+HANDLE_DW_AT(0x50, data_location, 3, DWARF)
+HANDLE_DW_AT(0x51, byte_stride, 3, DWARF)
+HANDLE_DW_AT(0x52, entry_pc, 3, DWARF)
+HANDLE_DW_AT(0x53, use_UTF8, 3, DWARF)
+HANDLE_DW_AT(0x54, extension, 3, DWARF)
+HANDLE_DW_AT(0x55, ranges, 3, DWARF)
+HANDLE_DW_AT(0x56, trampoline, 3, DWARF)
+HANDLE_DW_AT(0x57, call_column, 3, DWARF)
+HANDLE_DW_AT(0x58, call_file, 3, DWARF)
+HANDLE_DW_AT(0x59, call_line, 3, DWARF)
+HANDLE_DW_AT(0x5a, description, 3, DWARF)
+HANDLE_DW_AT(0x5b, binary_scale, 3, DWARF)
+HANDLE_DW_AT(0x5c, decimal_scale, 3, DWARF)
+HANDLE_DW_AT(0x5d, small, 3, DWARF)
+HANDLE_DW_AT(0x5e, decimal_sign, 3, DWARF)
+HANDLE_DW_AT(0x5f, digit_count, 3, DWARF)
+HANDLE_DW_AT(0x60, picture_string, 3, DWARF)
+HANDLE_DW_AT(0x61, mutable, 3, DWARF)
+HANDLE_DW_AT(0x62, threads_scaled, 3, DWARF)
+HANDLE_DW_AT(0x63, explicit, 3, DWARF)
+HANDLE_DW_AT(0x64, object_pointer, 3, DWARF)
+HANDLE_DW_AT(0x65, endianity, 3, DWARF)
+HANDLE_DW_AT(0x66, elemental, 3, DWARF)
+HANDLE_DW_AT(0x67, pure, 3, DWARF)
+HANDLE_DW_AT(0x68, recursive, 3, DWARF)
// New in DWARF v4:
-HANDLE_DW_AT(0x69, signature)
-HANDLE_DW_AT(0x6a, main_subprogram)
-HANDLE_DW_AT(0x6b, data_bit_offset)
-HANDLE_DW_AT(0x6c, const_expr)
-HANDLE_DW_AT(0x6d, enum_class)
-HANDLE_DW_AT(0x6e, linkage_name)
+HANDLE_DW_AT(0x69, signature, 4, DWARF)
+HANDLE_DW_AT(0x6a, main_subprogram, 4, DWARF)
+HANDLE_DW_AT(0x6b, data_bit_offset, 4, DWARF)
+HANDLE_DW_AT(0x6c, const_expr, 4, DWARF)
+HANDLE_DW_AT(0x6d, enum_class, 4, DWARF)
+HANDLE_DW_AT(0x6e, linkage_name, 4, DWARF)
// New in DWARF v5:
-HANDLE_DW_AT(0x6f, string_length_bit_size)
-HANDLE_DW_AT(0x70, string_length_byte_size)
-HANDLE_DW_AT(0x71, rank)
-HANDLE_DW_AT(0x72, str_offsets_base)
-HANDLE_DW_AT(0x73, addr_base)
-HANDLE_DW_AT(0x74, rnglists_base)
-HANDLE_DW_AT(0x75, dwo_id) ///< Retracted from DWARF 5.
-HANDLE_DW_AT(0x76, dwo_name)
-HANDLE_DW_AT(0x77, reference)
-HANDLE_DW_AT(0x78, rvalue_reference)
-HANDLE_DW_AT(0x79, macros)
-HANDLE_DW_AT(0x7a, call_all_calls)
-HANDLE_DW_AT(0x7b, call_all_source_calls)
-HANDLE_DW_AT(0x7c, call_all_tail_calls)
-HANDLE_DW_AT(0x7d, call_return_pc)
-HANDLE_DW_AT(0x7e, call_value)
-HANDLE_DW_AT(0x7f, call_origin)
-HANDLE_DW_AT(0x80, call_parameter)
-HANDLE_DW_AT(0x81, call_pc)
-HANDLE_DW_AT(0x82, call_tail_call)
-HANDLE_DW_AT(0x83, call_target)
-HANDLE_DW_AT(0x84, call_target_clobbered)
-HANDLE_DW_AT(0x85, call_data_location)
-HANDLE_DW_AT(0x86, call_data_value)
-HANDLE_DW_AT(0x87, noreturn)
-HANDLE_DW_AT(0x88, alignment)
-HANDLE_DW_AT(0x89, export_symbols)
-HANDLE_DW_AT(0x8a, deleted)
-HANDLE_DW_AT(0x8b, defaulted)
-HANDLE_DW_AT(0x8c, loclists_base)
+HANDLE_DW_AT(0x6f, string_length_bit_size, 5, DWARF)
+HANDLE_DW_AT(0x70, string_length_byte_size, 5, DWARF)
+HANDLE_DW_AT(0x71, rank, 5, DWARF)
+HANDLE_DW_AT(0x72, str_offsets_base, 5, DWARF)
+HANDLE_DW_AT(0x73, addr_base, 5, DWARF)
+HANDLE_DW_AT(0x74, rnglists_base, 5, DWARF)
+HANDLE_DW_AT(0x75, dwo_id, 0, DWARF) ///< Retracted from DWARF v5.
+HANDLE_DW_AT(0x76, dwo_name, 5, DWARF)
+HANDLE_DW_AT(0x77, reference, 5, DWARF)
+HANDLE_DW_AT(0x78, rvalue_reference, 5, DWARF)
+HANDLE_DW_AT(0x79, macros, 5, DWARF)
+HANDLE_DW_AT(0x7a, call_all_calls, 5, DWARF)
+HANDLE_DW_AT(0x7b, call_all_source_calls, 5, DWARF)
+HANDLE_DW_AT(0x7c, call_all_tail_calls, 5, DWARF)
+HANDLE_DW_AT(0x7d, call_return_pc, 5, DWARF)
+HANDLE_DW_AT(0x7e, call_value, 5, DWARF)
+HANDLE_DW_AT(0x7f, call_origin, 5, DWARF)
+HANDLE_DW_AT(0x80, call_parameter, 5, DWARF)
+HANDLE_DW_AT(0x81, call_pc, 5, DWARF)
+HANDLE_DW_AT(0x82, call_tail_call, 5, DWARF)
+HANDLE_DW_AT(0x83, call_target, 5, DWARF)
+HANDLE_DW_AT(0x84, call_target_clobbered, 5, DWARF)
+HANDLE_DW_AT(0x85, call_data_location, 5, DWARF)
+HANDLE_DW_AT(0x86, call_data_value, 5, DWARF)
+HANDLE_DW_AT(0x87, noreturn, 5, DWARF)
+HANDLE_DW_AT(0x88, alignment, 5, DWARF)
+HANDLE_DW_AT(0x89, export_symbols, 5, DWARF)
+HANDLE_DW_AT(0x8a, deleted, 5, DWARF)
+HANDLE_DW_AT(0x8b, defaulted, 5, DWARF)
+HANDLE_DW_AT(0x8c, loclists_base, 5, DWARF)
// Vendor extensions:
-HANDLE_DW_AT(0x2002, MIPS_loop_begin)
-HANDLE_DW_AT(0x2003, MIPS_tail_loop_begin)
-HANDLE_DW_AT(0x2004, MIPS_epilog_begin)
-HANDLE_DW_AT(0x2005, MIPS_loop_unroll_factor)
-HANDLE_DW_AT(0x2006, MIPS_software_pipeline_depth)
-HANDLE_DW_AT(0x2007, MIPS_linkage_name)
-HANDLE_DW_AT(0x2008, MIPS_stride)
-HANDLE_DW_AT(0x2009, MIPS_abstract_name)
-HANDLE_DW_AT(0x200a, MIPS_clone_origin)
-HANDLE_DW_AT(0x200b, MIPS_has_inlines)
-HANDLE_DW_AT(0x200c, MIPS_stride_byte)
-HANDLE_DW_AT(0x200d, MIPS_stride_elem)
-HANDLE_DW_AT(0x200e, MIPS_ptr_dopetype)
-HANDLE_DW_AT(0x200f, MIPS_allocatable_dopetype)
-HANDLE_DW_AT(0x2010, MIPS_assumed_shape_dopetype)
+HANDLE_DW_AT(0x2002, MIPS_loop_begin, 0, MIPS)
+HANDLE_DW_AT(0x2003, MIPS_tail_loop_begin, 0, MIPS)
+HANDLE_DW_AT(0x2004, MIPS_epilog_begin, 0, MIPS)
+HANDLE_DW_AT(0x2005, MIPS_loop_unroll_factor, 0, MIPS)
+HANDLE_DW_AT(0x2006, MIPS_software_pipeline_depth, 0, MIPS)
+HANDLE_DW_AT(0x2007, MIPS_linkage_name, 0, MIPS)
+HANDLE_DW_AT(0x2008, MIPS_stride, 0, MIPS)
+HANDLE_DW_AT(0x2009, MIPS_abstract_name, 0, MIPS)
+HANDLE_DW_AT(0x200a, MIPS_clone_origin, 0, MIPS)
+HANDLE_DW_AT(0x200b, MIPS_has_inlines, 0, MIPS)
+HANDLE_DW_AT(0x200c, MIPS_stride_byte, 0, MIPS)
+HANDLE_DW_AT(0x200d, MIPS_stride_elem, 0, MIPS)
+HANDLE_DW_AT(0x200e, MIPS_ptr_dopetype, 0, MIPS)
+HANDLE_DW_AT(0x200f, MIPS_allocatable_dopetype, 0, MIPS)
+HANDLE_DW_AT(0x2010, MIPS_assumed_shape_dopetype, 0, MIPS)
// This one appears to have only been implemented by Open64 for
// Fortran and may conflict with other extensions.
-HANDLE_DW_AT(0x2011, MIPS_assumed_size)
+HANDLE_DW_AT(0x2011, MIPS_assumed_size, 0, MIPS)
// GNU extensions
-HANDLE_DW_AT(0x2101, sf_names)
-HANDLE_DW_AT(0x2102, src_info)
-HANDLE_DW_AT(0x2103, mac_info)
-HANDLE_DW_AT(0x2104, src_coords)
-HANDLE_DW_AT(0x2105, body_begin)
-HANDLE_DW_AT(0x2106, body_end)
-HANDLE_DW_AT(0x2107, GNU_vector)
-HANDLE_DW_AT(0x2110, GNU_template_name)
-HANDLE_DW_AT(0x210f, GNU_odr_signature)
-HANDLE_DW_AT(0x2119, GNU_macros)
+HANDLE_DW_AT(0x2101, sf_names, 0, GNU)
+HANDLE_DW_AT(0x2102, src_info, 0, GNU)
+HANDLE_DW_AT(0x2103, mac_info, 0, GNU)
+HANDLE_DW_AT(0x2104, src_coords, 0, GNU)
+HANDLE_DW_AT(0x2105, body_begin, 0, GNU)
+HANDLE_DW_AT(0x2106, body_end, 0, GNU)
+HANDLE_DW_AT(0x2107, GNU_vector, 0, GNU)
+HANDLE_DW_AT(0x2110, GNU_template_name, 0, GNU)
+HANDLE_DW_AT(0x210f, GNU_odr_signature, 0, GNU)
+HANDLE_DW_AT(0x2119, GNU_macros, 0, GNU)
// Extensions for Fission proposal.
-HANDLE_DW_AT(0x2130, GNU_dwo_name)
-HANDLE_DW_AT(0x2131, GNU_dwo_id)
-HANDLE_DW_AT(0x2132, GNU_ranges_base)
-HANDLE_DW_AT(0x2133, GNU_addr_base)
-HANDLE_DW_AT(0x2134, GNU_pubnames)
-HANDLE_DW_AT(0x2135, GNU_pubtypes)
-HANDLE_DW_AT(0x2136, GNU_discriminator)
+HANDLE_DW_AT(0x2130, GNU_dwo_name, 0, GNU)
+HANDLE_DW_AT(0x2131, GNU_dwo_id, 0, GNU)
+HANDLE_DW_AT(0x2132, GNU_ranges_base, 0, GNU)
+HANDLE_DW_AT(0x2133, GNU_addr_base, 0, GNU)
+HANDLE_DW_AT(0x2134, GNU_pubnames, 0, GNU)
+HANDLE_DW_AT(0x2135, GNU_pubtypes, 0, GNU)
+HANDLE_DW_AT(0x2136, GNU_discriminator, 0, GNU)
// Borland extensions.
-HANDLE_DW_AT(0x3b11, BORLAND_property_read)
-HANDLE_DW_AT(0x3b12, BORLAND_property_write)
-HANDLE_DW_AT(0x3b13, BORLAND_property_implements)
-HANDLE_DW_AT(0x3b14, BORLAND_property_index)
-HANDLE_DW_AT(0x3b15, BORLAND_property_default)
-HANDLE_DW_AT(0x3b20, BORLAND_Delphi_unit)
-HANDLE_DW_AT(0x3b21, BORLAND_Delphi_class)
-HANDLE_DW_AT(0x3b22, BORLAND_Delphi_record)
-HANDLE_DW_AT(0x3b23, BORLAND_Delphi_metaclass)
-HANDLE_DW_AT(0x3b24, BORLAND_Delphi_constructor)
-HANDLE_DW_AT(0x3b25, BORLAND_Delphi_destructor)
-HANDLE_DW_AT(0x3b26, BORLAND_Delphi_anonymous_method)
-HANDLE_DW_AT(0x3b27, BORLAND_Delphi_interface)
-HANDLE_DW_AT(0x3b28, BORLAND_Delphi_ABI)
-HANDLE_DW_AT(0x3b29, BORLAND_Delphi_return)
-HANDLE_DW_AT(0x3b30, BORLAND_Delphi_frameptr)
-HANDLE_DW_AT(0x3b31, BORLAND_closure)
+HANDLE_DW_AT(0x3b11, BORLAND_property_read, 0, BORLAND)
+HANDLE_DW_AT(0x3b12, BORLAND_property_write, 0, BORLAND)
+HANDLE_DW_AT(0x3b13, BORLAND_property_implements, 0, BORLAND)
+HANDLE_DW_AT(0x3b14, BORLAND_property_index, 0, BORLAND)
+HANDLE_DW_AT(0x3b15, BORLAND_property_default, 0, BORLAND)
+HANDLE_DW_AT(0x3b20, BORLAND_Delphi_unit, 0, BORLAND)
+HANDLE_DW_AT(0x3b21, BORLAND_Delphi_class, 0, BORLAND)
+HANDLE_DW_AT(0x3b22, BORLAND_Delphi_record, 0, BORLAND)
+HANDLE_DW_AT(0x3b23, BORLAND_Delphi_metaclass, 0, BORLAND)
+HANDLE_DW_AT(0x3b24, BORLAND_Delphi_constructor, 0, BORLAND)
+HANDLE_DW_AT(0x3b25, BORLAND_Delphi_destructor, 0, BORLAND)
+HANDLE_DW_AT(0x3b26, BORLAND_Delphi_anonymous_method, 0, BORLAND)
+HANDLE_DW_AT(0x3b27, BORLAND_Delphi_interface, 0, BORLAND)
+HANDLE_DW_AT(0x3b28, BORLAND_Delphi_ABI, 0, BORLAND)
+HANDLE_DW_AT(0x3b29, BORLAND_Delphi_return, 0, BORLAND)
+HANDLE_DW_AT(0x3b30, BORLAND_Delphi_frameptr, 0, BORLAND)
+HANDLE_DW_AT(0x3b31, BORLAND_closure, 0, BORLAND)
// LLVM project extensions.
-HANDLE_DW_AT(0x3e00, LLVM_include_path)
-HANDLE_DW_AT(0x3e01, LLVM_config_macros)
-HANDLE_DW_AT(0x3e02, LLVM_isysroot)
+HANDLE_DW_AT(0x3e00, LLVM_include_path, 0, LLVM)
+HANDLE_DW_AT(0x3e01, LLVM_config_macros, 0, LLVM)
+HANDLE_DW_AT(0x3e02, LLVM_isysroot, 0, LLVM)
// Apple extensions.
-HANDLE_DW_AT(0x3fe1, APPLE_optimized)
-HANDLE_DW_AT(0x3fe2, APPLE_flags)
-HANDLE_DW_AT(0x3fe3, APPLE_isa)
-HANDLE_DW_AT(0x3fe4, APPLE_block)
-HANDLE_DW_AT(0x3fe5, APPLE_major_runtime_vers)
-HANDLE_DW_AT(0x3fe6, APPLE_runtime_class)
-HANDLE_DW_AT(0x3fe7, APPLE_omit_frame_ptr)
-HANDLE_DW_AT(0x3fe8, APPLE_property_name)
-HANDLE_DW_AT(0x3fe9, APPLE_property_getter)
-HANDLE_DW_AT(0x3fea, APPLE_property_setter)
-HANDLE_DW_AT(0x3feb, APPLE_property_attribute)
-HANDLE_DW_AT(0x3fec, APPLE_objc_complete_type)
-HANDLE_DW_AT(0x3fed, APPLE_property)
+HANDLE_DW_AT(0x3fe1, APPLE_optimized, 0, APPLE)
+HANDLE_DW_AT(0x3fe2, APPLE_flags, 0, APPLE)
+HANDLE_DW_AT(0x3fe3, APPLE_isa, 0, APPLE)
+HANDLE_DW_AT(0x3fe4, APPLE_block, 0, APPLE)
+HANDLE_DW_AT(0x3fe5, APPLE_major_runtime_vers, 0, APPLE)
+HANDLE_DW_AT(0x3fe6, APPLE_runtime_class, 0, APPLE)
+HANDLE_DW_AT(0x3fe7, APPLE_omit_frame_ptr, 0, APPLE)
+HANDLE_DW_AT(0x3fe8, APPLE_property_name, 0, APPLE)
+HANDLE_DW_AT(0x3fe9, APPLE_property_getter, 0, APPLE)
+HANDLE_DW_AT(0x3fea, APPLE_property_setter, 0, APPLE)
+HANDLE_DW_AT(0x3feb, APPLE_property_attribute, 0, APPLE)
+HANDLE_DW_AT(0x3fec, APPLE_objc_complete_type, 0, APPLE)
+HANDLE_DW_AT(0x3fed, APPLE_property, 0, APPLE)
// Attribute form encodings.
-HANDLE_DW_FORM(0x01, addr)
-HANDLE_DW_FORM(0x03, block2)
-HANDLE_DW_FORM(0x04, block4)
-HANDLE_DW_FORM(0x05, data2)
-HANDLE_DW_FORM(0x06, data4)
-HANDLE_DW_FORM(0x07, data8)
-HANDLE_DW_FORM(0x08, string)
-HANDLE_DW_FORM(0x09, block)
-HANDLE_DW_FORM(0x0a, block1)
-HANDLE_DW_FORM(0x0b, data1)
-HANDLE_DW_FORM(0x0c, flag)
-HANDLE_DW_FORM(0x0d, sdata)
-HANDLE_DW_FORM(0x0e, strp)
-HANDLE_DW_FORM(0x0f, udata)
-HANDLE_DW_FORM(0x10, ref_addr)
-HANDLE_DW_FORM(0x11, ref1)
-HANDLE_DW_FORM(0x12, ref2)
-HANDLE_DW_FORM(0x13, ref4)
-HANDLE_DW_FORM(0x14, ref8)
-HANDLE_DW_FORM(0x15, ref_udata)
-HANDLE_DW_FORM(0x16, indirect)
+HANDLE_DW_FORM(0x01, addr, 2, DWARF)
+HANDLE_DW_FORM(0x03, block2, 2, DWARF)
+HANDLE_DW_FORM(0x04, block4, 2, DWARF)
+HANDLE_DW_FORM(0x05, data2, 2, DWARF)
+HANDLE_DW_FORM(0x06, data4, 2, DWARF)
+HANDLE_DW_FORM(0x07, data8, 2, DWARF)
+HANDLE_DW_FORM(0x08, string, 2, DWARF)
+HANDLE_DW_FORM(0x09, block, 2, DWARF)
+HANDLE_DW_FORM(0x0a, block1, 2, DWARF)
+HANDLE_DW_FORM(0x0b, data1, 2, DWARF)
+HANDLE_DW_FORM(0x0c, flag, 2, DWARF)
+HANDLE_DW_FORM(0x0d, sdata, 2, DWARF)
+HANDLE_DW_FORM(0x0e, strp, 2, DWARF)
+HANDLE_DW_FORM(0x0f, udata, 2, DWARF)
+HANDLE_DW_FORM(0x10, ref_addr, 2, DWARF)
+HANDLE_DW_FORM(0x11, ref1, 2, DWARF)
+HANDLE_DW_FORM(0x12, ref2, 2, DWARF)
+HANDLE_DW_FORM(0x13, ref4, 2, DWARF)
+HANDLE_DW_FORM(0x14, ref8, 2, DWARF)
+HANDLE_DW_FORM(0x15, ref_udata, 2, DWARF)
+HANDLE_DW_FORM(0x16, indirect, 2, DWARF)
// New in DWARF v4:
-HANDLE_DW_FORM(0x17, sec_offset)
-HANDLE_DW_FORM(0x18, exprloc)
-HANDLE_DW_FORM(0x19, flag_present)
+HANDLE_DW_FORM(0x17, sec_offset, 4, DWARF)
+HANDLE_DW_FORM(0x18, exprloc, 4, DWARF)
+HANDLE_DW_FORM(0x19, flag_present, 4, DWARF)
// This was defined out of sequence.
-HANDLE_DW_FORM(0x20, ref_sig8)
+HANDLE_DW_FORM(0x20, ref_sig8, 4, DWARF)
// New in DWARF v5:
-HANDLE_DW_FORM(0x1a, strx)
-HANDLE_DW_FORM(0x1b, addrx)
-HANDLE_DW_FORM(0x1c, ref_sup4)
-HANDLE_DW_FORM(0x1d, strp_sup)
-HANDLE_DW_FORM(0x1e, data16)
-HANDLE_DW_FORM(0x1f, line_strp)
-HANDLE_DW_FORM(0x21, implicit_const)
-HANDLE_DW_FORM(0x22, loclistx)
-HANDLE_DW_FORM(0x23, rnglistx)
-HANDLE_DW_FORM(0x24, ref_sup8)
-HANDLE_DW_FORM(0x25, strx1)
-HANDLE_DW_FORM(0x26, strx2)
-HANDLE_DW_FORM(0x27, strx3)
-HANDLE_DW_FORM(0x28, strx4)
-HANDLE_DW_FORM(0x29, addrx1)
-HANDLE_DW_FORM(0x2a, addrx2)
-HANDLE_DW_FORM(0x2b, addrx3)
-HANDLE_DW_FORM(0x2c, addrx4)
+HANDLE_DW_FORM(0x1a, strx, 5, DWARF)
+HANDLE_DW_FORM(0x1b, addrx, 5, DWARF)
+HANDLE_DW_FORM(0x1c, ref_sup4, 5, DWARF)
+HANDLE_DW_FORM(0x1d, strp_sup, 5, DWARF)
+HANDLE_DW_FORM(0x1e, data16, 5, DWARF)
+HANDLE_DW_FORM(0x1f, line_strp, 5, DWARF)
+HANDLE_DW_FORM(0x21, implicit_const, 5, DWARF)
+HANDLE_DW_FORM(0x22, loclistx, 5, DWARF)
+HANDLE_DW_FORM(0x23, rnglistx, 5, DWARF)
+HANDLE_DW_FORM(0x24, ref_sup8, 5, DWARF)
+HANDLE_DW_FORM(0x25, strx1, 5, DWARF)
+HANDLE_DW_FORM(0x26, strx2, 5, DWARF)
+HANDLE_DW_FORM(0x27, strx3, 5, DWARF)
+HANDLE_DW_FORM(0x28, strx4, 5, DWARF)
+HANDLE_DW_FORM(0x29, addrx1, 5, DWARF)
+HANDLE_DW_FORM(0x2a, addrx2, 5, DWARF)
+HANDLE_DW_FORM(0x2b, addrx3, 5, DWARF)
+HANDLE_DW_FORM(0x2c, addrx4, 5, DWARF)
// Extensions for Fission proposal
-HANDLE_DW_FORM(0x1f01, GNU_addr_index)
-HANDLE_DW_FORM(0x1f02, GNU_str_index)
+HANDLE_DW_FORM(0x1f01, GNU_addr_index, 0, GNU)
+HANDLE_DW_FORM(0x1f02, GNU_str_index, 0, GNU)
// Alternate debug sections proposal (output of "dwz" tool).
-HANDLE_DW_FORM(0x1f20, GNU_ref_alt)
-HANDLE_DW_FORM(0x1f21, GNU_strp_alt)
+HANDLE_DW_FORM(0x1f20, GNU_ref_alt, 0, GNU)
+HANDLE_DW_FORM(0x1f21, GNU_strp_alt, 0, GNU)
// DWARF Expression operators.
-HANDLE_DW_OP(0x03, addr)
-HANDLE_DW_OP(0x06, deref)
-HANDLE_DW_OP(0x08, const1u)
-HANDLE_DW_OP(0x09, const1s)
-HANDLE_DW_OP(0x0a, const2u)
-HANDLE_DW_OP(0x0b, const2s)
-HANDLE_DW_OP(0x0c, const4u)
-HANDLE_DW_OP(0x0d, const4s)
-HANDLE_DW_OP(0x0e, const8u)
-HANDLE_DW_OP(0x0f, const8s)
-HANDLE_DW_OP(0x10, constu)
-HANDLE_DW_OP(0x11, consts)
-HANDLE_DW_OP(0x12, dup)
-HANDLE_DW_OP(0x13, drop)
-HANDLE_DW_OP(0x14, over)
-HANDLE_DW_OP(0x15, pick)
-HANDLE_DW_OP(0x16, swap)
-HANDLE_DW_OP(0x17, rot)
-HANDLE_DW_OP(0x18, xderef)
-HANDLE_DW_OP(0x19, abs)
-HANDLE_DW_OP(0x1a, and)
-HANDLE_DW_OP(0x1b, div)
-HANDLE_DW_OP(0x1c, minus)
-HANDLE_DW_OP(0x1d, mod)
-HANDLE_DW_OP(0x1e, mul)
-HANDLE_DW_OP(0x1f, neg)
-HANDLE_DW_OP(0x20, not)
-HANDLE_DW_OP(0x21, or)
-HANDLE_DW_OP(0x22, plus)
-HANDLE_DW_OP(0x23, plus_uconst)
-HANDLE_DW_OP(0x24, shl)
-HANDLE_DW_OP(0x25, shr)
-HANDLE_DW_OP(0x26, shra)
-HANDLE_DW_OP(0x27, xor)
-HANDLE_DW_OP(0x28, bra)
-HANDLE_DW_OP(0x29, eq)
-HANDLE_DW_OP(0x2a, ge)
-HANDLE_DW_OP(0x2b, gt)
-HANDLE_DW_OP(0x2c, le)
-HANDLE_DW_OP(0x2d, lt)
-HANDLE_DW_OP(0x2e, ne)
-HANDLE_DW_OP(0x2f, skip)
-HANDLE_DW_OP(0x30, lit0)
-HANDLE_DW_OP(0x31, lit1)
-HANDLE_DW_OP(0x32, lit2)
-HANDLE_DW_OP(0x33, lit3)
-HANDLE_DW_OP(0x34, lit4)
-HANDLE_DW_OP(0x35, lit5)
-HANDLE_DW_OP(0x36, lit6)
-HANDLE_DW_OP(0x37, lit7)
-HANDLE_DW_OP(0x38, lit8)
-HANDLE_DW_OP(0x39, lit9)
-HANDLE_DW_OP(0x3a, lit10)
-HANDLE_DW_OP(0x3b, lit11)
-HANDLE_DW_OP(0x3c, lit12)
-HANDLE_DW_OP(0x3d, lit13)
-HANDLE_DW_OP(0x3e, lit14)
-HANDLE_DW_OP(0x3f, lit15)
-HANDLE_DW_OP(0x40, lit16)
-HANDLE_DW_OP(0x41, lit17)
-HANDLE_DW_OP(0x42, lit18)
-HANDLE_DW_OP(0x43, lit19)
-HANDLE_DW_OP(0x44, lit20)
-HANDLE_DW_OP(0x45, lit21)
-HANDLE_DW_OP(0x46, lit22)
-HANDLE_DW_OP(0x47, lit23)
-HANDLE_DW_OP(0x48, lit24)
-HANDLE_DW_OP(0x49, lit25)
-HANDLE_DW_OP(0x4a, lit26)
-HANDLE_DW_OP(0x4b, lit27)
-HANDLE_DW_OP(0x4c, lit28)
-HANDLE_DW_OP(0x4d, lit29)
-HANDLE_DW_OP(0x4e, lit30)
-HANDLE_DW_OP(0x4f, lit31)
-HANDLE_DW_OP(0x50, reg0)
-HANDLE_DW_OP(0x51, reg1)
-HANDLE_DW_OP(0x52, reg2)
-HANDLE_DW_OP(0x53, reg3)
-HANDLE_DW_OP(0x54, reg4)
-HANDLE_DW_OP(0x55, reg5)
-HANDLE_DW_OP(0x56, reg6)
-HANDLE_DW_OP(0x57, reg7)
-HANDLE_DW_OP(0x58, reg8)
-HANDLE_DW_OP(0x59, reg9)
-HANDLE_DW_OP(0x5a, reg10)
-HANDLE_DW_OP(0x5b, reg11)
-HANDLE_DW_OP(0x5c, reg12)
-HANDLE_DW_OP(0x5d, reg13)
-HANDLE_DW_OP(0x5e, reg14)
-HANDLE_DW_OP(0x5f, reg15)
-HANDLE_DW_OP(0x60, reg16)
-HANDLE_DW_OP(0x61, reg17)
-HANDLE_DW_OP(0x62, reg18)
-HANDLE_DW_OP(0x63, reg19)
-HANDLE_DW_OP(0x64, reg20)
-HANDLE_DW_OP(0x65, reg21)
-HANDLE_DW_OP(0x66, reg22)
-HANDLE_DW_OP(0x67, reg23)
-HANDLE_DW_OP(0x68, reg24)
-HANDLE_DW_OP(0x69, reg25)
-HANDLE_DW_OP(0x6a, reg26)
-HANDLE_DW_OP(0x6b, reg27)
-HANDLE_DW_OP(0x6c, reg28)
-HANDLE_DW_OP(0x6d, reg29)
-HANDLE_DW_OP(0x6e, reg30)
-HANDLE_DW_OP(0x6f, reg31)
-HANDLE_DW_OP(0x70, breg0)
-HANDLE_DW_OP(0x71, breg1)
-HANDLE_DW_OP(0x72, breg2)
-HANDLE_DW_OP(0x73, breg3)
-HANDLE_DW_OP(0x74, breg4)
-HANDLE_DW_OP(0x75, breg5)
-HANDLE_DW_OP(0x76, breg6)
-HANDLE_DW_OP(0x77, breg7)
-HANDLE_DW_OP(0x78, breg8)
-HANDLE_DW_OP(0x79, breg9)
-HANDLE_DW_OP(0x7a, breg10)
-HANDLE_DW_OP(0x7b, breg11)
-HANDLE_DW_OP(0x7c, breg12)
-HANDLE_DW_OP(0x7d, breg13)
-HANDLE_DW_OP(0x7e, breg14)
-HANDLE_DW_OP(0x7f, breg15)
-HANDLE_DW_OP(0x80, breg16)
-HANDLE_DW_OP(0x81, breg17)
-HANDLE_DW_OP(0x82, breg18)
-HANDLE_DW_OP(0x83, breg19)
-HANDLE_DW_OP(0x84, breg20)
-HANDLE_DW_OP(0x85, breg21)
-HANDLE_DW_OP(0x86, breg22)
-HANDLE_DW_OP(0x87, breg23)
-HANDLE_DW_OP(0x88, breg24)
-HANDLE_DW_OP(0x89, breg25)
-HANDLE_DW_OP(0x8a, breg26)
-HANDLE_DW_OP(0x8b, breg27)
-HANDLE_DW_OP(0x8c, breg28)
-HANDLE_DW_OP(0x8d, breg29)
-HANDLE_DW_OP(0x8e, breg30)
-HANDLE_DW_OP(0x8f, breg31)
-HANDLE_DW_OP(0x90, regx)
-HANDLE_DW_OP(0x91, fbreg)
-HANDLE_DW_OP(0x92, bregx)
-HANDLE_DW_OP(0x93, piece)
-HANDLE_DW_OP(0x94, deref_size)
-HANDLE_DW_OP(0x95, xderef_size)
-HANDLE_DW_OP(0x96, nop)
+HANDLE_DW_OP(0x03, addr, 2, DWARF)
+HANDLE_DW_OP(0x06, deref, 2, DWARF)
+HANDLE_DW_OP(0x08, const1u, 2, DWARF)
+HANDLE_DW_OP(0x09, const1s, 2, DWARF)
+HANDLE_DW_OP(0x0a, const2u, 2, DWARF)
+HANDLE_DW_OP(0x0b, const2s, 2, DWARF)
+HANDLE_DW_OP(0x0c, const4u, 2, DWARF)
+HANDLE_DW_OP(0x0d, const4s, 2, DWARF)
+HANDLE_DW_OP(0x0e, const8u, 2, DWARF)
+HANDLE_DW_OP(0x0f, const8s, 2, DWARF)
+HANDLE_DW_OP(0x10, constu, 2, DWARF)
+HANDLE_DW_OP(0x11, consts, 2, DWARF)
+HANDLE_DW_OP(0x12, dup, 2, DWARF)
+HANDLE_DW_OP(0x13, drop, 2, DWARF)
+HANDLE_DW_OP(0x14, over, 2, DWARF)
+HANDLE_DW_OP(0x15, pick, 2, DWARF)
+HANDLE_DW_OP(0x16, swap, 2, DWARF)
+HANDLE_DW_OP(0x17, rot, 2, DWARF)
+HANDLE_DW_OP(0x18, xderef, 2, DWARF)
+HANDLE_DW_OP(0x19, abs, 2, DWARF)
+HANDLE_DW_OP(0x1a, and, 2, DWARF)
+HANDLE_DW_OP(0x1b, div, 2, DWARF)
+HANDLE_DW_OP(0x1c, minus, 2, DWARF)
+HANDLE_DW_OP(0x1d, mod, 2, DWARF)
+HANDLE_DW_OP(0x1e, mul, 2, DWARF)
+HANDLE_DW_OP(0x1f, neg, 2, DWARF)
+HANDLE_DW_OP(0x20, not, 2, DWARF)
+HANDLE_DW_OP(0x21, or, 2, DWARF)
+HANDLE_DW_OP(0x22, plus, 2, DWARF)
+HANDLE_DW_OP(0x23, plus_uconst, 2, DWARF)
+HANDLE_DW_OP(0x24, shl, 2, DWARF)
+HANDLE_DW_OP(0x25, shr, 2, DWARF)
+HANDLE_DW_OP(0x26, shra, 2, DWARF)
+HANDLE_DW_OP(0x27, xor, 2, DWARF)
+HANDLE_DW_OP(0x28, bra, 2, DWARF)
+HANDLE_DW_OP(0x29, eq, 2, DWARF)
+HANDLE_DW_OP(0x2a, ge, 2, DWARF)
+HANDLE_DW_OP(0x2b, gt, 2, DWARF)
+HANDLE_DW_OP(0x2c, le, 2, DWARF)
+HANDLE_DW_OP(0x2d, lt, 2, DWARF)
+HANDLE_DW_OP(0x2e, ne, 2, DWARF)
+HANDLE_DW_OP(0x2f, skip, 2, DWARF)
+HANDLE_DW_OP(0x30, lit0, 2, DWARF)
+HANDLE_DW_OP(0x31, lit1, 2, DWARF)
+HANDLE_DW_OP(0x32, lit2, 2, DWARF)
+HANDLE_DW_OP(0x33, lit3, 2, DWARF)
+HANDLE_DW_OP(0x34, lit4, 2, DWARF)
+HANDLE_DW_OP(0x35, lit5, 2, DWARF)
+HANDLE_DW_OP(0x36, lit6, 2, DWARF)
+HANDLE_DW_OP(0x37, lit7, 2, DWARF)
+HANDLE_DW_OP(0x38, lit8, 2, DWARF)
+HANDLE_DW_OP(0x39, lit9, 2, DWARF)
+HANDLE_DW_OP(0x3a, lit10, 2, DWARF)
+HANDLE_DW_OP(0x3b, lit11, 2, DWARF)
+HANDLE_DW_OP(0x3c, lit12, 2, DWARF)
+HANDLE_DW_OP(0x3d, lit13, 2, DWARF)
+HANDLE_DW_OP(0x3e, lit14, 2, DWARF)
+HANDLE_DW_OP(0x3f, lit15, 2, DWARF)
+HANDLE_DW_OP(0x40, lit16, 2, DWARF)
+HANDLE_DW_OP(0x41, lit17, 2, DWARF)
+HANDLE_DW_OP(0x42, lit18, 2, DWARF)
+HANDLE_DW_OP(0x43, lit19, 2, DWARF)
+HANDLE_DW_OP(0x44, lit20, 2, DWARF)
+HANDLE_DW_OP(0x45, lit21, 2, DWARF)
+HANDLE_DW_OP(0x46, lit22, 2, DWARF)
+HANDLE_DW_OP(0x47, lit23, 2, DWARF)
+HANDLE_DW_OP(0x48, lit24, 2, DWARF)
+HANDLE_DW_OP(0x49, lit25, 2, DWARF)
+HANDLE_DW_OP(0x4a, lit26, 2, DWARF)
+HANDLE_DW_OP(0x4b, lit27, 2, DWARF)
+HANDLE_DW_OP(0x4c, lit28, 2, DWARF)
+HANDLE_DW_OP(0x4d, lit29, 2, DWARF)
+HANDLE_DW_OP(0x4e, lit30, 2, DWARF)
+HANDLE_DW_OP(0x4f, lit31, 2, DWARF)
+HANDLE_DW_OP(0x50, reg0, 2, DWARF)
+HANDLE_DW_OP(0x51, reg1, 2, DWARF)
+HANDLE_DW_OP(0x52, reg2, 2, DWARF)
+HANDLE_DW_OP(0x53, reg3, 2, DWARF)
+HANDLE_DW_OP(0x54, reg4, 2, DWARF)
+HANDLE_DW_OP(0x55, reg5, 2, DWARF)
+HANDLE_DW_OP(0x56, reg6, 2, DWARF)
+HANDLE_DW_OP(0x57, reg7, 2, DWARF)
+HANDLE_DW_OP(0x58, reg8, 2, DWARF)
+HANDLE_DW_OP(0x59, reg9, 2, DWARF)
+HANDLE_DW_OP(0x5a, reg10, 2, DWARF)
+HANDLE_DW_OP(0x5b, reg11, 2, DWARF)
+HANDLE_DW_OP(0x5c, reg12, 2, DWARF)
+HANDLE_DW_OP(0x5d, reg13, 2, DWARF)
+HANDLE_DW_OP(0x5e, reg14, 2, DWARF)
+HANDLE_DW_OP(0x5f, reg15, 2, DWARF)
+HANDLE_DW_OP(0x60, reg16, 2, DWARF)
+HANDLE_DW_OP(0x61, reg17, 2, DWARF)
+HANDLE_DW_OP(0x62, reg18, 2, DWARF)
+HANDLE_DW_OP(0x63, reg19, 2, DWARF)
+HANDLE_DW_OP(0x64, reg20, 2, DWARF)
+HANDLE_DW_OP(0x65, reg21, 2, DWARF)
+HANDLE_DW_OP(0x66, reg22, 2, DWARF)
+HANDLE_DW_OP(0x67, reg23, 2, DWARF)
+HANDLE_DW_OP(0x68, reg24, 2, DWARF)
+HANDLE_DW_OP(0x69, reg25, 2, DWARF)
+HANDLE_DW_OP(0x6a, reg26, 2, DWARF)
+HANDLE_DW_OP(0x6b, reg27, 2, DWARF)
+HANDLE_DW_OP(0x6c, reg28, 2, DWARF)
+HANDLE_DW_OP(0x6d, reg29, 2, DWARF)
+HANDLE_DW_OP(0x6e, reg30, 2, DWARF)
+HANDLE_DW_OP(0x6f, reg31, 2, DWARF)
+HANDLE_DW_OP(0x70, breg0, 2, DWARF)
+HANDLE_DW_OP(0x71, breg1, 2, DWARF)
+HANDLE_DW_OP(0x72, breg2, 2, DWARF)
+HANDLE_DW_OP(0x73, breg3, 2, DWARF)
+HANDLE_DW_OP(0x74, breg4, 2, DWARF)
+HANDLE_DW_OP(0x75, breg5, 2, DWARF)
+HANDLE_DW_OP(0x76, breg6, 2, DWARF)
+HANDLE_DW_OP(0x77, breg7, 2, DWARF)
+HANDLE_DW_OP(0x78, breg8, 2, DWARF)
+HANDLE_DW_OP(0x79, breg9, 2, DWARF)
+HANDLE_DW_OP(0x7a, breg10, 2, DWARF)
+HANDLE_DW_OP(0x7b, breg11, 2, DWARF)
+HANDLE_DW_OP(0x7c, breg12, 2, DWARF)
+HANDLE_DW_OP(0x7d, breg13, 2, DWARF)
+HANDLE_DW_OP(0x7e, breg14, 2, DWARF)
+HANDLE_DW_OP(0x7f, breg15, 2, DWARF)
+HANDLE_DW_OP(0x80, breg16, 2, DWARF)
+HANDLE_DW_OP(0x81, breg17, 2, DWARF)
+HANDLE_DW_OP(0x82, breg18, 2, DWARF)
+HANDLE_DW_OP(0x83, breg19, 2, DWARF)
+HANDLE_DW_OP(0x84, breg20, 2, DWARF)
+HANDLE_DW_OP(0x85, breg21, 2, DWARF)
+HANDLE_DW_OP(0x86, breg22, 2, DWARF)
+HANDLE_DW_OP(0x87, breg23, 2, DWARF)
+HANDLE_DW_OP(0x88, breg24, 2, DWARF)
+HANDLE_DW_OP(0x89, breg25, 2, DWARF)
+HANDLE_DW_OP(0x8a, breg26, 2, DWARF)
+HANDLE_DW_OP(0x8b, breg27, 2, DWARF)
+HANDLE_DW_OP(0x8c, breg28, 2, DWARF)
+HANDLE_DW_OP(0x8d, breg29, 2, DWARF)
+HANDLE_DW_OP(0x8e, breg30, 2, DWARF)
+HANDLE_DW_OP(0x8f, breg31, 2, DWARF)
+HANDLE_DW_OP(0x90, regx, 2, DWARF)
+HANDLE_DW_OP(0x91, fbreg, 2, DWARF)
+HANDLE_DW_OP(0x92, bregx, 2, DWARF)
+HANDLE_DW_OP(0x93, piece, 2, DWARF)
+HANDLE_DW_OP(0x94, deref_size, 2, DWARF)
+HANDLE_DW_OP(0x95, xderef_size, 2, DWARF)
+HANDLE_DW_OP(0x96, nop, 2, DWARF)
// New in DWARF v3:
-HANDLE_DW_OP(0x97, push_object_address)
-HANDLE_DW_OP(0x98, call2)
-HANDLE_DW_OP(0x99, call4)
-HANDLE_DW_OP(0x9a, call_ref)
-HANDLE_DW_OP(0x9b, form_tls_address)
-HANDLE_DW_OP(0x9c, call_frame_cfa)
-HANDLE_DW_OP(0x9d, bit_piece)
+HANDLE_DW_OP(0x97, push_object_address, 3, DWARF)
+HANDLE_DW_OP(0x98, call2, 3, DWARF)
+HANDLE_DW_OP(0x99, call4, 3, DWARF)
+HANDLE_DW_OP(0x9a, call_ref, 3, DWARF)
+HANDLE_DW_OP(0x9b, form_tls_address, 3, DWARF)
+HANDLE_DW_OP(0x9c, call_frame_cfa, 3, DWARF)
+HANDLE_DW_OP(0x9d, bit_piece, 3, DWARF)
// New in DWARF v4:
-HANDLE_DW_OP(0x9e, implicit_value)
-HANDLE_DW_OP(0x9f, stack_value)
+HANDLE_DW_OP(0x9e, implicit_value, 4, DWARF)
+HANDLE_DW_OP(0x9f, stack_value, 4, DWARF)
// New in DWARF v5:
-HANDLE_DW_OP(0xa0, implicit_pointer)
-HANDLE_DW_OP(0xa1, addrx)
-HANDLE_DW_OP(0xa2, constx)
-HANDLE_DW_OP(0xa3, entry_value)
-HANDLE_DW_OP(0xa4, const_type)
-HANDLE_DW_OP(0xa5, regval_type)
-HANDLE_DW_OP(0xa6, deref_type)
-HANDLE_DW_OP(0xa7, xderef_type)
-HANDLE_DW_OP(0xa8, convert)
-HANDLE_DW_OP(0xa9, reinterpret)
+HANDLE_DW_OP(0xa0, implicit_pointer, 5, DWARF)
+HANDLE_DW_OP(0xa1, addrx, 5, DWARF)
+HANDLE_DW_OP(0xa2, constx, 5, DWARF)
+HANDLE_DW_OP(0xa3, entry_value, 5, DWARF)
+HANDLE_DW_OP(0xa4, const_type, 5, DWARF)
+HANDLE_DW_OP(0xa5, regval_type, 5, DWARF)
+HANDLE_DW_OP(0xa6, deref_type, 5, DWARF)
+HANDLE_DW_OP(0xa7, xderef_type, 5, DWARF)
+HANDLE_DW_OP(0xa8, convert, 5, DWARF)
+HANDLE_DW_OP(0xa9, reinterpret, 5, DWARF)
// Vendor extensions:
// Extensions for GNU-style thread-local storage.
-HANDLE_DW_OP(0xe0, GNU_push_tls_address)
+HANDLE_DW_OP(0xe0, GNU_push_tls_address, 0, GNU)
// Extensions for Fission proposal.
-HANDLE_DW_OP(0xfb, GNU_addr_index)
-HANDLE_DW_OP(0xfc, GNU_const_index)
+HANDLE_DW_OP(0xfb, GNU_addr_index, 0, GNU)
+HANDLE_DW_OP(0xfc, GNU_const_index, 0, GNU)
// DWARF languages.
-HANDLE_DW_LANG(0x0001, C89)
-HANDLE_DW_LANG(0x0002, C)
-HANDLE_DW_LANG(0x0003, Ada83)
-HANDLE_DW_LANG(0x0004, C_plus_plus)
-HANDLE_DW_LANG(0x0005, Cobol74)
-HANDLE_DW_LANG(0x0006, Cobol85)
-HANDLE_DW_LANG(0x0007, Fortran77)
-HANDLE_DW_LANG(0x0008, Fortran90)
-HANDLE_DW_LANG(0x0009, Pascal83)
-HANDLE_DW_LANG(0x000a, Modula2)
+HANDLE_DW_LANG(0x0001, C89, 2, DWARF)
+HANDLE_DW_LANG(0x0002, C, 2, DWARF)
+HANDLE_DW_LANG(0x0003, Ada83, 2, DWARF)
+HANDLE_DW_LANG(0x0004, C_plus_plus, 2, DWARF)
+HANDLE_DW_LANG(0x0005, Cobol74, 2, DWARF)
+HANDLE_DW_LANG(0x0006, Cobol85, 2, DWARF)
+HANDLE_DW_LANG(0x0007, Fortran77, 2, DWARF)
+HANDLE_DW_LANG(0x0008, Fortran90, 2, DWARF)
+HANDLE_DW_LANG(0x0009, Pascal83, 2, DWARF)
+HANDLE_DW_LANG(0x000a, Modula2, 2, DWARF)
// New in DWARF v3:
-HANDLE_DW_LANG(0x000b, Java)
-HANDLE_DW_LANG(0x000c, C99)
-HANDLE_DW_LANG(0x000d, Ada95)
-HANDLE_DW_LANG(0x000e, Fortran95)
-HANDLE_DW_LANG(0x000f, PLI)
-HANDLE_DW_LANG(0x0010, ObjC)
-HANDLE_DW_LANG(0x0011, ObjC_plus_plus)
-HANDLE_DW_LANG(0x0012, UPC)
-HANDLE_DW_LANG(0x0013, D)
+HANDLE_DW_LANG(0x000b, Java, 3, DWARF)
+HANDLE_DW_LANG(0x000c, C99, 3, DWARF)
+HANDLE_DW_LANG(0x000d, Ada95, 3, DWARF)
+HANDLE_DW_LANG(0x000e, Fortran95, 3, DWARF)
+HANDLE_DW_LANG(0x000f, PLI, 3, DWARF)
+HANDLE_DW_LANG(0x0010, ObjC, 3, DWARF)
+HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 3, DWARF)
+HANDLE_DW_LANG(0x0012, UPC, 3, DWARF)
+HANDLE_DW_LANG(0x0013, D, 3, DWARF)
// New in DWARF v4:
-HANDLE_DW_LANG(0x0014, Python)
+HANDLE_DW_LANG(0x0014, Python, 4, DWARF)
// New in DWARF v5:
-HANDLE_DW_LANG(0x0015, OpenCL)
-HANDLE_DW_LANG(0x0016, Go)
-HANDLE_DW_LANG(0x0017, Modula3)
-HANDLE_DW_LANG(0x0018, Haskell)
-HANDLE_DW_LANG(0x0019, C_plus_plus_03)
-HANDLE_DW_LANG(0x001a, C_plus_plus_11)
-HANDLE_DW_LANG(0x001b, OCaml)
-HANDLE_DW_LANG(0x001c, Rust)
-HANDLE_DW_LANG(0x001d, C11)
-HANDLE_DW_LANG(0x001e, Swift)
-HANDLE_DW_LANG(0x001f, Julia)
-HANDLE_DW_LANG(0x0020, Dylan)
-HANDLE_DW_LANG(0x0021, C_plus_plus_14)
-HANDLE_DW_LANG(0x0022, Fortran03)
-HANDLE_DW_LANG(0x0023, Fortran08)
-HANDLE_DW_LANG(0x0024, RenderScript)
-HANDLE_DW_LANG(0x0025, BLISS)
+HANDLE_DW_LANG(0x0015, OpenCL, 5, DWARF)
+HANDLE_DW_LANG(0x0016, Go, 5, DWARF)
+HANDLE_DW_LANG(0x0017, Modula3, 5, DWARF)
+HANDLE_DW_LANG(0x0018, Haskell, 5, DWARF)
+HANDLE_DW_LANG(0x0019, C_plus_plus_03, 5, DWARF)
+HANDLE_DW_LANG(0x001a, C_plus_plus_11, 5, DWARF)
+HANDLE_DW_LANG(0x001b, OCaml, 5, DWARF)
+HANDLE_DW_LANG(0x001c, Rust, 5, DWARF)
+HANDLE_DW_LANG(0x001d, C11, 5, DWARF)
+HANDLE_DW_LANG(0x001e, Swift, 5, DWARF)
+HANDLE_DW_LANG(0x001f, Julia, 5, DWARF)
+HANDLE_DW_LANG(0x0020, Dylan, 5, DWARF)
+HANDLE_DW_LANG(0x0021, C_plus_plus_14, 5, DWARF)
+HANDLE_DW_LANG(0x0022, Fortran03, 5, DWARF)
+HANDLE_DW_LANG(0x0023, Fortran08, 5, DWARF)
+HANDLE_DW_LANG(0x0024, RenderScript, 5, DWARF)
+HANDLE_DW_LANG(0x0025, BLISS, 5, DWARF)
// Vendor extensions:
-HANDLE_DW_LANG(0x8001, Mips_Assembler)
-HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript)
-HANDLE_DW_LANG(0xb000, BORLAND_Delphi)
+HANDLE_DW_LANG(0x8001, Mips_Assembler, 0, MIPS)
+HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, GOOGLE)
+HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, BORLAND)
// DWARF attribute type encodings.
-HANDLE_DW_ATE(0x01, address)
-HANDLE_DW_ATE(0x02, boolean)
-HANDLE_DW_ATE(0x03, complex_float)
-HANDLE_DW_ATE(0x04, float)
-HANDLE_DW_ATE(0x05, signed)
-HANDLE_DW_ATE(0x06, signed_char)
-HANDLE_DW_ATE(0x07, unsigned)
-HANDLE_DW_ATE(0x08, unsigned_char)
+HANDLE_DW_ATE(0x01, address, 2, DWARF)
+HANDLE_DW_ATE(0x02, boolean, 2, DWARF)
+HANDLE_DW_ATE(0x03, complex_float, 2, DWARF)
+HANDLE_DW_ATE(0x04, float, 2, DWARF)
+HANDLE_DW_ATE(0x05, signed, 2, DWARF)
+HANDLE_DW_ATE(0x06, signed_char, 2, DWARF)
+HANDLE_DW_ATE(0x07, unsigned, 2, DWARF)
+HANDLE_DW_ATE(0x08, unsigned_char, 2, DWARF)
// New in DWARF v3:
-HANDLE_DW_ATE(0x09, imaginary_float)
-HANDLE_DW_ATE(0x0a, packed_decimal)
-HANDLE_DW_ATE(0x0b, numeric_string)
-HANDLE_DW_ATE(0x0c, edited)
-HANDLE_DW_ATE(0x0d, signed_fixed)
-HANDLE_DW_ATE(0x0e, unsigned_fixed)
-HANDLE_DW_ATE(0x0f, decimal_float)
+HANDLE_DW_ATE(0x09, imaginary_float, 3, DWARF)
+HANDLE_DW_ATE(0x0a, packed_decimal, 3, DWARF)
+HANDLE_DW_ATE(0x0b, numeric_string, 3, DWARF)
+HANDLE_DW_ATE(0x0c, edited, 3, DWARF)
+HANDLE_DW_ATE(0x0d, signed_fixed, 3, DWARF)
+HANDLE_DW_ATE(0x0e, unsigned_fixed, 3, DWARF)
+HANDLE_DW_ATE(0x0f, decimal_float, 3, DWARF)
// New in DWARF v4:
-HANDLE_DW_ATE(0x10, UTF)
+HANDLE_DW_ATE(0x10, UTF, 4, DWARF)
// New in DWARF v5:
-HANDLE_DW_ATE(0x11, UCS)
-HANDLE_DW_ATE(0x12, ASCII)
+HANDLE_DW_ATE(0x11, UCS, 5, DWARF)
+HANDLE_DW_ATE(0x12, ASCII, 5, DWARF)
// DWARF virtuality codes.
HANDLE_DW_VIRTUALITY(0x00, none)
diff --git a/contrib/llvm/include/llvm/Support/Dwarf.h b/contrib/llvm/include/llvm/Support/Dwarf.h
index 84056682924e..3061b7b5fa0f 100644
--- a/contrib/llvm/include/llvm/Support/Dwarf.h
+++ b/contrib/llvm/include/llvm/Support/Dwarf.h
@@ -46,7 +46,15 @@ enum LLVMConstants : uint32_t {
DWARF_VERSION = 4, // Default dwarf version we output.
DW_PUBTYPES_VERSION = 2, // Section version number for .debug_pubtypes.
DW_PUBNAMES_VERSION = 2, // Section version number for .debug_pubnames.
- DW_ARANGES_VERSION = 2 // Section version number for .debug_aranges.
+ DW_ARANGES_VERSION = 2, // Section version number for .debug_aranges.
+ // Identifiers we use to distinguish vendor extensions.
+ DWARF_VENDOR_DWARF = 0, // Defined in v2 or later of the DWARF standard.
+ DWARF_VENDOR_APPLE = 1,
+ DWARF_VENDOR_BORLAND = 2,
+ DWARF_VENDOR_GNU = 3,
+ DWARF_VENDOR_GOOGLE = 4,
+ DWARF_VENDOR_LLVM = 5,
+ DWARF_VENDOR_MIPS = 6
};
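These vendor tags pair with the new VENDOR argument that each HANDLE_DW_* macro now carries (see the Dwarf.def hunk above): token-pasting DWARF_VENDOR_##VENDOR turns the argument into one of the enumerators. A sketch of a generated lookup (an illustrative reconstruction, not quoted from the commit):

    unsigned llvm::dwarf::TagVendor(dwarf::Tag T) {
      switch (T) {
      default:
        return 0;
    #define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR)                           \
      case DW_TAG_##NAME:                                                      \
        return DWARF_VENDOR_##VENDOR; // e.g. DWARF_VENDOR_DWARF or _GNU
    #include "llvm/Support/Dwarf.def"
      }
    }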
// Special ID values that distinguish a CIE from a FDE in DWARF CFI.
@@ -55,7 +63,7 @@ const uint32_t DW_CIE_ID = UINT32_MAX;
const uint64_t DW64_CIE_ID = UINT64_MAX;
enum Tag : uint16_t {
-#define HANDLE_DW_TAG(ID, NAME) DW_TAG_##NAME = ID,
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) DW_TAG_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_TAG_lo_user = 0x4080,
DW_TAG_hi_user = 0xffff,
@@ -92,20 +100,20 @@ inline bool isType(Tag T) {
/// Attributes.
enum Attribute : uint16_t {
-#define HANDLE_DW_AT(ID, NAME) DW_AT_##NAME = ID,
+#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) DW_AT_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_AT_lo_user = 0x2000,
DW_AT_hi_user = 0x3fff,
};
enum Form : uint16_t {
-#define HANDLE_DW_FORM(ID, NAME) DW_FORM_##NAME = ID,
+#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) DW_FORM_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_FORM_lo_user = 0x1f00, ///< Not specified by DWARF.
};
enum LocationAtom {
-#define HANDLE_DW_OP(ID, NAME) DW_OP_##NAME = ID,
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) DW_OP_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_OP_lo_user = 0xe0,
DW_OP_hi_user = 0xff,
@@ -113,7 +121,7 @@ enum LocationAtom {
};
enum TypeKind {
-#define HANDLE_DW_ATE(ID, NAME) DW_ATE_##NAME = ID,
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) DW_ATE_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_ATE_lo_user = 0x80,
DW_ATE_hi_user = 0xff
@@ -164,7 +172,7 @@ enum DefaultedMemberAttribute {
};
enum SourceLanguage {
-#define HANDLE_DW_LANG(ID, NAME) DW_LANG_##NAME = ID,
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) DW_LANG_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_LANG_lo_user = 0x8000,
DW_LANG_hi_user = 0xffff
@@ -220,8 +228,8 @@ enum LineNumberExtendedOps {
DW_LNE_hi_user = 0xff
};
-enum LinerNumberEntryFormat {
-#define HANDLE_DW_LNCT(ID, NAME) DW_DEFAULTED_##NAME = ID,
+enum LineNumberEntryFormat {
+#define HANDLE_DW_LNCT(ID, NAME) DW_LNCT_##NAME = ID,
#include "llvm/Support/Dwarf.def"
DW_LNCT_lo_user = 0x2000,
DW_LNCT_hi_user = 0x3fff,
@@ -406,6 +414,40 @@ unsigned getAttributeEncoding(StringRef EncodingString);
unsigned getMacinfo(StringRef MacinfoString);
/// @}
+/// \defgroup DwarfConstantsVersioning Dwarf version for constants
+///
+/// For constants defined by DWARF, returns the DWARF version when the constant
+/// was first defined. For vendor extensions, if there is a version-related
+/// policy for when to emit them, returns a version number for that policy.
+/// Otherwise returns 0.
+///
+/// @{
+unsigned TagVersion(Tag T);
+unsigned AttributeVersion(Attribute A);
+unsigned FormVersion(Form F);
+unsigned OperationVersion(LocationAtom O);
+unsigned AttributeEncodingVersion(TypeKind E);
+unsigned LanguageVersion(SourceLanguage L);
+/// @}
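The version queries in this group can be generated the same way, by re-including Dwarf.def with a macro that returns the new VERSION argument. A sketch for AttributeVersion (illustrative; the committed implementation may differ in detail):

    unsigned llvm::dwarf::AttributeVersion(dwarf::Attribute A) {
      switch (A) {
      default:
        return 0;
    #define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR)                            \
      case DW_AT_##NAME:                                                       \
        return VERSION; // 2-5 for standard attributes, 0 for pure extensions
    #include "llvm/Support/Dwarf.def"
      }
    }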
+
+/// \defgroup DwarfConstantsVendor Dwarf "vendor" for constants
+///
+/// These functions return an identifier describing "who" defined the constant,
+/// either the DWARF standard itself or the vendor who defined the extension.
+///
+/// @{
+unsigned TagVendor(Tag T);
+unsigned AttributeVendor(Attribute A);
+unsigned FormVendor(Form F);
+unsigned OperationVendor(LocationAtom O);
+unsigned AttributeEncodingVendor(TypeKind E);
+unsigned LanguageVendor(SourceLanguage L);
+/// @}
+
+/// Returns true if the given form is defined in the given DWARF version, or
+/// if it is a vendor extension and extensions are allowed.
+bool isValidFormForVersion(Form F, unsigned Version, bool ExtensionsOk = true);
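Given FormVersion and FormVendor, this check reduces to two comparisons. A plausible implementation consistent with the comment above (an assumption, not the verbatim source):

    bool llvm::dwarf::isValidFormForVersion(Form F, unsigned Version,
                                            bool ExtensionsOk) {
      if (FormVendor(F) == DWARF_VENDOR_DWARF) {
        // A standard form is valid once the unit's version reaches the
        // version that introduced it.
        unsigned FV = FormVersion(F);
        return FV > 0 && FV <= Version;
      }
      // Vendor extensions are valid exactly when extensions are permitted.
      return ExtensionsOk;
    }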
+
/// \brief Returns the symbolic string representing Val when used as a value
/// for attribute Attr.
StringRef AttributeValueString(uint16_t Attr, unsigned Val);
diff --git a/contrib/llvm/include/llvm/Support/GenericDomTree.h b/contrib/llvm/include/llvm/Support/GenericDomTree.h
index 20f3ffdf3aab..eb7c27d2ffa5 100644
--- a/contrib/llvm/include/llvm/Support/GenericDomTree.h
+++ b/contrib/llvm/include/llvm/Support/GenericDomTree.h
@@ -276,32 +276,25 @@ protected:
// NewBB is split and now it has one successor. Update dominator tree to
// reflect this change.
- template <class N, class GraphT>
- void Split(DominatorTreeBaseByGraphTraits<GraphT> &DT,
- typename GraphT::NodeRef NewBB) {
+ template <class N>
+ void Split(typename GraphTraits<N>::NodeRef NewBB) {
+ using GraphT = GraphTraits<N>;
+ using NodeRef = typename GraphT::NodeRef;
assert(std::distance(GraphT::child_begin(NewBB),
GraphT::child_end(NewBB)) == 1 &&
"NewBB should have a single successor!");
- typename GraphT::NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
+ NodeRef NewBBSucc = *GraphT::child_begin(NewBB);
- std::vector<typename GraphT::NodeRef> PredBlocks;
- typedef GraphTraits<Inverse<N>> InvTraits;
- for (typename InvTraits::ChildIteratorType
- PI = InvTraits::child_begin(NewBB),
- PE = InvTraits::child_end(NewBB);
- PI != PE; ++PI)
- PredBlocks.push_back(*PI);
+ std::vector<NodeRef> PredBlocks;
+ for (const auto Pred : children<Inverse<N>>(NewBB))
+ PredBlocks.push_back(Pred);
assert(!PredBlocks.empty() && "No predblocks?");
bool NewBBDominatesNewBBSucc = true;
- for (typename InvTraits::ChildIteratorType
- PI = InvTraits::child_begin(NewBBSucc),
- E = InvTraits::child_end(NewBBSucc);
- PI != E; ++PI) {
- typename InvTraits::NodeRef ND = *PI;
- if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
- DT.isReachableFromEntry(ND)) {
+ for (const auto Pred : children<Inverse<N>>(NewBBSucc)) {
+ if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
+ isReachableFromEntry(Pred)) {
NewBBDominatesNewBBSucc = false;
break;
}
@@ -312,7 +305,7 @@ protected:
NodeT *NewBBIDom = nullptr;
unsigned i = 0;
for (i = 0; i < PredBlocks.size(); ++i)
- if (DT.isReachableFromEntry(PredBlocks[i])) {
+ if (isReachableFromEntry(PredBlocks[i])) {
NewBBIDom = PredBlocks[i];
break;
}
@@ -324,18 +317,18 @@ protected:
return;
for (i = i + 1; i < PredBlocks.size(); ++i) {
- if (DT.isReachableFromEntry(PredBlocks[i]))
- NewBBIDom = DT.findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
+ if (isReachableFromEntry(PredBlocks[i]))
+ NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
}
// Create the new dominator tree node... and set the idom of NewBB.
- DomTreeNodeBase<NodeT> *NewBBNode = DT.addNewBlock(NewBB, NewBBIDom);
+ DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);
// If NewBB strictly dominates other blocks, then it is now the immediate
// dominator of NewBBSucc. Update the dominator tree as appropriate.
if (NewBBDominatesNewBBSucc) {
- DomTreeNodeBase<NodeT> *NewBBSuccNode = DT.getNode(NewBBSucc);
- DT.changeImmediateDominator(NewBBSuccNode, NewBBNode);
+ DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
+ changeImmediateDominator(NewBBSuccNode, NewBBNode);
}
}
@@ -379,7 +372,7 @@ public:
if (DomTreeNodes.size() != OtherDomTreeNodes.size())
return true;
- for (const auto &DomTreeNode : this->DomTreeNodes) {
+ for (const auto &DomTreeNode : DomTreeNodes) {
NodeT *BB = DomTreeNode.first;
typename DomTreeNodeMapType::const_iterator OI =
OtherDomTreeNodes.find(BB);
@@ -663,10 +656,9 @@ public:
/// tree to reflect this change.
void splitBlock(NodeT *NewBB) {
if (this->IsPostDominators)
- this->Split<Inverse<NodeT *>, GraphTraits<Inverse<NodeT *>>>(*this,
- NewBB);
+ Split<Inverse<NodeT *>>(NewBB);
else
- this->Split<NodeT *, GraphTraits<NodeT *>>(*this, NewBB);
+ Split<NodeT *>(NewBB);
}
/// print - Convert to human readable form
@@ -677,7 +669,7 @@ public:
o << "Inorder PostDominator Tree: ";
else
o << "Inorder Dominator Tree: ";
- if (!this->DFSInfoValid)
+ if (!DFSInfoValid)
o << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
o << "\n";
@@ -712,12 +704,12 @@ protected:
// immediate dominator.
NodeT *IDom = getIDom(BB);
- assert(IDom || this->DomTreeNodes[nullptr]);
+ assert(IDom || DomTreeNodes[nullptr]);
DomTreeNodeBase<NodeT> *IDomNode = getNodeForBlock(IDom);
// Add a new tree node for this NodeT, and link it as a child of
// IDomNode
- return (this->DomTreeNodes[BB] = IDomNode->addChild(
+ return (DomTreeNodes[BB] = IDomNode->addChild(
llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
}
@@ -780,7 +772,7 @@ public:
template <class FT> void recalculate(FT &F) {
typedef GraphTraits<FT *> TraitsTy;
reset();
- this->Vertex.push_back(nullptr);
+ Vertex.push_back(nullptr);
if (!this->IsPostDominators) {
// Initialize root
diff --git a/contrib/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm/include/llvm/Support/GraphWriter.h
index 7555d5b31a8d..c318fea53651 100644
--- a/contrib/llvm/include/llvm/Support/GraphWriter.h
+++ b/contrib/llvm/include/llvm/Support/GraphWriter.h
@@ -143,10 +143,9 @@ public:
void writeNodes() {
// Loop over the graph, printing it out...
- for (node_iterator I = GTraits::nodes_begin(G), E = GTraits::nodes_end(G);
- I != E; ++I)
- if (!isNodeHidden(*I))
- writeNode(*I);
+ for (const auto Node : nodes<GraphType>(G))
+ if (!isNodeHidden(Node))
+ writeNode(Node);
}
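This loop, like the GenericDomTree::Split rewrite above, swaps explicit iterator-pair loops for the GraphTraits range adapters (nodes<> and children<>). A minimal sketch of the idiom on a CFG (countNeighbors is a hypothetical helper):

    #include "llvm/ADT/GraphTraits.h"
    #include "llvm/IR/CFG.h" // GraphTraits specializations for BasicBlock

    // Hypothetical helper: count CFG neighbors via the range adapters.
    static unsigned countNeighbors(llvm::BasicBlock *BB) {
      unsigned N = 0;
      for (llvm::BasicBlock *Succ : llvm::children<llvm::BasicBlock *>(BB))
        (void)Succ, ++N; // successors, per GraphTraits<BasicBlock *>
      for (llvm::BasicBlock *Pred :
           llvm::children<llvm::Inverse<llvm::BasicBlock *>>(BB))
        (void)Pred, ++N; // predecessors, via the Inverse graph
      return N;
    }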
bool isNodeHidden(NodeRef Node) {
diff --git a/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h b/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h
index 02df4d806f13..e18e58b7b5b2 100644
--- a/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h
+++ b/contrib/llvm/include/llvm/Support/LowLevelTypeImpl.h
@@ -39,100 +39,123 @@ class raw_ostream;
class LLT {
public:
- enum TypeKind : uint16_t {
- Invalid,
- Scalar,
- Pointer,
- Vector,
- };
-
/// Get a low-level scalar or aggregate "bag of bits".
static LLT scalar(unsigned SizeInBits) {
assert(SizeInBits > 0 && "invalid scalar size");
- return LLT{Scalar, 1, SizeInBits};
+ return LLT{/*isPointer=*/false, /*isVector=*/false, /*NumElements=*/0,
+ SizeInBits, /*AddressSpace=*/0};
}
/// Get a low-level pointer in the given address space (defaulting to 0).
static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
- return LLT{Pointer, AddressSpace, SizeInBits};
+ assert(SizeInBits > 0 && "invalid pointer size");
+ return LLT{/*isPointer=*/true, /*isVector=*/false, /*NumElements=*/0,
+ SizeInBits, AddressSpace};
}
/// Get a low-level vector of some number of elements and element width.
/// \p NumElements must be at least 2.
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
assert(NumElements > 1 && "invalid number of vector elements");
- return LLT{Vector, NumElements, ScalarSizeInBits};
+ assert(ScalarSizeInBits > 0 && "invalid vector element size");
+ return LLT{/*isPointer=*/false, /*isVector=*/true, NumElements,
+ ScalarSizeInBits, /*AddressSpace=*/0};
}
/// Get a low-level vector of some number of elements and element type.
static LLT vector(uint16_t NumElements, LLT ScalarTy) {
assert(NumElements > 1 && "invalid number of vector elements");
- assert(ScalarTy.isScalar() && "invalid vector element type");
- return LLT{Vector, NumElements, ScalarTy.getSizeInBits()};
+ assert(!ScalarTy.isVector() && "invalid vector element type");
+ return LLT{ScalarTy.isPointer(), /*isVector=*/true, NumElements,
+ ScalarTy.getSizeInBits(),
+ ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
}
- explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits)
- : SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) {
- assert((Kind != Vector || ElementsOrAddrSpace > 1) &&
- "invalid number of vector elements");
+ explicit LLT(bool isPointer, bool isVector, uint16_t NumElements,
+ unsigned SizeInBits, unsigned AddressSpace) {
+ init(isPointer, isVector, NumElements, SizeInBits, AddressSpace);
}
-
- explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {}
+ explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
explicit LLT(MVT VT);
- bool isValid() const { return Kind != Invalid; }
+ bool isValid() const { return RawData != 0; }
- bool isScalar() const { return Kind == Scalar; }
+ bool isScalar() const { return isValid() && !IsPointer && !IsVector; }
- bool isPointer() const { return Kind == Pointer; }
+ bool isPointer() const { return isValid() && IsPointer && !IsVector; }
- bool isVector() const { return Kind == Vector; }
+ bool isVector() const { return isValid() && IsVector; }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
uint16_t getNumElements() const {
- assert(isVector() && "cannot get number of elements on scalar/aggregate");
- return ElementsOrAddrSpace;
+ assert(IsVector && "cannot get number of elements on scalar/aggregate");
+ if (!IsPointer)
+ return getFieldValue(VectorElementsFieldInfo);
+ else
+ return getFieldValue(PointerVectorElementsFieldInfo);
}
/// Returns the total size of the type. Must only be called on sized types.
unsigned getSizeInBits() const {
if (isPointer() || isScalar())
- return SizeInBits;
- return SizeInBits * ElementsOrAddrSpace;
+ return getScalarSizeInBits();
+ return getScalarSizeInBits() * getNumElements();
}
unsigned getScalarSizeInBits() const {
- return SizeInBits;
+ assert(RawData != 0 && "Invalid Type");
+ if (!IsVector) {
+ if (!IsPointer)
+ return getFieldValue(ScalarSizeFieldInfo);
+ else
+ return getFieldValue(PointerSizeFieldInfo);
+ } else {
+ if (!IsPointer)
+ return getFieldValue(VectorSizeFieldInfo);
+ else
+ return getFieldValue(PointerVectorSizeFieldInfo);
+ }
}
unsigned getAddressSpace() const {
- assert(isPointer() && "cannot get address space of non-pointer type");
- return ElementsOrAddrSpace;
+ assert(RawData != 0 && "Invalid Type");
+ assert(IsPointer && "cannot get address space of non-pointer type");
+ if (!IsVector)
+ return getFieldValue(PointerAddressSpaceFieldInfo);
+ else
+ return getFieldValue(PointerVectorAddressSpaceFieldInfo);
}
/// Returns the vector's element type. Only valid for vector types.
LLT getElementType() const {
assert(isVector() && "cannot get element type of scalar/aggregate");
- return scalar(SizeInBits);
+ if (IsPointer)
+ return pointer(getAddressSpace(), getScalarSizeInBits());
+ else
+ return scalar(getScalarSizeInBits());
}
/// Get a low-level type with half the size of the original, by halving the
/// size of the scalar type involved. For example `s32` will become `s16`,
/// `<2 x s32>` will become `<2 x s16>`.
LLT halfScalarSize() const {
- assert(!isPointer() && getScalarSizeInBits() > 1 &&
+ assert(!IsPointer && getScalarSizeInBits() > 1 &&
getScalarSizeInBits() % 2 == 0 && "cannot half size of this type");
- return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2};
+ return LLT{/*isPointer=*/false, IsVector ? true : false,
+ IsVector ? getNumElements() : (uint16_t)0,
+ getScalarSizeInBits() / 2, /*AddressSpace=*/0};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// size of the scalar type involved. For example `s32` will become `s64`,
/// `<2 x s32>` will become `<2 x s64>`.
LLT doubleScalarSize() const {
- assert(!isPointer() && "cannot change size of this type");
- return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2};
+ assert(!IsPointer && "cannot change size of this type");
+ return LLT{/*isPointer=*/false, IsVector ? true : false,
+ IsVector ? getNumElements() : (uint16_t)0,
+ getScalarSizeInBits() * 2, /*AddressSpace=*/0};
}
/// Get a low-level type with half the size of the original, by halving the
@@ -140,13 +163,13 @@ public:
/// a vector type with an even number of elements. For example `<4 x s32>`
/// will become `<2 x s32>`, `<2 x s32>` will become `s32`.
LLT halfElements() const {
- assert(isVector() && ElementsOrAddrSpace % 2 == 0 &&
- "cannot half odd vector");
- if (ElementsOrAddrSpace == 2)
- return scalar(SizeInBits);
+ assert(isVector() && getNumElements() % 2 == 0 && "cannot half odd vector");
+ if (getNumElements() == 2)
+ return scalar(getScalarSizeInBits());
- return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2),
- SizeInBits};
+ return LLT{/*isPointer=*/false, /*isVector=*/true,
+ (uint16_t)(getNumElements() / 2), getScalarSizeInBits(),
+ /*AddressSpace=*/0};
}
/// Get a low-level type with twice the size of the original, by doubling the
@@ -154,25 +177,105 @@ public:
/// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling
/// the number of elements in sN produces <2 x sN>.
LLT doubleElements() const {
- assert(!isPointer() && "cannot double elements in pointer");
- return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2),
- SizeInBits};
+ return LLT{IsPointer ? true : false, /*isVector=*/true,
+ (uint16_t)(getNumElements() * 2), getScalarSizeInBits(),
+ IsPointer ? getAddressSpace() : 0};
}
void print(raw_ostream &OS) const;
bool operator==(const LLT &RHS) const {
- return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits &&
- ElementsOrAddrSpace == RHS.ElementsOrAddrSpace;
+ return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
+ RHS.RawData == RawData;
}
bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
friend struct DenseMapInfo<LLT>;
+
private:
- unsigned SizeInBits;
- uint16_t ElementsOrAddrSpace;
- TypeKind Kind;
+ /// LLT is packed into 64 bits as follows:
+ /// isPointer : 1
+ /// isVector : 1
+ /// with 62 bits remaining for Kind-specific data, packed in bitfields
+ /// as described below. As there isn't a simple portable way to pack bits
+  /// into bitfields, the different fields of the packed structure are
+ /// described in static const *Field variables. Each of these variables
+ /// is a 2-element array, with the first element describing the bitfield size
+ /// and the second element describing the bitfield offset.
+ typedef int BitFieldInfo[2];
+ ///
+ /// This is how the bitfields are packed per Kind:
+ /// * Invalid:
+  ///     gets encoded as RawData == 0; this cannot collide with any valid
+  ///     encoding, since valid encodings always have SizeInBits/SizeOfElement > 0.
+ /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
+ /// SizeInBits: 32;
+ static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 0};
+ /// * Pointer (isPointer == 1 && isVector == 0):
+ /// SizeInBits: 16;
+ /// AddressSpace: 23;
+ static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 0};
+ static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{
+ 23, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]};
+ /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
+ /// NumElements: 16;
+ /// SizeOfElement: 32;
+ static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 0};
+ static const constexpr BitFieldInfo VectorSizeFieldInfo{
+ 32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]};
+ /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
+ /// NumElements: 16;
+ /// SizeOfElement: 16;
+ /// AddressSpace: 23;
+ static const constexpr BitFieldInfo PointerVectorElementsFieldInfo{16, 0};
+ static const constexpr BitFieldInfo PointerVectorSizeFieldInfo{
+ 16,
+ PointerVectorElementsFieldInfo[1] + PointerVectorElementsFieldInfo[0]};
+ static const constexpr BitFieldInfo PointerVectorAddressSpaceFieldInfo{
+ 23, PointerVectorSizeFieldInfo[1] + PointerVectorSizeFieldInfo[0]};
+
+ uint64_t IsPointer : 1;
+ uint64_t IsVector : 1;
+ uint64_t RawData : 62;
+
+ static uint64_t getMask(const BitFieldInfo FieldInfo) {
+ const int FieldSizeInBits = FieldInfo[0];
+ return (((uint64_t)1) << FieldSizeInBits) - 1;
+ }
+ static uint64_t maskAndShift(uint64_t Val, uint64_t Mask, uint8_t Shift) {
+ assert(Val <= Mask && "Value too large for field");
+ return (Val & Mask) << Shift;
+ }
+ static uint64_t maskAndShift(uint64_t Val, const BitFieldInfo FieldInfo) {
+ return maskAndShift(Val, getMask(FieldInfo), FieldInfo[1]);
+ }
+ uint64_t getFieldValue(const BitFieldInfo FieldInfo) const {
+ return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
+ }
+
+ void init(bool IsPointer, bool IsVector, uint16_t NumElements,
+ unsigned SizeInBits, unsigned AddressSpace) {
+ this->IsPointer = IsPointer;
+ this->IsVector = IsVector;
+ if (!IsVector) {
+ if (!IsPointer)
+ RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
+ else
+ RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
+ maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
+ } else {
+ assert(NumElements > 1 && "invalid number of vector elements");
+ if (!IsPointer)
+ RawData = maskAndShift(NumElements, VectorElementsFieldInfo) |
+ maskAndShift(SizeInBits, VectorSizeFieldInfo);
+ else
+ RawData =
+ maskAndShift(NumElements, PointerVectorElementsFieldInfo) |
+ maskAndShift(SizeInBits, PointerVectorSizeFieldInfo) |
+ maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo);
+ }
+ }
};
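All four encodings fall out of the factory functions; a few illustrative constructions, with GlobalISel's usual notation in the comments:

    LLT S32   = LLT::scalar(32);      // s32: RawData holds just SizeInBits
    LLT P1    = LLT::pointer(1, 64);  // p1: 64-bit pointer, address space 1
    LLT V4S32 = LLT::vector(4, S32);  // <4 x s32>: RawData = 4 | (32 << 16)
    LLT V2P1  = LLT::vector(2, P1);   // <2 x p1>: elements, size, addr space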
inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
@@ -182,14 +285,18 @@ inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
template<> struct DenseMapInfo<LLT> {
static inline LLT getEmptyKey() {
- return LLT{LLT::Invalid, 0, -1u};
+ LLT Invalid;
+ Invalid.IsPointer = true;
+ return Invalid;
}
static inline LLT getTombstoneKey() {
- return LLT{LLT::Invalid, 0, -2u};
+ LLT Invalid;
+ Invalid.IsVector = true;
+ return Invalid;
}
static inline unsigned getHashValue(const LLT &Ty) {
- uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) |
- ((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind;
+ uint64_t Val = ((uint64_t)Ty.RawData) << 2 | ((uint64_t)Ty.IsPointer) << 1 |
+ ((uint64_t)Ty.IsVector);
return DenseMapInfo<uint64_t>::getHashValue(Val);
}
static bool isEqual(const LLT &LHS, const LLT &RHS) {
diff --git a/contrib/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm/include/llvm/Support/MathExtras.h
index 19380b23d9d2..994456f9a681 100644
--- a/contrib/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm/include/llvm/Support/MathExtras.h
@@ -18,6 +18,7 @@
#include "llvm/Support/SwapByteOrder.h"
#include <algorithm>
#include <cassert>
+#include <climits>
#include <cstring>
#include <type_traits>
#include <limits>
@@ -198,6 +199,21 @@ template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
return countTrailingZeros(Val, ZB_Undefined);
}
+/// \brief Create a bitmask with the N right-most bits set to 1, and all other
+/// bits set to 0. Only unsigned types are allowed.
+template <typename T> T maskTrailingOnes(unsigned N) {
+ static_assert(std::is_unsigned<T>::value, "Invalid type!");
+ const unsigned Bits = CHAR_BIT * sizeof(T);
+ assert(N <= Bits && "Invalid bit index");
+ return N == 0 ? 0 : (T(-1) >> (Bits - N));
+}
+
+/// \brief Create a bitmask with the N left-most bits set to 1, and all other
+/// bits set to 0. Only unsigned types are allowed.
+template <typename T> T maskLeadingOnes(unsigned N) {
+ return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
+
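A few concrete values pin down the edge cases (both N == 0 and N == bit-width are legal):

    maskTrailingOnes<uint8_t>(3);   // 0b00000111
    maskLeadingOnes<uint8_t>(3);    // 0b11100000
    maskTrailingOnes<uint32_t>(0);  // 0 (the shift by Bits is sidestepped)
    maskTrailingOnes<uint32_t>(32); // 0xFFFFFFFF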
/// \brief Get the index of the last set bit starting from the least
/// significant bit.
///
diff --git a/contrib/llvm/include/llvm/Support/Recycler.h b/contrib/llvm/include/llvm/Support/Recycler.h
index 1523aad38d46..53db2e86d12d 100644
--- a/contrib/llvm/include/llvm/Support/Recycler.h
+++ b/contrib/llvm/include/llvm/Support/Recycler.h
@@ -42,13 +42,16 @@ class Recycler {
FreeNode *pop_val() {
auto *Val = FreeList;
+ __asan_unpoison_memory_region(Val, Size);
FreeList = FreeList->Next;
+ __msan_allocated_memory(Val, Size);
return Val;
}
void push(FreeNode *N) {
N->Next = FreeList;
FreeList = N;
+ __asan_poison_memory_region(N, Size);
}
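Nodes parked on the free list are now poisoned for AddressSanitizer, and marked uninitialized for MemorySanitizer on reuse, so a stale pointer into recycled storage faults instead of silently reading garbage. A hypothetical use (MyNode and the surrounding code are illustrative):

    #include "llvm/Support/Allocator.h"
    #include "llvm/Support/Recycler.h"

    struct MyNode { MyNode *Next; uint64_t Payload; }; // illustrative type

    void demo() {
      llvm::BumpPtrAllocator Alloc;
      llvm::Recycler<MyNode> Pool;
      MyNode *N = Pool.Allocate<MyNode>(Alloc);
      Pool.Deallocate(Alloc, N);
      // *N is poisoned while on the free list; under ASan a read faults here.
      MyNode *M = Pool.Allocate<MyNode>(Alloc); // unpoisons; may hand back N
      (void)M;
      Pool.clear(Alloc); // return everything before the allocator dies
    }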
public:
diff --git a/contrib/llvm/include/llvm/Support/Regex.h b/contrib/llvm/include/llvm/Support/Regex.h
index 83db80359ee2..f498835bcb58 100644
--- a/contrib/llvm/include/llvm/Support/Regex.h
+++ b/contrib/llvm/include/llvm/Support/Regex.h
@@ -57,7 +57,7 @@ namespace llvm {
/// isValid - returns the error encountered during regex compilation, or
/// matching, if any.
- bool isValid(std::string &Error);
+ bool isValid(std::string &Error) const;
/// getNumMatches - In a valid regex, return the number of parenthesized
/// matches it contains. The number filled in by match will include this
diff --git a/contrib/llvm/include/llvm/Support/TargetParser.h b/contrib/llvm/include/llvm/Support/TargetParser.h
index 68e6b2765810..f29cc40ffdd5 100644
--- a/contrib/llvm/include/llvm/Support/TargetParser.h
+++ b/contrib/llvm/include/llvm/Support/TargetParser.h
@@ -75,7 +75,7 @@ enum ArchExtKind : unsigned {
AEK_CRC = 0x2,
AEK_CRYPTO = 0x4,
AEK_FP = 0x8,
- AEK_HWDIV = 0x10,
+ AEK_HWDIVTHUMB = 0x10,
AEK_HWDIVARM = 0x20,
AEK_MP = 0x40,
AEK_SIMD = 0x80,
diff --git a/contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h b/contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h
index aaf2a356ffab..4b11e889ea6c 100644
--- a/contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h
+++ b/contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h
@@ -27,6 +27,8 @@ class StringToOffsetTable {
std::string AggregateString;
public:
+ bool Empty() const { return StringOffset.empty(); }
+
unsigned GetOrAddStringOffset(StringRef Str, bool appendZero = true) {
auto IterBool =
StringOffset.insert(std::make_pair(Str, AggregateString.size()));
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
index 85297ae837c5..24039ea10816 100644
--- a/contrib/llvm/include/llvm/Target/TargetLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -230,6 +230,12 @@ public:
return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
}
+ /// Return the type for frame index, which is determined by
+ /// the alloca address space specified through the data layout.
+ MVT getFrameIndexTy(const DataLayout &DL) const {
+ return getPointerTy(DL, DL.getAllocaAddrSpace());
+ }
+
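Lowering code that previously used getPointerTy(DL) for frame indices can now respect a non-zero alloca address space; a one-line sketch of the intended call site (FrameIdx is hypothetical):

    // Sketch: building a frame-index node with the DataLayout-derived type.
    SDValue FI =
        DAG.getFrameIndex(FrameIdx, TLI.getFrameIndexTy(DAG.getDataLayout()));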
/// EVT is not used in-tree, but is used by out-of-tree targets.
/// Documentation for this function would be nice...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
@@ -2807,7 +2813,7 @@ public:
/// Return true if the target may be able emit the call instruction as a tail
/// call. This is used by optimization passes to determine if it's profitable
/// to duplicate return instructions to enable tailcall optimization.
- virtual bool mayBeEmittedAsTailCall(CallInst *) const {
+ virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
return false;
}
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
index a2978663a4d1..a602498e5f22 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -65,14 +65,6 @@ template <typename T> class ArrayRef;
/// Blocks containing EHPads, allocas, invokes, or vastarts are not valid.
static bool isBlockValidForExtraction(const BasicBlock &BB);
- /// \brief Create a code extractor for a single basic block.
- ///
- /// In this formation, we don't require a dominator tree. The given basic
- /// block is set up for extraction.
- CodeExtractor(BasicBlock *BB, bool AggregateArgs = false,
- BlockFrequencyInfo *BFI = nullptr,
- BranchProbabilityInfo *BPI = nullptr);
-
/// \brief Create a code extractor for a sequence of blocks.
///
/// Given a sequence of basic blocks where the first block in the sequence
@@ -91,14 +83,6 @@ template <typename T> class ArrayRef;
BlockFrequencyInfo *BFI = nullptr,
BranchProbabilityInfo *BPI = nullptr);
- /// \brief Create a code extractor for a region node.
- ///
- /// Behaves just like the generic code sequence constructor, but uses the
- /// block sequence of the region node passed in.
- CodeExtractor(DominatorTree &DT, const RegionNode &RN,
- bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
- BranchProbabilityInfo *BPI = nullptr);
-
/// \brief Perform the extraction, returning the new function.
///
/// Returns zero when called on a CodeExtractor instance where isEligible
diff --git a/contrib/llvm/include/llvm/XRay/InstrumentationMap.h b/contrib/llvm/include/llvm/XRay/InstrumentationMap.h
index f7286c52ff42..0342da0a2f0f 100644
--- a/contrib/llvm/include/llvm/XRay/InstrumentationMap.h
+++ b/contrib/llvm/include/llvm/XRay/InstrumentationMap.h
@@ -59,6 +59,7 @@ struct YAMLXRaySledEntry {
yaml::Hex64 Function;
SledEntry::FunctionKinds Kind;
bool AlwaysInstrument;
+ std::string FunctionName;
};
/// The InstrumentationMap represents the computed function id's and indicated
@@ -115,6 +116,7 @@ template <> struct MappingTraits<xray::YAMLXRaySledEntry> {
IO.mapRequired("function", Entry.Function);
IO.mapRequired("kind", Entry.Kind);
IO.mapRequired("always-instrument", Entry.AlwaysInstrument);
+ IO.mapOptional("function-name", Entry.FunctionName);
}
static constexpr bool flow = true;
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 09582cf9a71d..3db041cc0fa6 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -808,7 +808,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// well. Or alternatively, replace all of this with inaccessiblememonly once
// that's implemented fully.
auto *Inst = CS.getInstruction();
- if (isMallocLikeFn(Inst, &TLI) || isCallocLikeFn(Inst, &TLI)) {
+ if (isMallocOrCallocLikeFn(Inst, &TLI)) {
// Be conservative if the accessed pointer may alias the allocation -
// fallback to the generic handling below.
if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
@@ -925,9 +925,8 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
const DataLayout &DL) {
assert(GEP1->getPointerOperand()->stripPointerCasts() ==
- GEP2->getPointerOperand()->stripPointerCasts() &&
- GEP1->getPointerOperand()->getType() ==
- GEP2->getPointerOperand()->getType() &&
+ GEP2->getPointerOperand()->stripPointerCasts() &&
+ GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
"Expected GEPs with the same pointer operand");
// Try to determine whether GEP1 and GEP2 index through arrays, into structs,
@@ -1186,9 +1185,8 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// just the same underlying object), see if that tells us anything about
// the resulting pointers.
if (GEP1->getPointerOperand()->stripPointerCasts() ==
- GEP2->getPointerOperand()->stripPointerCasts() &&
- GEP1->getPointerOperand()->getType() ==
- GEP2->getPointerOperand()->getType()) {
+ GEP2->getPointerOperand()->stripPointerCasts() &&
+ GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
// If we couldn't find anything interesting, don't abandon just yet.
if (R != MayAlias)
diff --git a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 5935dec15c70..0dc4475ca0e2 100644
--- a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -72,6 +72,32 @@ static const uint32_t UR_TAKEN_WEIGHT = 1;
/// easily subsume it.
static const uint32_t UR_NONTAKEN_WEIGHT = 1024*1024 - 1;
+/// \brief Returns the branch probability for an unreachable edge, according
+/// to the heuristic.
+///
+/// This is the probability of a branch being taken to a block that terminates
+/// (eventually) in unreachable. These are predicted as unlikely as possible.
+static BranchProbability getUnreachableProbability(uint64_t UnreachableCount) {
+ assert(UnreachableCount > 0 && "UnreachableCount must be > 0");
+ return BranchProbability::getBranchProbability(
+ UR_TAKEN_WEIGHT,
+ (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * UnreachableCount);
+}
+
+/// \brief Returns the branch probability for a reachable edge, according
+/// to the heuristic.
+///
+/// This is the probability of a branch not being taken toward a block that
+/// terminates (eventually) in unreachable. Such a branch is essentially never
+/// taken. Set the weight to an absurdly high value so that nested loops don't
+/// easily subsume it.
+static BranchProbability getReachableProbability(uint64_t ReachableCount) {
+ assert(ReachableCount > 0 && "ReachableCount must be > 0");
+ return BranchProbability::getBranchProbability(
+ UR_NONTAKEN_WEIGHT,
+ (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * ReachableCount);
+}
+
/// \brief Weight for a branch taken going into a cold block.
///
/// This is the weight for a branch taken toward a block marked
@@ -179,7 +205,11 @@ BranchProbabilityInfo::updatePostDominatedByColdCall(const BasicBlock *BB) {
/// unreachable-terminated block as extremely unlikely.
bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) {
const TerminatorInst *TI = BB->getTerminator();
- if (TI->getNumSuccessors() == 0)
+ assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
+
+ // Return false here so that edge weights for InvokeInst could be decided
+ // in calcInvokeHeuristics().
+ if (isa<InvokeInst>(TI))
return false;
SmallVector<unsigned, 4> UnreachableEdges;
@@ -191,14 +221,8 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) {
else
ReachableEdges.push_back(I.getSuccessorIndex());
- // Skip probabilities if this block has a single successor or if all were
- // reachable.
- if (TI->getNumSuccessors() == 1 || UnreachableEdges.empty())
- return false;
-
- // Return false here so that edge weights for InvokeInst could be decided
- // in calcInvokeHeuristics().
- if (isa<InvokeInst>(TI))
+ // Skip probabilities if all were reachable.
+ if (UnreachableEdges.empty())
return false;
if (ReachableEdges.empty()) {
@@ -208,12 +232,8 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) {
return true;
}
- auto UnreachableProb = BranchProbability::getBranchProbability(
- UR_TAKEN_WEIGHT, (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) *
- uint64_t(UnreachableEdges.size()));
- auto ReachableProb = BranchProbability::getBranchProbability(
- UR_NONTAKEN_WEIGHT,
- (UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * uint64_t(ReachableEdges.size()));
+ auto UnreachableProb = getUnreachableProbability(UnreachableEdges.size());
+ auto ReachableProb = getReachableProbability(ReachableEdges.size());
for (unsigned SuccIdx : UnreachableEdges)
setEdgeProbability(BB, SuccIdx, UnreachableProb);
@@ -224,11 +244,12 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) {
}
// Propagate existing explicit probabilities from either profile data or
-// 'expect' intrinsic processing.
+// 'expect' intrinsic processing. Examine the metadata against the unreachable
+// heuristic. The probability of an edge leading to an unreachable block is
+// set to the minimum of the metadata weight and the unreachable heuristic.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
const TerminatorInst *TI = BB->getTerminator();
- if (TI->getNumSuccessors() == 1)
- return false;
+ assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
return false;
@@ -249,6 +270,8 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
// be scaled to fit in 32 bits.
uint64_t WeightSum = 0;
SmallVector<uint32_t, 2> Weights;
+ SmallVector<unsigned, 2> UnreachableIdxs;
+ SmallVector<unsigned, 2> ReachableIdxs;
Weights.reserve(TI->getNumSuccessors());
for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
ConstantInt *Weight =
@@ -259,6 +282,10 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
"Too many bits for uint32_t");
Weights.push_back(Weight->getZExtValue());
WeightSum += Weights.back();
+ if (PostDominatedByUnreachable.count(TI->getSuccessor(i - 1)))
+ UnreachableIdxs.push_back(i - 1);
+ else
+ ReachableIdxs.push_back(i - 1);
}
assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
@@ -267,20 +294,52 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
uint64_t ScalingFactor =
(WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;
- WeightSum = 0;
- for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
- Weights[i] /= ScalingFactor;
- WeightSum += Weights[i];
+ if (ScalingFactor > 1) {
+ WeightSum = 0;
+ for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
+ Weights[i] /= ScalingFactor;
+ WeightSum += Weights[i];
+ }
}
- if (WeightSum == 0) {
- for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- setEdgeProbability(BB, i, {1, e});
- } else {
+ if (WeightSum == 0 || ReachableIdxs.size() == 0) {
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- setEdgeProbability(BB, i, {Weights[i], static_cast<uint32_t>(WeightSum)});
+ Weights[i] = 1;
+ WeightSum = TI->getNumSuccessors();
+ }
+
+ // Set the probability.
+ SmallVector<BranchProbability, 2> BP;
+ for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+ BP.push_back({ Weights[i], static_cast<uint32_t>(WeightSum) });
+
+ // Examine the metadata against the unreachable heuristic.
+ // If the unreachable heuristic is stronger, use it for this edge.
+ if (UnreachableIdxs.size() > 0 && ReachableIdxs.size() > 0) {
+ auto ToDistribute = BranchProbability::getZero();
+ auto UnreachableProb = getUnreachableProbability(UnreachableIdxs.size());
+ for (auto i : UnreachableIdxs)
+ if (UnreachableProb < BP[i]) {
+ ToDistribute += BP[i] - UnreachableProb;
+ BP[i] = UnreachableProb;
+ }
+
+ // If we modified the probability of some edges, then we must distribute
+ // the difference among the reachable edges.
+ if (ToDistribute > BranchProbability::getZero()) {
+ BranchProbability PerEdge = ToDistribute / ReachableIdxs.size();
+ for (auto i : ReachableIdxs) {
+ BP[i] += PerEdge;
+ ToDistribute -= PerEdge;
+ }
+ // Tail goes to the first reachable edge.
+ BP[ReachableIdxs[0]] += ToDistribute;
+ }
}
+ for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+ setEdgeProbability(BB, i, BP[i]);
+
assert(WeightSum <= UINT32_MAX &&
"Expected weights to scale down to 32 bits");
@@ -297,7 +356,11 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
/// Return false, otherwise.
bool BranchProbabilityInfo::calcColdCallHeuristics(const BasicBlock *BB) {
const TerminatorInst *TI = BB->getTerminator();
- if (TI->getNumSuccessors() == 0)
+ assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
+
+ // Return false here so that edge weights for InvokeInst could be decided
+ // in calcInvokeHeuristics().
+ if (isa<InvokeInst>(TI))
return false;
// Determine which successors are post-dominated by a cold block.
@@ -309,13 +372,8 @@ bool BranchProbabilityInfo::calcColdCallHeuristics(const BasicBlock *BB) {
else
NormalEdges.push_back(I.getSuccessorIndex());
- // Return false here so that edge weights for InvokeInst could be decided
- // in calcInvokeHeuristics().
- if (isa<InvokeInst>(TI))
- return false;
-
- // Skip probabilities if this block has a single successor.
- if (TI->getNumSuccessors() == 1 || ColdEdges.empty())
+ // Skip probabilities if no cold edges.
+ if (ColdEdges.empty())
return false;
if (NormalEdges.empty()) {
@@ -698,10 +756,13 @@ void BranchProbabilityInfo::calculate(const Function &F, const LoopInfo &LI) {
DEBUG(dbgs() << "Computing probabilities for " << BB->getName() << "\n");
updatePostDominatedByUnreachable(BB);
updatePostDominatedByColdCall(BB);
- if (calcUnreachableHeuristics(BB))
+ // If there are fewer than two successors, there is no sense in setting a probability.
+ if (BB->getTerminator()->getNumSuccessors() < 2)
continue;
if (calcMetadataWeights(BB))
continue;
+ if (calcUnreachableHeuristics(BB))
+ continue;
if (calcColdCallHeuristics(BB))
continue;
if (calcLoopBranchHeuristics(BB, LI))
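The effect of the new metadata-versus-heuristic logic is easiest to see in isolation: each unreachable edge is capped at the heuristic probability and the freed mass is spread over the reachable edges. A minimal sketch of that scheme, using plain doubles instead of BranchProbability (illustrative only, not the patch's code):

    #include <vector>

    // Cap each unreachable edge at the heuristic probability and spread the
    // freed mass evenly over the reachable edges. (The real code uses integer
    // BranchProbability arithmetic and gives the division remainder to the
    // first reachable edge.)
    static void capUnreachable(std::vector<double> &BP,
                               const std::vector<unsigned> &UnreachableIdxs,
                               const std::vector<unsigned> &ReachableIdxs,
                               double Heuristic) {
      double ToDistribute = 0.0;
      for (unsigned I : UnreachableIdxs)
        if (Heuristic < BP[I]) {
          ToDistribute += BP[I] - Heuristic;
          BP[I] = Heuristic;
        }
      if (ToDistribute > 0.0 && !ReachableIdxs.empty()) {
        double PerEdge = ToDistribute / ReachableIdxs.size();
        for (unsigned I : ReachableIdxs)
          BP[I] += PerEdge;
      }
    }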
diff --git a/contrib/llvm/lib/Analysis/CFLGraph.h b/contrib/llvm/lib/Analysis/CFLGraph.h
index e526e0e16aa7..75726e84569b 100644
--- a/contrib/llvm/lib/Analysis/CFLGraph.h
+++ b/contrib/llvm/lib/Analysis/CFLGraph.h
@@ -400,8 +400,7 @@ template <typename CFLAA> class CFLGraphBuilder {
// TODO: address other common library functions such as realloc(),
// strdup(),
// etc.
- if (isMallocLikeFn(Inst, &TLI) || isCallocLikeFn(Inst, &TLI) ||
- isFreeCall(Inst, &TLI))
+ if (isMallocOrCallocLikeFn(Inst, &TLI) || isFreeCall(Inst, &TLI))
return;
// TODO: Add support for noalias args/all the other fun function
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index e12f640394e6..2259fbaeb982 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -75,20 +75,16 @@ static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
const Query &, unsigned);
-/// For a boolean type, or a vector of boolean type, return false, or
-/// a vector with every element false, as appropriate for the type.
+/// For a boolean type or a vector of boolean type, return false or a vector
+/// with every element false.
static Constant *getFalse(Type *Ty) {
- assert(Ty->getScalarType()->isIntegerTy(1) &&
- "Expected i1 type or a vector of i1!");
- return Constant::getNullValue(Ty);
+ return ConstantInt::getFalse(Ty);
}
-/// For a boolean type, or a vector of boolean type, return true, or
-/// a vector with every element true, as appropriate for the type.
+/// For a boolean type or a vector of boolean type, return true or a vector
+/// with every element true.
static Constant *getTrue(Type *Ty) {
- assert(Ty->getScalarType()->isIntegerTy(1) &&
- "Expected i1 type or a vector of i1!");
- return Constant::getAllOnesValue(Ty);
+ return ConstantInt::getTrue(Ty);
}
/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
@@ -572,11 +568,11 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
match(Op1, m_Not(m_Specific(Op0))))
return Constant::getAllOnesValue(Ty);
- // add nsw/nuw (xor Y, signbit), signbit --> Y
+ // add nsw/nuw (xor Y, signmask), signmask --> Y
// The no-wrapping add guarantees that the top bit will be set by the add.
// Therefore, the xor must be clearing the already set sign bit of Y.
- if ((isNSW || isNUW) && match(Op1, m_SignBit()) &&
- match(Op0, m_Xor(m_Value(Y), m_SignBit())))
+ if ((isNSW || isNUW) && match(Op1, m_SignMask()) &&
+ match(Op0, m_Xor(m_Value(Y), m_SignMask())))
return Y;
/// i1 add -> xor.
@@ -1085,7 +1081,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
match(Op1, m_ConstantInt(C2))) {
bool Overflow;
- C1->getValue().umul_ov(C2->getValue(), Overflow);
+ (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
if (Overflow)
return Constant::getNullValue(Op0->getType());
}
@@ -2823,7 +2819,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
return ConstantInt::getTrue(RHS->getContext());
}
}
- if (CIVal->isSignBit() && *CI2Val == 1) {
+ if (CIVal->isSignMask() && *CI2Val == 1) {
if (Pred == ICmpInst::ICMP_UGT)
return ConstantInt::getFalse(RHS->getContext());
if (Pred == ICmpInst::ICMP_ULE)
@@ -3800,6 +3796,8 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
Type *GEPTy = PointerType::get(LastType, AS);
if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
GEPTy = VectorType::get(GEPTy, VT->getNumElements());
+ else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
+ GEPTy = VectorType::get(GEPTy, VT->getNumElements());
if (isa<UndefValue>(Ops[0]))
return UndefValue::get(GEPTy);
@@ -4082,6 +4080,60 @@ Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
RecursionLimit);
}
+/// For the given destination element of a shuffle, peek through shuffles to
+/// match a root vector source operand that contains that element in the same
+/// vector lane (i.e., the same mask index), so we can eliminate the shuffle(s).
+static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
+ Constant *Mask, Value *RootVec, int RootElt,
+ unsigned MaxRecurse) {
+ if (!MaxRecurse--)
+ return nullptr;
+
+ // Bail out if any mask value is undefined. That kind of shuffle may be
+ // simplified further based on demanded bits or other folds.
+ int MaskVal = ShuffleVectorInst::getMaskValue(Mask, RootElt);
+ if (MaskVal == -1)
+ return nullptr;
+
+ // The mask value chooses which source operand we need to look at next.
+ Value *SourceOp;
+ int InVecNumElts = Op0->getType()->getVectorNumElements();
+ if (MaskVal < InVecNumElts) {
+ RootElt = MaskVal;
+ SourceOp = Op0;
+ } else {
+ RootElt = MaskVal - InVecNumElts;
+ SourceOp = Op1;
+ }
+
+ // If the source operand is a shuffle itself, look through it to find the
+ // matching root vector.
+ if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
+ return foldIdentityShuffles(
+ DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
+ SourceShuf->getMask(), RootVec, RootElt, MaxRecurse);
+ }
+
+ // TODO: Look through bitcasts? What if the bitcast changes the vector element
+ // size?
+
+ // The source operand is not a shuffle. Initialize the root vector value for
+ // this shuffle if that has not been done yet.
+ if (!RootVec)
+ RootVec = SourceOp;
+
+ // Give up as soon as a source operand does not match the existing root value.
+ if (RootVec != SourceOp)
+ return nullptr;
+
+ // The element must be coming from the same lane in the source vector
+ // (although it may have crossed lanes in intermediate shuffles).
+ if (RootElt != DestElt)
+ return nullptr;
+
+ return RootVec;
+}
+
static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
Type *RetTy, const Query &Q,
unsigned MaxRecurse) {
@@ -4126,7 +4178,28 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
OpShuf->getMask()->getSplatValue())
return Op1;
- return nullptr;
+ // Don't fold a shuffle with undef mask elements. This may get folded in a
+ // better way using demanded bits or other analysis.
+ // TODO: Should we allow this?
+ for (unsigned i = 0; i != MaskNumElts; ++i)
+ if (ShuffleVectorInst::getMaskValue(Mask, i) == -1)
+ return nullptr;
+
+ // Check if every element of this shuffle can be mapped back to the
+ // corresponding element of a single root vector. If so, we don't need this
+ // shuffle. This handles simple identity shuffles as well as chains of
+ // shuffles that may widen/narrow and/or move elements across lanes and back.
+ Value *RootVec = nullptr;
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ // Note that recursion is limited for each vector element, so if any element
+ // exceeds the limit, this will fail to simplify.
+ RootVec = foldIdentityShuffles(i, Op0, Op1, Mask, RootVec, i, MaxRecurse);
+
+ // We can't replace a widening/narrowing shuffle with one of its operands.
+ if (!RootVec || RootVec->getType() != RetTy)
+ return nullptr;
+ }
+ return RootVec;
}
/// Given operands for a ShuffleVectorInst, fold the result or return null.
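The lane-tracing idea behind foldIdentityShuffles, reduced to a standalone sketch over a toy shuffle tree. The Node type is invented for illustration; the real code walks ShuffleVectorInst masks and additionally enforces a recursion limit:

    #include <vector>

    // A leaf Node is a plain vector; otherwise it shuffles Op0/Op1 by Mask.
    struct Node {
      const Node *Op0 = nullptr, *Op1 = nullptr;
      std::vector<int> Mask; // Empty for leaf vectors; -1 marks undef lanes.
      int NumElts = 0;
    };

    // Follow lane Elt of N back to a leaf; succeed only if the element ends
    // up in lane DestElt of that leaf, mirroring the RootElt == DestElt check.
    static const Node *traceLane(const Node *N, int Elt, int DestElt) {
      if (N->Mask.empty())
        return Elt == DestElt ? N : nullptr;
      int MaskVal = N->Mask[Elt];
      if (MaskVal < 0)
        return nullptr; // Undef mask element: bail out.
      int InElts = N->Op0->NumElts;
      return MaskVal < InElts ? traceLane(N->Op0, MaskVal, DestElt)
                              : traceLane(N->Op1, MaskVal - InElts, DestElt);
    }

    // The shuffle tree is an identity of some leaf iff every destination lane
    // traces back to the same leaf vector in the same lane.
    static const Node *foldIdentity(const Node *Shuf) {
      const Node *Root = nullptr;
      for (int I = 0; I != Shuf->NumElts; ++I) {
        const Node *R = traceLane(Shuf, I, I);
        if (!R || (Root && R != Root))
          return nullptr;
        Root = R;
      }
      return Root;
    }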
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index b8c444904723..7983d62c2f7a 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -37,6 +37,7 @@ enum AllocType : uint8_t {
CallocLike = 1<<2, // allocates + bzero
ReallocLike = 1<<3, // reallocates
StrDupLike = 1<<4,
+ MallocOrCallocLike = MallocLike | CallocLike,
AllocLike = MallocLike | CallocLike | StrDupLike,
AnyAlloc = AllocLike | ReallocLike
};
@@ -77,8 +78,8 @@ static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
// TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};
-static Function *getCalledFunction(const Value *V, bool LookThroughBitCast,
- bool &IsNoBuiltin) {
+static const Function *getCalledFunction(const Value *V, bool LookThroughBitCast,
+ bool &IsNoBuiltin) {
// Don't care about intrinsics in this case.
if (isa<IntrinsicInst>(V))
return nullptr;
@@ -86,13 +87,13 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast,
if (LookThroughBitCast)
V = V->stripPointerCasts();
- CallSite CS(const_cast<Value*>(V));
+ ImmutableCallSite CS(V);
if (!CS.getInstruction())
return nullptr;
IsNoBuiltin = CS.isNoBuiltin();
- Function *Callee = CS.getCalledFunction();
+ const Function *Callee = CS.getCalledFunction();
if (!Callee || !Callee->isDeclaration())
return nullptr;
return Callee;
@@ -220,6 +221,14 @@ bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
}
/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory similar to malloc or calloc.
+bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, MallocOrCallocLike, TLI,
+ LookThroughBitCast).hasValue();
+}
+
+/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
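The new MallocOrCallocLike value works because AllocType members are bit flags, so a single mask test matches either allocator family. A reduced sketch (flag values illustrative, not copied from the header):

    #include <cstdint>

    enum AllocType : uint8_t {
      MallocLike = 1 << 1, // illustrative values
      CallocLike = 1 << 2,
      MallocOrCallocLike = MallocLike | CallocLike,
    };

    // One membership test covers both malloc-like and calloc-like calls.
    static bool isMallocOrCalloc(AllocType T) {
      return (T & MallocOrCallocLike) != 0;
    }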
diff --git a/contrib/llvm/lib/Analysis/MemorySSA.cpp b/contrib/llvm/lib/Analysis/MemorySSA.cpp
index 910170561abf..2480fe44d5c0 100644
--- a/contrib/llvm/lib/Analysis/MemorySSA.cpp
+++ b/contrib/llvm/lib/Analysis/MemorySSA.cpp
@@ -1291,7 +1291,6 @@ void MemorySSA::buildMemorySSA() {
// could just look up the memory access for every possible instruction in the
// stream.
SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
- SmallPtrSet<BasicBlock *, 32> DefUseBlocks;
// Go through each block, figure out where defs occur, and chain together all
// the accesses.
for (BasicBlock &B : F) {
@@ -1316,8 +1315,6 @@ void MemorySSA::buildMemorySSA() {
}
if (InsertIntoDef)
DefiningBlocks.insert(&B);
- if (Accesses)
- DefUseBlocks.insert(&B);
}
placePHINodes(DefiningBlocks, BBNumbers);
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index ca32cf3c7c34..700c383a9dd4 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1093,7 +1093,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
APInt Mult(W, i);
unsigned TwoFactors = Mult.countTrailingZeros();
T += TwoFactors;
- Mult = Mult.lshr(TwoFactors);
+ Mult.lshrInPlace(TwoFactors);
OddFactorial *= Mult;
}
@@ -1276,7 +1276,8 @@ static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
namespace {
struct ExtendOpTraitsBase {
- typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
+ typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
+ const SCEV *, Type *, ScalarEvolution::ExtendCacheTy &Cache);
};
// Used to make code generic over signed and unsigned overflow.
@@ -1305,8 +1306,9 @@ struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
}
};
-const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
- SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
+const ExtendOpTraitsBase::GetExtendExprTy
+ ExtendOpTraits<SCEVSignExtendExpr>::GetExtendExpr =
+ &ScalarEvolution::getSignExtendExprCached;
template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
@@ -1321,8 +1323,9 @@ struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
}
};
-const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
- SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
+const ExtendOpTraitsBase::GetExtendExprTy
+ ExtendOpTraits<SCEVZeroExtendExpr>::GetExtendExpr =
+ &ScalarEvolution::getZeroExtendExprCached;
}
// The recurrence AR has been shown to have no signed/unsigned wrap or something
@@ -1334,7 +1337,8 @@ const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
// "sext/zext(PostIncAR)"
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
- ScalarEvolution *SE) {
+ ScalarEvolution *SE,
+ ScalarEvolution::ExtendCacheTy &Cache) {
auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
@@ -1381,9 +1385,9 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
const SCEV *OperandExtendedStart =
- SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
- (SE->*GetExtendExpr)(Step, WideTy));
- if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
+ SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Cache),
+ (SE->*GetExtendExpr)(Step, WideTy, Cache));
+ if ((SE->*GetExtendExpr)(Start, WideTy, Cache) == OperandExtendedStart) {
if (PreAR && AR->getNoWrapFlags(WrapType)) {
// If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
// or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
@@ -1408,15 +1412,17 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
- ScalarEvolution *SE) {
+ ScalarEvolution *SE,
+ ScalarEvolution::ExtendCacheTy &Cache) {
auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
- const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
+ const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Cache);
if (!PreStart)
- return (SE->*GetExtendExpr)(AR->getStart(), Ty);
+ return (SE->*GetExtendExpr)(AR->getStart(), Ty, Cache);
- return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
- (SE->*GetExtendExpr)(PreStart, Ty));
+ return SE->getAddExpr(
+ (SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, Cache),
+ (SE->*GetExtendExpr)(PreStart, Ty, Cache));
}
// Try to prove away overflow by looking at "nearby" add recurrences. A
@@ -1496,8 +1502,31 @@ bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
return false;
}
-const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
- Type *Ty) {
+const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty) {
+ // Use the local cache to prevent exponential behavior of
+ // getZeroExtendExprImpl.
+ ExtendCacheTy Cache;
+ return getZeroExtendExprCached(Op, Ty, Cache);
+}
+
+/// Query \p Cache before calling getZeroExtendExprImpl. If there is no
+/// related entry in the \p Cache, call getZeroExtendExprImpl and save
+/// the result in the \p Cache.
+const SCEV *ScalarEvolution::getZeroExtendExprCached(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache) {
+ auto It = Cache.find({Op, Ty});
+ if (It != Cache.end())
+ return It->second;
+ const SCEV *ZExt = getZeroExtendExprImpl(Op, Ty, Cache);
+ auto InsertResult = Cache.insert({{Op, Ty}, ZExt});
+ assert(InsertResult.second && "Expected the key not to be in the cache");
+ (void)InsertResult;
+ return ZExt;
+}
+
+/// The real implementation of getZeroExtendExpr.
+const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1507,11 +1536,11 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
+ cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
// zext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
- return getZeroExtendExpr(SZ->getOperand(), Ty);
+ return getZeroExtendExprCached(SZ->getOperand(), Ty, Cache);
// Before doing any expensive analysis, check to see if we've already
// computed a SCEV for this Op and Ty.
@@ -1555,8 +1584,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// we don't need to do any further analysis.
if (AR->hasNoUnsignedWrap())
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
- getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
+ getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
@@ -1581,21 +1610,22 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
- const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
- const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
+ const SCEV *ZAdd =
+ getZeroExtendExprCached(getAddExpr(Start, ZMul), WideTy, Cache);
+ const SCEV *WideStart = getZeroExtendExprCached(Start, WideTy, Cache);
const SCEV *WideMaxBECount =
- getZeroExtendExpr(CastedMaxBECount, WideTy);
- const SCEV *OperandExtendedAdd =
- getAddExpr(WideStart,
- getMulExpr(WideMaxBECount,
- getZeroExtendExpr(Step, WideTy)));
+ getZeroExtendExprCached(CastedMaxBECount, WideTy, Cache);
+ const SCEV *OperandExtendedAdd = getAddExpr(
+ WideStart, getMulExpr(WideMaxBECount, getZeroExtendExprCached(
+ Step, WideTy, Cache)));
if (ZAdd == OperandExtendedAdd) {
// Cache knowledge of AR NUW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
- getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
+ getZeroExtendExprCached(Step, Ty, Cache), L,
+ AR->getNoWrapFlags());
}
// Similar to above, only this time treat the step value as signed.
// This covers loops that count down.
@@ -1609,7 +1639,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
}
}
@@ -1641,8 +1671,9 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
- getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
+ getZeroExtendExprCached(Step, Ty, Cache), L,
+ AR->getNoWrapFlags());
}
} else if (isKnownNegative(Step)) {
const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
@@ -1657,7 +1688,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
}
}
@@ -1666,8 +1697,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
- getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
+ getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
}
}
@@ -1678,7 +1709,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// commute the zero extension with the addition operation.
SmallVector<const SCEV *, 4> Ops;
for (const auto *Op : SA->operands())
- Ops.push_back(getZeroExtendExpr(Op, Ty));
+ Ops.push_back(getZeroExtendExprCached(Op, Ty, Cache));
return getAddExpr(Ops, SCEV::FlagNUW);
}
}
@@ -1692,8 +1723,31 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
return S;
}
-const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
- Type *Ty) {
+const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty) {
+ // Use the local cache to prevent exponential behavior of
+ // getSignExtendExprImpl.
+ ExtendCacheTy Cache;
+ return getSignExtendExprCached(Op, Ty, Cache);
+}
+
+/// Query \p Cache before calling getSignExtendExprImpl. If there is no
+/// related entry in the \p Cache, call getSignExtendExprImpl and save
+/// the result in the \p Cache.
+const SCEV *ScalarEvolution::getSignExtendExprCached(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache) {
+ auto It = Cache.find({Op, Ty});
+ if (It != Cache.end())
+ return It->second;
+ const SCEV *SExt = getSignExtendExprImpl(Op, Ty, Cache);
+ auto InsertResult = Cache.insert({{Op, Ty}, SExt});
+ assert(InsertResult.second && "Expected the key not to be in the cache");
+ (void)InsertResult;
+ return SExt;
+}
+
+/// The real implementation of getSignExtendExpr.
+const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
+ ExtendCacheTy &Cache) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1703,11 +1757,11 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
+ cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
// sext(sext(x)) --> sext(x)
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
- return getSignExtendExpr(SS->getOperand(), Ty);
+ return getSignExtendExprCached(SS->getOperand(), Ty, Cache);
// sext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
@@ -1746,8 +1800,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const APInt &C2 = SC2->getAPInt();
if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
C2.ugt(C1) && C2.isPowerOf2())
- return getAddExpr(getSignExtendExpr(SC1, Ty),
- getSignExtendExpr(SMul, Ty));
+ return getAddExpr(getSignExtendExprCached(SC1, Ty, Cache),
+ getSignExtendExprCached(SMul, Ty, Cache));
}
}
}
@@ -1758,7 +1812,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// commute the sign extension with the addition operation.
SmallVector<const SCEV *, 4> Ops;
for (const auto *Op : SA->operands())
- Ops.push_back(getSignExtendExpr(Op, Ty));
+ Ops.push_back(getSignExtendExprCached(Op, Ty, Cache));
return getAddExpr(Ops, SCEV::FlagNSW);
}
}
@@ -1782,8 +1836,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// we don't need to do any further analysis.
if (AR->hasNoSignedWrap())
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
- getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
+ getSignExtendExprCached(Step, Ty, Cache), L, SCEV::FlagNSW);
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
@@ -1808,21 +1862,22 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
- const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
- const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
+ const SCEV *SAdd =
+ getSignExtendExprCached(getAddExpr(Start, SMul), WideTy, Cache);
+ const SCEV *WideStart = getSignExtendExprCached(Start, WideTy, Cache);
const SCEV *WideMaxBECount =
- getZeroExtendExpr(CastedMaxBECount, WideTy);
- const SCEV *OperandExtendedAdd =
- getAddExpr(WideStart,
- getMulExpr(WideMaxBECount,
- getSignExtendExpr(Step, WideTy)));
+ getZeroExtendExpr(CastedMaxBECount, WideTy);
+ const SCEV *OperandExtendedAdd = getAddExpr(
+ WideStart, getMulExpr(WideMaxBECount, getSignExtendExprCached(
+ Step, WideTy, Cache)));
if (SAdd == OperandExtendedAdd) {
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
- getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
+ getSignExtendExprCached(Step, Ty, Cache), L,
+ AR->getNoWrapFlags());
}
// Similar to above, only this time treat the step value as unsigned.
// This covers loops that count up with an unsigned step.
@@ -1843,7 +1898,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
}
}
@@ -1875,8 +1930,9 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
- getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
+ getSignExtendExprCached(Step, Ty, Cache), L,
+ AR->getNoWrapFlags());
}
}
@@ -1890,18 +1946,18 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const APInt &C2 = SC2->getAPInt();
if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
C2.isPowerOf2()) {
- Start = getSignExtendExpr(Start, Ty);
+ Start = getSignExtendExprCached(Start, Ty, Cache);
const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
AR->getNoWrapFlags());
- return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
+ return getAddExpr(Start, getSignExtendExprCached(NewAR, Ty, Cache));
}
}
if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
- getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
+ getSignExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
}
}
@@ -3951,9 +4007,9 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
case Instruction::Xor:
if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
- // If the RHS of the xor is a signbit, then this is just an add.
- // Instcombine turns add of signbit into xor as a strength reduction step.
- if (RHSC->getValue().isSignBit())
+ // If the RHS of the xor is a signmask, then this is just an add.
+ // Instcombine turns add of signmask into xor as a strength reduction step.
+ if (RHSC->getValue().isSignMask())
return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
return BinaryOp(Op);
@@ -5272,28 +5328,12 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
break;
case Instruction::Or:
- // If the RHS of the Or is a constant, we may have something like:
- // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
- // optimizations will transparently handle this case.
- //
- // In order for this transformation to be safe, the LHS must be of the
- // form X*(2^n) and the Or constant must be less than 2^n.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
- const SCEV *LHS = getSCEV(BO->LHS);
- const APInt &CIVal = CI->getValue();
- if (GetMinTrailingZeros(LHS) >=
- (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
- // Build a plain add SCEV.
- const SCEV *S = getAddExpr(LHS, getSCEV(CI));
- // If the LHS of the add was an addrec and it has no-wrap flags,
- // transfer the no-wrap flags, since an or won't introduce a wrap.
- if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
- const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
- const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
- OldAR->getNoWrapFlags());
- }
- return S;
- }
+ // Use ValueTracking to check whether this is actually an add.
+ if (haveNoCommonBitsSet(BO->LHS, BO->RHS, getDataLayout(), &AC,
+ nullptr, &DT)) {
+ // There aren't any common bits set, so the add can't wrap.
+ auto Flags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNSW);
+ return getAddExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
}
break;
@@ -5329,7 +5369,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// using an add, which is equivalent, and re-apply the zext.
APInt Trunc = CI->getValue().trunc(Z0TySize);
if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
- Trunc.isSignBit())
+ Trunc.isSignMask())
return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
UTy);
}
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index d871e83f222a..900a2363e60d 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -292,15 +292,15 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
KnownOne = PossibleSumOne & Known;
// Are we still trying to solve for the sign bit?
- if (!Known.isNegative()) {
+ if (!Known.isSignBitSet()) {
if (NSW) {
// Adding two non-negative numbers, or subtracting a negative number from
// a non-negative one, can't wrap into negative.
- if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
+ if (LHSKnownZero.isSignBitSet() && KnownZero2.isSignBitSet())
KnownZero.setSignBit();
// Adding two negative numbers, or subtracting a non-negative number from
// a negative one, can't wrap into non-negative.
- else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
+ else if (LHSKnownOne.isSignBitSet() && KnownOne2.isSignBitSet())
KnownOne.setSignBit();
}
}
@@ -322,10 +322,10 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// The product of a number with itself is non-negative.
isKnownNonNegative = true;
} else {
- bool isKnownNonNegativeOp1 = KnownZero.isNegative();
- bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
- bool isKnownNegativeOp1 = KnownOne.isNegative();
- bool isKnownNegativeOp0 = KnownOne2.isNegative();
+ bool isKnownNonNegativeOp1 = KnownZero.isSignBitSet();
+ bool isKnownNonNegativeOp0 = KnownZero2.isSignBitSet();
+ bool isKnownNegativeOp1 = KnownOne.isSignBitSet();
+ bool isKnownNegativeOp0 = KnownOne2.isSignBitSet();
// The product of two numbers with the same sign is non-negative.
isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
(isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
@@ -361,9 +361,9 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// which case we prefer to follow the result of the direct computation,
// though as the program is invoking undefined behaviour we can choose
// whatever we like here.
- if (isKnownNonNegative && !KnownOne.isNegative())
+ if (isKnownNonNegative && !KnownOne.isSignBitSet())
KnownZero.setSignBit();
- else if (isKnownNegative && !KnownZero.isNegative())
+ else if (isKnownNegative && !KnownZero.isSignBitSet())
KnownOne.setSignBit();
}
@@ -661,8 +661,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// For those bits in RHS that are known, we can propagate them to known
// bits in V shifted to the right by C.
- KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
- KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
+ RHSKnownZero.lshrInPlace(C->getZExtValue());
+ KnownZero |= RHSKnownZero;
+ RHSKnownOne.lshrInPlace(C->getZExtValue());
+ KnownOne |= RHSKnownOne;
// assume(~(v << c) = a)
} else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
m_Value(A))) &&
@@ -672,8 +674,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// For those bits in RHS that are known, we can propagate them inverted
// to known bits in V shifted to the right by C.
- KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
- KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
+ RHSKnownOne.lshrInPlace(C->getZExtValue());
+ KnownZero |= RHSKnownOne;
+ RHSKnownZero.lshrInPlace(C->getZExtValue());
+ KnownOne |= RHSKnownZero;
// assume(v >> c = a)
} else if (match(Arg,
m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
@@ -707,7 +711,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
- if (RHSKnownZero.isNegative()) {
+ if (RHSKnownZero.isSignBitSet()) {
// We know that the sign bit is zero.
KnownZero.setSignBit();
}
@@ -718,7 +722,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
- if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
+ if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isSignBitSet()) {
// We know that the sign bit is zero.
KnownZero.setSignBit();
}
@@ -729,7 +733,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
- if (RHSKnownOne.isNegative()) {
+ if (RHSKnownOne.isSignBitSet()) {
// We know that the sign bit is one.
KnownOne.setSignBit();
}
@@ -740,7 +744,7 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
- if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
+ if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isSignBitSet()) {
// We know that the sign bit is one.
KnownOne.setSignBit();
}
@@ -990,23 +994,23 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
unsigned MaxHighZeros = 0;
if (SPF == SPF_SMAX) {
// If both sides are negative, the result is negative.
- if (KnownOne.isNegative() && KnownOne2.isNegative())
+ if (KnownOne.isSignBitSet() && KnownOne2.isSignBitSet())
// We can derive a lower bound on the result by taking the max of the
// leading one bits.
MaxHighOnes =
std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
// If either side is non-negative, the result is non-negative.
- else if (KnownZero.isNegative() || KnownZero2.isNegative())
+ else if (KnownZero.isSignBitSet() || KnownZero2.isSignBitSet())
MaxHighZeros = 1;
} else if (SPF == SPF_SMIN) {
// If both sides are non-negative, the result is non-negative.
- if (KnownZero.isNegative() && KnownZero2.isNegative())
+ if (KnownZero.isSignBitSet() && KnownZero2.isSignBitSet())
// We can derive an upper bound on the result by taking the max of the
// leading zero bits.
MaxHighZeros = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
// If either side is negative, the result is negative.
- else if (KnownOne.isNegative() || KnownOne2.isNegative())
+ else if (KnownOne.isSignBitSet() || KnownOne2.isSignBitSet())
MaxHighOnes = 1;
} else if (SPF == SPF_UMAX) {
// We can derive a lower bound on the result by taking the max of the
@@ -1092,14 +1096,14 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
KZResult.setLowBits(ShiftAmt); // Low bits known 0.
// If this shift has "nsw" keyword, then the result is either a poison
// value or has the same sign bit as the first operand.
- if (NSW && KnownZero.isNegative())
+ if (NSW && KnownZero.isSignBitSet())
KZResult.setSignBit();
return KZResult;
};
auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
APInt KOResult = KnownOne << ShiftAmt;
- if (NSW && KnownOne.isNegative())
+ if (NSW && KnownOne.isSignBitSet())
KOResult.setSignBit();
return KOResult;
};
@@ -1111,10 +1115,11 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
}
case Instruction::LShr: {
// (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
- auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
- return KnownZero.lshr(ShiftAmt) |
- // High bits known zero.
- APInt::getHighBitsSet(BitWidth, ShiftAmt);
+ auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
+ APInt KZResult = KnownZero.lshr(ShiftAmt);
+ // High bits known zero.
+ KZResult.setHighBits(ShiftAmt);
+ return KZResult;
};
auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
@@ -1169,28 +1174,25 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
// If the first operand is non-negative or has all low bits zero, then
// the upper bits are all zero.
- if (KnownZero2.isNegative() || ((KnownZero2 & LowBits) == LowBits))
+ if (KnownZero2.isSignBitSet() || ((KnownZero2 & LowBits) == LowBits))
KnownZero |= ~LowBits;
// If the first operand is negative and not all low bits are zero, then
// the upper bits are all one.
- if (KnownOne2.isNegative() && ((KnownOne2 & LowBits) != 0))
+ if (KnownOne2.isSignBitSet() && ((KnownOne2 & LowBits) != 0))
KnownOne |= ~LowBits;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ break;
}
}
// The sign bit is the LHS's sign bit, except when the result of the
// remainder is zero.
- if (KnownZero.isNonNegative()) {
- APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
- Q);
- // If it's known zero, our sign bit is also zero.
- if (LHSKnownZero.isNegative())
- KnownZero.setSignBit();
- }
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
+ // If it's known zero, our sign bit is also zero.
+ if (KnownZero2.isSignBitSet())
+ KnownZero.setSignBit();
break;
case Instruction::URem: {
@@ -1331,24 +1333,24 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
// (add non-negative, non-negative) --> non-negative
// (add negative, negative) --> negative
if (Opcode == Instruction::Add) {
- if (KnownZero2.isNegative() && KnownZero3.isNegative())
+ if (KnownZero2.isSignBitSet() && KnownZero3.isSignBitSet())
KnownZero.setSignBit();
- else if (KnownOne2.isNegative() && KnownOne3.isNegative())
+ else if (KnownOne2.isSignBitSet() && KnownOne3.isSignBitSet())
KnownOne.setSignBit();
}
// (sub nsw non-negative, negative) --> non-negative
// (sub nsw negative, non-negative) --> negative
else if (Opcode == Instruction::Sub && LL == I) {
- if (KnownZero2.isNegative() && KnownOne3.isNegative())
+ if (KnownZero2.isSignBitSet() && KnownOne3.isSignBitSet())
KnownZero.setSignBit();
- else if (KnownOne2.isNegative() && KnownZero3.isNegative())
+ else if (KnownOne2.isSignBitSet() && KnownZero3.isSignBitSet())
KnownOne.setSignBit();
}
// (mul nsw non-negative, non-negative) --> non-negative
- else if (Opcode == Instruction::Mul && KnownZero2.isNegative() &&
- KnownZero3.isNegative())
+ else if (Opcode == Instruction::Mul && KnownZero2.isSignBitSet() &&
+ KnownZero3.isSignBitSet())
KnownZero.setSignBit();
}
@@ -1614,8 +1616,8 @@ void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
APInt ZeroBits(BitWidth, 0);
APInt OneBits(BitWidth, 0);
computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
- KnownOne = OneBits.isNegative();
- KnownZero = ZeroBits.isNegative();
+ KnownOne = OneBits.isSignBitSet();
+ KnownZero = ZeroBits.isSignBitSet();
}
/// Return true if the given value is known to have exactly one
@@ -1638,9 +1640,9 @@ bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
if (match(V, m_Shl(m_One(), m_Value())))
return true;
- // (signbit) >>l X is clearly a power of two if the one is not shifted off the
- // bottom. If it is shifted off the bottom then the result is undefined.
- if (match(V, m_LShr(m_SignBit(), m_Value())))
+ // (signmask) >>l X is clearly a power of two if the one is not shifted off
+ // the bottom. If it is shifted off the bottom then the result is undefined.
+ if (match(V, m_LShr(m_SignMask(), m_Value())))
return true;
// The remaining tests are all recursive, so bail out if we hit the limit.
@@ -2241,7 +2243,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
// If we are subtracting one from a positive number, there is no carry
// out of the result.
- if (KnownZero.isNegative())
+ if (KnownZero.isSignBitSet())
return Tmp;
}
@@ -2265,7 +2267,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
// If the input is known to be positive (the sign bit is known clear),
// the output of the NEG has the same number of sign bits as the input.
- if (KnownZero.isNegative())
+ if (KnownZero.isSignBitSet())
return Tmp2;
// Otherwise, we treat this like a SUB.
@@ -2322,10 +2324,10 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
// If we know that the sign bit is either zero or one, determine the number of
// identical bits in the top of the input value.
- if (KnownZero.isNegative())
+ if (KnownZero.isSignBitSet())
return std::max(FirstAnswer, KnownZero.countLeadingOnes());
- if (KnownOne.isNegative())
+ if (KnownOne.isSignBitSet())
return std::max(FirstAnswer, KnownOne.countLeadingOnes());
// computeKnownBits gave us no extra information about the top bits.
@@ -3556,14 +3558,14 @@ OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
// We know the multiply operation doesn't overflow if the maximum values for
// each operand will not overflow after we multiply them together.
bool MaxOverflow;
- LHSMax.umul_ov(RHSMax, MaxOverflow);
+ (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
if (!MaxOverflow)
return OverflowResult::NeverOverflows;
// We know it always overflows if multiplying the smallest possible values for
// the operands also results in overflow.
bool MinOverflow;
- LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
+ (void)LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
if (MinOverflow)
return OverflowResult::AlwaysOverflows;
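The (void) casts above make it explicit that umul_ov is called only for its overflow out-parameter, not its product. A minimal usage sketch (the helper name is assumed for illustration):

    #include "llvm/ADT/APInt.h"

    // Returns true if A * B would overflow at their common bit width; the
    // product itself is deliberately discarded.
    static bool mulOverflows(const llvm::APInt &A, const llvm::APInt &B) {
      bool Overflow;
      (void)A.umul_ov(B, Overflow);
      return Overflow;
    }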
diff --git a/contrib/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm/lib/AsmParser/LLParser.cpp
index 58ea9296afda..c7076ed0dd81 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm/lib/AsmParser/LLParser.cpp
@@ -143,27 +143,24 @@ bool LLParser::ValidateEndOfModule() {
FnAttrs.removeAttribute(Attribute::Alignment);
}
- AS = AS.addAttributes(
- Context, AttributeList::FunctionIndex,
- AttributeList::get(Context, AttributeList::FunctionIndex, FnAttrs));
+ AS = AS.addAttributes(Context, AttributeList::FunctionIndex,
+ AttributeSet::get(Context, FnAttrs));
Fn->setAttributes(AS);
} else if (CallInst *CI = dyn_cast<CallInst>(V)) {
AttributeList AS = CI->getAttributes();
AttrBuilder FnAttrs(AS.getFnAttributes());
AS = AS.removeAttributes(Context, AttributeList::FunctionIndex);
FnAttrs.merge(B);
- AS = AS.addAttributes(
- Context, AttributeList::FunctionIndex,
- AttributeList::get(Context, AttributeList::FunctionIndex, FnAttrs));
+ AS = AS.addAttributes(Context, AttributeList::FunctionIndex,
+ AttributeSet::get(Context, FnAttrs));
CI->setAttributes(AS);
} else if (InvokeInst *II = dyn_cast<InvokeInst>(V)) {
AttributeList AS = II->getAttributes();
AttrBuilder FnAttrs(AS.getFnAttributes());
AS = AS.removeAttributes(Context, AttributeList::FunctionIndex);
FnAttrs.merge(B);
- AS = AS.addAttributes(
- Context, AttributeList::FunctionIndex,
- AttributeList::get(Context, AttributeList::FunctionIndex, FnAttrs));
+ AS = AS.addAttributes(Context, AttributeList::FunctionIndex,
+ AttributeSet::get(Context, FnAttrs));
II->setAttributes(AS);
} else {
llvm_unreachable("invalid object with forward attribute group reference");
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 24ab7e9a950c..6d727ce83346 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -372,15 +372,27 @@ Expected<std::string> readTriple(BitstreamCursor &Stream) {
class BitcodeReaderBase {
protected:
- BitcodeReaderBase(BitstreamCursor Stream) : Stream(std::move(Stream)) {
+ BitcodeReaderBase(BitstreamCursor Stream, StringRef Strtab)
+ : Stream(std::move(Stream)), Strtab(Strtab) {
this->Stream.setBlockInfo(&BlockInfo);
}
BitstreamBlockInfo BlockInfo;
BitstreamCursor Stream;
+ StringRef Strtab;
+
+ /// In version 2 of the bitcode we store names of global values and comdats in
+ /// a string table rather than in the VST.
+ bool UseStrtab = false;
Expected<unsigned> parseVersionRecord(ArrayRef<uint64_t> Record);
+ /// If this module uses a string table, pop the reference to the string table
+ /// and return the referenced string and the rest of the record. Otherwise
+ /// just return the record itself.
+ std::pair<StringRef, ArrayRef<uint64_t>>
+ readNameFromStrtab(ArrayRef<uint64_t> Record);
+
bool readBlockInfo();
// Contains an arbitrary and optional string identifying the bitcode producer
@@ -402,11 +414,22 @@ BitcodeReaderBase::parseVersionRecord(ArrayRef<uint64_t> Record) {
if (Record.size() < 1)
return error("Invalid record");
unsigned ModuleVersion = Record[0];
- if (ModuleVersion > 1)
+ if (ModuleVersion > 2)
return error("Invalid value");
+ UseStrtab = ModuleVersion >= 2;
return ModuleVersion;
}
+std::pair<StringRef, ArrayRef<uint64_t>>
+BitcodeReaderBase::readNameFromStrtab(ArrayRef<uint64_t> Record) {
+ if (!UseStrtab)
+ return {"", Record};
+ // Invalid reference. Let the caller complain about the record being empty.
+ if (Record[0] + Record[1] > Strtab.size())
+ return {"", {}};
+ return {StringRef(Strtab.data() + Record[0], Record[1]), Record.slice(2)};
+}
+
class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
LLVMContext &Context;
Module *TheModule = nullptr;
@@ -492,8 +515,8 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
std::vector<std::string> BundleTags;
public:
- BitcodeReader(BitstreamCursor Stream, StringRef ProducerIdentification,
- LLVMContext &Context);
+ BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
+ StringRef ProducerIdentification, LLVMContext &Context);
Error materializeForwardReferencedFunctions();
@@ -628,7 +651,10 @@ private:
Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record,
unsigned NameIndex, Triple &TT);
+ void setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta, Function *F,
+ ArrayRef<uint64_t> Record);
Error parseValueSymbolTable(uint64_t Offset = 0);
+ Error parseGlobalValueSymbolTable();
Error parseConstants();
Error rememberAndSkipFunctionBodies();
Error rememberAndSkipFunctionBody();
@@ -681,12 +707,15 @@ class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
std::string SourceFileName;
public:
- ModuleSummaryIndexBitcodeReader(
- BitstreamCursor Stream, ModuleSummaryIndex &TheIndex);
+ ModuleSummaryIndexBitcodeReader(BitstreamCursor Stream, StringRef Strtab,
+ ModuleSummaryIndex &TheIndex);
Error parseModule(StringRef ModulePath);
private:
+ void setValueGUID(uint64_t ValueID, StringRef ValueName,
+ GlobalValue::LinkageTypes Linkage,
+ StringRef SourceFileName);
Error parseValueSymbolTable(
uint64_t Offset,
DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap);
@@ -716,10 +745,10 @@ std::error_code llvm::errorToErrorCodeAndEmitErrors(LLVMContext &Ctx,
return std::error_code();
}
-BitcodeReader::BitcodeReader(BitstreamCursor Stream,
+BitcodeReader::BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
StringRef ProducerIdentification,
LLVMContext &Context)
- : BitcodeReaderBase(std::move(Stream)), Context(Context),
+ : BitcodeReaderBase(std::move(Stream), Strtab), Context(Context),
ValueList(Context) {
this->ProducerIdentification = ProducerIdentification;
}
@@ -1749,6 +1778,54 @@ static uint64_t jumpToValueSymbolTable(uint64_t Offset,
return CurrentBit;
}
+void BitcodeReader::setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta,
+ Function *F,
+ ArrayRef<uint64_t> Record) {
+ // Note that we subtract 1 here because the offset is relative to one word
+ // before the start of the identification or module block, which was
+ // historically always the start of the regular bitcode header.
+ uint64_t FuncWordOffset = Record[1] - 1;
+ uint64_t FuncBitOffset = FuncWordOffset * 32;
+ DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta;
+ // Set the LastFunctionBlockBit to point to the last function block.
+ // Later when parsing is resumed after function materialization,
+ // we can simply skip that last function block.
+ if (FuncBitOffset > LastFunctionBlockBit)
+ LastFunctionBlockBit = FuncBitOffset;
+}
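
The offset arithmetic in setDeferredFunctionInfo is easy to get backwards, so here is a sketch of just the math with illustrative names (a hedged restatement, not new behavior):

```cpp
#include <cstdint>

// VST function entries store a 32-bit-word offset biased by +1
// (relative to one word before the bitcode header), so the reader
// subtracts the bias and scales words to bits before adding the
// stream's own starting bit offset.
uint64_t functionBitOffset(uint64_t RecordWordOffset,
                           uint64_t FuncBitcodeOffsetDelta) {
  uint64_t FuncWordOffset = RecordWordOffset - 1;      // undo the +1 bias
  return FuncWordOffset * 32 + FuncBitcodeOffsetDelta; // words to bits
}
```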
+
+/// Read a new-style GlobalValue symbol table.
+Error BitcodeReader::parseGlobalValueSymbolTable() {
+ unsigned FuncBitcodeOffsetDelta =
+ Stream.getAbbrevIDWidth() + bitc::BlockIDWidth;
+
+ if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
+ return error("Invalid record");
+
+ SmallVector<uint64_t, 64> Record;
+ while (true) {
+ BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock:
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return Error::success();
+ case BitstreamEntry::Record:
+ break;
+ }
+
+ Record.clear();
+ switch (Stream.readRecord(Entry.ID, Record)) {
+ case bitc::VST_CODE_FNENTRY: // [valueid, offset]
+ setDeferredFunctionInfo(FuncBitcodeOffsetDelta,
+ cast<Function>(ValueList[Record[0]]), Record);
+ break;
+ }
+ }
+}
+
/// Parse the value symbol table at either the current parsing location or
/// at the given bit offset if provided.
Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) {
@@ -1756,8 +1833,18 @@ Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) {
// Pass in the Offset to distinguish between calling for the module-level
// VST (where we want to jump to the VST offset) and the function-level
// VST (where we don't).
- if (Offset > 0)
+ if (Offset > 0) {
CurrentBit = jumpToValueSymbolTable(Offset, Stream);
+ // If this module uses a string table, read this as a module-level VST.
+ if (UseStrtab) {
+ if (Error Err = parseGlobalValueSymbolTable())
+ return Err;
+ Stream.JumpToBit(CurrentBit);
+ return Error::success();
+ }
+ // Otherwise, the VST will be in a similar format to a function-level VST,
+ // and will contain symbol names.
+ }
// Compute the delta between the bitcode indices in the VST (the word offset
// to the word-aligned ENTER_SUBBLOCK for the function block, and that
@@ -1818,23 +1905,10 @@ Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) {
return Err;
Value *V = ValOrErr.get();
- auto *F = dyn_cast<Function>(V);
// Ignore function offsets emitted for aliases of functions in older
// versions of LLVM.
- if (!F)
- break;
-
- // Note that we subtract 1 here because the offset is relative to one word
- // before the start of the identification or module block, which was
- // historically always the start of the regular bitcode header.
- uint64_t FuncWordOffset = Record[1] - 1;
- uint64_t FuncBitOffset = FuncWordOffset * 32;
- DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta;
- // Set the LastFunctionBlockBit to point to the last function block.
- // Later when parsing is resumed after function materialization,
- // we can simply skip that last function block.
- if (FuncBitOffset > LastFunctionBlockBit)
- LastFunctionBlockBit = FuncBitOffset;
+ if (auto *F = dyn_cast<Function>(V))
+ setDeferredFunctionInfo(FuncBitcodeOffsetDelta, F, Record);
break;
}
case bitc::VST_CODE_BBENTRY: {
@@ -2557,6 +2631,7 @@ Error BitcodeReader::globalCleanup() {
// Look for intrinsic functions which need to be upgraded at some point
for (Function &F : *TheModule) {
+ MDLoader->upgradeDebugIntrinsics(F);
Function *NewFn;
if (UpgradeIntrinsicFunction(&F, NewFn))
UpgradedIntrinsics[&F] = NewFn;
@@ -2626,15 +2701,24 @@ bool BitcodeReaderBase::readBlockInfo() {
}
Error BitcodeReader::parseComdatRecord(ArrayRef<uint64_t> Record) {
- // [selection_kind, name]
- if (Record.size() < 2)
+ // v1: [selection_kind, name]
+ // v2: [strtab_offset, strtab_size, selection_kind]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
+ if (Record.size() < 1)
return error("Invalid record");
Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]);
- std::string Name;
- unsigned ComdatNameSize = Record[1];
- Name.reserve(ComdatNameSize);
- for (unsigned i = 0; i != ComdatNameSize; ++i)
- Name += (char)Record[2 + i];
+ std::string OldFormatName;
+ if (!UseStrtab) {
+ if (Record.size() < 2)
+ return error("Invalid record");
+ unsigned ComdatNameSize = Record[1];
+ OldFormatName.reserve(ComdatNameSize);
+ for (unsigned i = 0; i != ComdatNameSize; ++i)
+ OldFormatName += (char)Record[2 + i];
+ Name = OldFormatName;
+ }
Comdat *C = TheModule->getOrInsertComdat(Name);
C->setSelectionKind(SK);
ComdatList.push_back(C);
@@ -2642,9 +2726,13 @@ Error BitcodeReader::parseComdatRecord(ArrayRef<uint64_t> Record) {
}
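
For contrast with the strtab path, here is a standalone sketch of the legacy v1 comdat layout that the `!UseStrtab` branch above still supports; names and the extra bounds check are illustrative:

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Legacy layout: [selection_kind, name_size, namechar x N], where every
// character of the name occupies a full 64-bit record element.
std::string decodeV1ComdatName(const std::vector<uint64_t> &Record) {
  std::string Name;
  if (Record.size() < 2)
    return Name; // caller reports "Invalid record"
  uint64_t Size = Record[1];
  Name.reserve(Size);
  for (uint64_t I = 0; I != Size && 2 + I < Record.size(); ++I)
    Name += static_cast<char>(Record[2 + I]);
  return Name;
}
```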
Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) {
- // [pointer type, isconst, initid, linkage, alignment, section,
+ // v1: [pointer type, isconst, initid, linkage, alignment, section,
// visibility, threadlocal, unnamed_addr, externally_initialized,
- // dllstorageclass, comdat]
+ // dllstorageclass, comdat] (name in VST)
+ // v2: [strtab_offset, strtab_size, v1]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
if (Record.size() < 6)
return error("Invalid record");
Type *Ty = getTypeByID(Record[0]);
@@ -2692,7 +2780,7 @@ Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) {
ExternallyInitialized = Record[9];
GlobalVariable *NewGV =
- new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, "",
+ new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, Name,
nullptr, TLM, AddressSpace, ExternallyInitialized);
NewGV->setAlignment(Alignment);
if (!Section.empty())
@@ -2724,9 +2812,13 @@ Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) {
}
Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) {
- // [type, callingconv, isproto, linkage, paramattr, alignment, section,
+ // v1: [type, callingconv, isproto, linkage, paramattr, alignment, section,
// visibility, gc, unnamed_addr, prologuedata, dllstorageclass, comdat,
- // prefixdata]
+ // prefixdata] (name in VST)
+ // v2: [strtab_offset, strtab_size, v1]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
if (Record.size() < 8)
return error("Invalid record");
Type *Ty = getTypeByID(Record[0]);
@@ -2742,7 +2834,7 @@ Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) {
return error("Invalid calling convention ID");
Function *Func =
- Function::Create(FTy, GlobalValue::ExternalLinkage, "", TheModule);
+ Function::Create(FTy, GlobalValue::ExternalLinkage, Name, TheModule);
Func->setCallingConv(CC);
bool isProto = Record[2];
@@ -2810,11 +2902,15 @@ Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) {
Error BitcodeReader::parseGlobalIndirectSymbolRecord(
unsigned BitCode, ArrayRef<uint64_t> Record) {
- // ALIAS_OLD: [alias type, aliasee val#, linkage]
- // ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility,
- // dllstorageclass]
- // IFUNC: [alias type, addrspace, aliasee val#, linkage,
- // visibility, dllstorageclass]
+ // v1 ALIAS_OLD: [alias type, aliasee val#, linkage] (name in VST)
+ // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility,
+ // dllstorageclass] (name in VST)
+ // v1 IFUNC: [alias type, addrspace, aliasee val#, linkage,
+ // visibility, dllstorageclass] (name in VST)
+ // v2: [strtab_offset, strtab_size, v1]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
bool NewRecord = BitCode != bitc::MODULE_CODE_ALIAS_OLD;
if (Record.size() < (3 + (unsigned)NewRecord))
return error("Invalid record");
@@ -2839,10 +2935,10 @@ Error BitcodeReader::parseGlobalIndirectSymbolRecord(
GlobalIndirectSymbol *NewGA;
if (BitCode == bitc::MODULE_CODE_ALIAS ||
BitCode == bitc::MODULE_CODE_ALIAS_OLD)
- NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage), "",
+ NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name,
TheModule);
else
- NewGA = GlobalIFunc::create(Ty, AddrSpace, getDecodedLinkage(Linkage), "",
+ NewGA = GlobalIFunc::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name,
nullptr, TheModule);
// Old bitcode files didn't have visibility field.
// Local linkage must have default visibility.
@@ -4570,8 +4666,8 @@ std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const {
}
ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader(
- BitstreamCursor Cursor, ModuleSummaryIndex &TheIndex)
- : BitcodeReaderBase(std::move(Cursor)), TheIndex(TheIndex) {}
+ BitstreamCursor Cursor, StringRef Strtab, ModuleSummaryIndex &TheIndex)
+ : BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex) {}
std::pair<GlobalValue::GUID, GlobalValue::GUID>
ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) {
@@ -4580,12 +4676,32 @@ ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) {
return VGI->second;
}
+void ModuleSummaryIndexBitcodeReader::setValueGUID(
+ uint64_t ValueID, StringRef ValueName, GlobalValue::LinkageTypes Linkage,
+ StringRef SourceFileName) {
+ std::string GlobalId =
+ GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName);
+ auto ValueGUID = GlobalValue::getGUID(GlobalId);
+ auto OriginalNameID = ValueGUID;
+ if (GlobalValue::isLocalLinkage(Linkage))
+ OriginalNameID = GlobalValue::getGUID(ValueName);
+ if (PrintSummaryGUIDs)
+ dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is "
+ << ValueName << "\n";
+ ValueIdToCallGraphGUIDMap[ValueID] =
+ std::make_pair(ValueGUID, OriginalNameID);
+}
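
A rough model of the identifier that gets hashed into the GUID; the assumption here is that it mirrors the documented behavior of GlobalValue::getGlobalIdentifier, which prefixes local-linkage symbols with the source file name so identically named locals in different modules hash differently:

```cpp
#include <string>

// Sketch only; the real GUID is a hash of this identifier string.
std::string globalIdentifier(const std::string &Name, bool IsLocalLinkage,
                             const std::string &SourceFileName) {
  if (IsLocalLinkage)
    return (SourceFileName.empty() ? "<unknown>" : SourceFileName) +
           ":" + Name;
  return Name;
}
```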
+
// Specialized value symbol table parser used when reading module index
// blocks where we don't actually create global values. The parsed information
// is saved in the bitcode reader for use when later parsing summaries.
Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
uint64_t Offset,
DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap) {
+ // With a strtab, the VST is not required in order to parse the summary.
+ if (UseStrtab)
+ return Error::success();
+
assert(Offset > 0 && "Expected non-zero VST offset");
uint64_t CurrentBit = jumpToValueSymbolTable(Offset, Stream);
@@ -4627,17 +4743,7 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
assert(VLI != ValueIdToLinkageMap.end() &&
"No linkage found for VST entry?");
auto Linkage = VLI->second;
- std::string GlobalId =
- GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName);
- auto ValueGUID = GlobalValue::getGUID(GlobalId);
- auto OriginalNameID = ValueGUID;
- if (GlobalValue::isLocalLinkage(Linkage))
- OriginalNameID = GlobalValue::getGUID(ValueName);
- if (PrintSummaryGUIDs)
- dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is "
- << ValueName << "\n";
- ValueIdToCallGraphGUIDMap[ValueID] =
- std::make_pair(ValueGUID, OriginalNameID);
+ setValueGUID(ValueID, ValueName, Linkage, SourceFileName);
ValueName.clear();
break;
}
@@ -4651,18 +4757,7 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
assert(VLI != ValueIdToLinkageMap.end() &&
"No linkage found for VST entry?");
auto Linkage = VLI->second;
- std::string FunctionGlobalId = GlobalValue::getGlobalIdentifier(
- ValueName, VLI->second, SourceFileName);
- auto FunctionGUID = GlobalValue::getGUID(FunctionGlobalId);
- auto OriginalNameID = FunctionGUID;
- if (GlobalValue::isLocalLinkage(Linkage))
- OriginalNameID = GlobalValue::getGUID(ValueName);
- if (PrintSummaryGUIDs)
- dbgs() << "GUID " << FunctionGUID << "(" << OriginalNameID << ") is "
- << ValueName << "\n";
- ValueIdToCallGraphGUIDMap[ValueID] =
- std::make_pair(FunctionGUID, OriginalNameID);
-
+ setValueGUID(ValueID, ValueName, Linkage, SourceFileName);
ValueName.clear();
break;
}
@@ -4749,6 +4844,11 @@ Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) {
switch (BitCode) {
default:
break; // Default behavior, ignore unknown content.
+ case bitc::MODULE_CODE_VERSION: {
+ if (Error Err = parseVersionRecord(Record).takeError())
+ return Err;
+ break;
+ }
/// MODULE_CODE_SOURCE_FILENAME: [namechar x N]
case bitc::MODULE_CODE_SOURCE_FILENAME: {
SmallString<128> ValueName;
@@ -4783,17 +4883,26 @@ Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) {
// was historically always the start of the regular bitcode header.
VSTOffset = Record[0] - 1;
break;
- // GLOBALVAR: [pointer type, isconst, initid, linkage, ...]
- // FUNCTION: [type, callingconv, isproto, linkage, ...]
- // ALIAS: [alias type, addrspace, aliasee val#, linkage, ...]
+ // v1 GLOBALVAR: [pointer type, isconst, initid, linkage, ...]
+ // v1 FUNCTION: [type, callingconv, isproto, linkage, ...]
+ // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, ...]
+ // v2: [strtab offset, strtab size, v1]
case bitc::MODULE_CODE_GLOBALVAR:
case bitc::MODULE_CODE_FUNCTION:
case bitc::MODULE_CODE_ALIAS: {
- if (Record.size() <= 3)
+ StringRef Name;
+ ArrayRef<uint64_t> GVRecord;
+ std::tie(Name, GVRecord) = readNameFromStrtab(Record);
+ if (GVRecord.size() <= 3)
return error("Invalid record");
- uint64_t RawLinkage = Record[3];
+ uint64_t RawLinkage = GVRecord[3];
GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage);
- ValueIdToLinkageMap[ValueId++] = Linkage;
+ if (!UseStrtab) {
+ ValueIdToLinkageMap[ValueId++] = Linkage;
+ break;
+ }
+
+ setValueGUID(ValueId++, Name, Linkage, SourceFileName);
break;
}
}
@@ -4904,6 +5013,12 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
switch (BitCode) {
default: // Default behavior: ignore.
break;
+ case bitc::FS_VALUE_GUID: { // [valueid, refguid]
+ uint64_t ValueID = Record[0];
+ GlobalValue::GUID RefGUID = Record[1];
+ ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
+ break;
+ }
// FS_PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
// n x (valueid)]
// FS_PERMODULE_PROFILE: [valueid, flags, instcount, numrefs,
@@ -5208,6 +5323,35 @@ const std::error_category &llvm::BitcodeErrorCategory() {
return *ErrorCategory;
}
+static Expected<StringRef> readStrtab(BitstreamCursor &Stream) {
+ if (Stream.EnterSubBlock(bitc::STRTAB_BLOCK_ID))
+ return error("Invalid record");
+
+ StringRef Strtab;
+ while (true) {
+ BitstreamEntry Entry = Stream.advance();
+ switch (Entry.Kind) {
+ case BitstreamEntry::EndBlock:
+ return Strtab;
+
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+
+ case BitstreamEntry::SubBlock:
+ if (Stream.SkipBlock())
+ return error("Malformed block");
+ break;
+
+ case BitstreamEntry::Record:
+ StringRef Blob;
+ SmallVector<uint64_t, 1> Record;
+ if (Stream.readRecord(Entry.ID, Record, &Blob) == bitc::STRTAB_BLOB)
+ Strtab = Blob;
+ break;
+ }
+ }
+}
+
//===----------------------------------------------------------------------===//
// External interface
//===----------------------------------------------------------------------===//
@@ -5260,6 +5404,22 @@ llvm::getBitcodeModuleList(MemoryBufferRef Buffer) {
continue;
}
+ if (Entry.ID == bitc::STRTAB_BLOCK_ID) {
+ Expected<StringRef> Strtab = readStrtab(Stream);
+ if (!Strtab)
+ return Strtab.takeError();
+ // This string table is used by every preceding bitcode module that does
+ // not have its own string table. A bitcode file may have multiple
+ // string tables if it was created by binary concatenation, for example
+ // with "llvm-cat -b".
+ for (auto I = Modules.rbegin(), E = Modules.rend(); I != E; ++I) {
+ if (!I->Strtab.empty())
+ break;
+ I->Strtab = *Strtab;
+ }
+ continue;
+ }
+
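
The reverse walk above can be modelled in isolation. The sketch below stands in for the BitcodeModule list with plain strings and shows why stopping at the first non-empty table is correct for concatenated files:

```cpp
#include <string>
#include <vector>

// One string table slot per module. A just-read table applies to every
// preceding module that lacks one; the walk stops at the first module
// that already owns a table, since that table (and everything before
// it) belongs to an earlier concatenated bitcode file.
void propagateStrtab(std::vector<std::string> &ModuleStrtabs,
                     const std::string &Strtab) {
  for (auto I = ModuleStrtabs.rbegin(), E = ModuleStrtabs.rend(); I != E;
       ++I) {
    if (!I->empty())
      break;
    *I = Strtab;
  }
}
```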
if (Stream.SkipBlock())
return error("Malformed block");
continue;
@@ -5296,8 +5456,8 @@ BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll,
}
Stream.JumpToBit(ModuleBit);
- auto *R =
- new BitcodeReader(std::move(Stream), ProducerIdentification, Context);
+ auto *R = new BitcodeReader(std::move(Stream), Strtab, ProducerIdentification,
+ Context);
std::unique_ptr<Module> M =
llvm::make_unique<Module>(ModuleIdentifier, Context);
@@ -5332,7 +5492,7 @@ Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() {
Stream.JumpToBit(ModuleBit);
auto Index = llvm::make_unique<ModuleSummaryIndex>();
- ModuleSummaryIndexBitcodeReader R(std::move(Stream), *Index);
+ ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, *Index);
if (Error Err = R.parseModule(ModuleIdentifier))
return std::move(Err);
diff --git a/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
index 274dfe89cce5..d089684a052f 100644
--- a/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -54,6 +54,7 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
@@ -452,6 +453,7 @@ class MetadataLoader::MetadataLoaderImpl {
bool StripTBAA = false;
bool HasSeenOldLoopTags = false;
bool NeedUpgradeToDIGlobalVariableExpression = false;
+ bool NeedDeclareExpressionUpgrade = false;
/// True if metadata is being parsed for a module being ThinLTO imported.
bool IsImporting = false;
@@ -511,6 +513,26 @@ class MetadataLoader::MetadataLoaderImpl {
}
}
+ /// Remove a leading DW_OP_deref from DIExpressions in a dbg.declare that
+ /// describes a function argument.
+ void upgradeDeclareExpressions(Function &F) {
+ if (!NeedDeclareExpressionUpgrade)
+ return;
+
+ for (auto &BB : F)
+ for (auto &I : BB)
+ if (auto *DDI = dyn_cast<DbgDeclareInst>(&I))
+ if (auto *DIExpr = DDI->getExpression())
+ if (DIExpr->startsWithDeref() &&
+ dyn_cast_or_null<Argument>(DDI->getAddress())) {
+ SmallVector<uint64_t, 8> Ops;
+ Ops.append(std::next(DIExpr->elements_begin()),
+ DIExpr->elements_end());
+ auto *E = DIExpression::get(Context, Ops);
+ DDI->setOperand(2, MetadataAsValue::get(Context, E));
+ }
+ }
+
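
Element-wise, the dbg.declare upgrade amounts to the following sketch; OpDeref is a placeholder for the real dwarf::DW_OP_deref constant, and the IR plumbing around DbgDeclareInst is omitted:

```cpp
#include <cstdint>
#include <vector>

// If the expression of a dbg.declare describing a function argument
// starts with a deref, the upgrade drops that leading element and keeps
// the rest unchanged.
std::vector<uint64_t> dropLeadingDeref(const std::vector<uint64_t> &Elts,
                                       uint64_t OpDeref) {
  if (!Elts.empty() && Elts.front() == OpDeref)
    return std::vector<uint64_t>(Elts.begin() + 1, Elts.end());
  return Elts;
}
```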
void upgradeDebugInfo() {
upgradeCUSubprograms();
upgradeCUVariables();
@@ -565,6 +587,7 @@ public:
unsigned size() const { return MetadataList.size(); }
void shrinkTo(unsigned N) { MetadataList.shrinkTo(N); }
+ void upgradeDebugIntrinsics(Function &F) { upgradeDeclareExpressions(F); }
};
static Error error(const Twine &Message) {
@@ -1520,12 +1543,32 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
return error("Invalid record");
IsDistinct = Record[0] & 1;
- bool HasOpFragment = Record[0] & 2;
+ uint64_t Version = Record[0] >> 1;
auto Elts = MutableArrayRef<uint64_t>(Record).slice(1);
- if (!HasOpFragment)
- if (unsigned N = Elts.size())
- if (N >= 3 && Elts[N - 3] == dwarf::DW_OP_bit_piece)
- Elts[N - 3] = dwarf::DW_OP_LLVM_fragment;
+ unsigned N = Elts.size();
+ // Perform various upgrades.
+ switch (Version) {
+ case 0:
+ if (N >= 3 && Elts[N - 3] == dwarf::DW_OP_bit_piece)
+ Elts[N - 3] = dwarf::DW_OP_LLVM_fragment;
+ LLVM_FALLTHROUGH;
+ case 1:
+ // Move DW_OP_deref to the end.
+ if (N && Elts[0] == dwarf::DW_OP_deref) {
+ auto End = Elts.end();
+ if (Elts.size() >= 3 && *std::prev(End, 3) == dwarf::DW_OP_LLVM_fragment)
+ End = std::prev(End, 3);
+ std::move(std::next(Elts.begin()), End, Elts.begin());
+ *std::prev(End) = dwarf::DW_OP_deref;
+ }
+ NeedDeclareExpressionUpgrade = true;
+ LLVM_FALLTHROUGH;
+ case 2:
+ // Up-to-date!
+ break;
+ default:
+ return error("Invalid record");
+ }
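
The version-1 branch of the switch is the subtle one; a standalone sketch of the rotation it performs, with placeholder opcode values standing in for the DWARF constants:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// A leading deref is rotated to the end of the expression, but stays in
// front of a trailing DW_OP_LLVM_fragment triple when one is present.
void moveDerefToEnd(std::vector<uint64_t> &Elts, uint64_t OpDeref,
                    uint64_t OpFragment) {
  if (Elts.empty() || Elts.front() != OpDeref)
    return;
  size_t End = Elts.size();
  if (End >= 3 && Elts[End - 3] == OpFragment)
    End -= 3; // keep [fragment, offset, size] as the tail
  for (size_t I = 1; I < End; ++I) // shift left over the deref...
    Elts[I - 1] = Elts[I];
  Elts[End - 1] = OpDeref;         // ...and reinsert it at the end
}
```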
MetadataList.assignValue(
GET_OR_DISTINCT(DIExpression, (Context, makeArrayRef(Record).slice(1))),
@@ -1858,3 +1901,7 @@ bool MetadataLoader::isStrippingTBAA() { return Pimpl->isStrippingTBAA(); }
unsigned MetadataLoader::size() const { return Pimpl->size(); }
void MetadataLoader::shrinkTo(unsigned N) { return Pimpl->shrinkTo(N); }
+
+void MetadataLoader::upgradeDebugIntrinsics(Function &F) {
+ return Pimpl->upgradeDebugIntrinsics(F);
+}
diff --git a/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h
index 442dfc94e4e1..f23dcc06cc94 100644
--- a/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h
+++ b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h
@@ -79,6 +79,9 @@ public:
unsigned size() const;
void shrinkTo(unsigned N);
+
+ /// Perform bitcode upgrades on llvm.dbg.* calls.
+ void upgradeDebugIntrinsics(Function &F);
};
}
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 043441bac4de..1d3cde2f5ddb 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -28,6 +28,7 @@
#include "llvm/IR/Operator.h"
#include "llvm/IR/UseListOrder.h"
#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Program.h"
@@ -76,26 +77,28 @@ protected:
/// The stream created and owned by the client.
BitstreamWriter &Stream;
- /// Saves the offset of the VSTOffset record that must eventually be
- /// backpatched with the offset of the actual VST.
- uint64_t VSTOffsetPlaceholder = 0;
-
public:
/// Constructs a BitcodeWriterBase object that writes to the provided
/// \p Stream.
BitcodeWriterBase(BitstreamWriter &Stream) : Stream(Stream) {}
protected:
- bool hasVSTOffsetPlaceholder() { return VSTOffsetPlaceholder != 0; }
- void writeValueSymbolTableForwardDecl();
void writeBitcodeHeader();
+ void writeModuleVersion();
};
+void BitcodeWriterBase::writeModuleVersion() {
+ // VERSION: [version#]
+ Stream.EmitRecord(bitc::MODULE_CODE_VERSION, ArrayRef<uint64_t>{2});
+}
+
/// Class to manage the bitcode writing for a module.
class ModuleBitcodeWriter : public BitcodeWriterBase {
/// Pointer to the buffer allocated by caller for bitcode writing.
const SmallVectorImpl<char> &Buffer;
+ StringTableBuilder &StrtabBuilder;
+
/// The Module to write to bitcode.
const Module &M;
@@ -127,15 +130,20 @@ class ModuleBitcodeWriter : public BitcodeWriterBase {
/// Tracks the last value id recorded in the GUIDToValueMap.
unsigned GlobalValueId;
+ /// Saves the offset of the VSTOffset record that must eventually be
+ /// backpatched with the offset of the actual VST.
+ uint64_t VSTOffsetPlaceholder = 0;
+
public:
/// Constructs a ModuleBitcodeWriter object for the given Module,
/// writing to the provided \p Buffer.
ModuleBitcodeWriter(const Module *M, SmallVectorImpl<char> &Buffer,
+ StringTableBuilder &StrtabBuilder,
BitstreamWriter &Stream, bool ShouldPreserveUseListOrder,
const ModuleSummaryIndex *Index, bool GenerateHash,
ModuleHash *ModHash = nullptr)
- : BitcodeWriterBase(Stream), Buffer(Buffer), M(*M),
- VE(*M, ShouldPreserveUseListOrder), Index(Index),
+ : BitcodeWriterBase(Stream), Buffer(Buffer), StrtabBuilder(StrtabBuilder),
+ M(*M), VE(*M, ShouldPreserveUseListOrder), Index(Index),
GenerateHash(GenerateHash), ModHash(ModHash),
BitcodeStartBit(Stream.GetCurrentBitNo()) {
// Assign ValueIds to any callee values in the index that came from
@@ -169,6 +177,7 @@ private:
void writeAttributeTable();
void writeTypeTable();
void writeComdats();
+ void writeValueSymbolTableForwardDecl();
void writeModuleInfo();
void writeValueAsMetadata(const ValueAsMetadata *MD,
SmallVectorImpl<uint64_t> &Record);
@@ -261,9 +270,9 @@ private:
SmallVectorImpl<uint64_t> &Vals);
void writeInstruction(const Instruction &I, unsigned InstID,
SmallVectorImpl<unsigned> &Vals);
- void writeValueSymbolTable(
- const ValueSymbolTable &VST, bool IsModuleLevel = false,
- DenseMap<const Function *, uint64_t> *FunctionToBitcodeIndex = nullptr);
+ void writeFunctionLevelValueSymbolTable(const ValueSymbolTable &VST);
+ void writeGlobalValueSymbolTable(
+ DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex);
void writeUseList(UseListOrder &&Order);
void writeUseListBlock(const Function *F);
void
@@ -477,7 +486,6 @@ public:
private:
void writeModStrings();
- void writeCombinedValueSymbolTable();
void writeCombinedGlobalValueSummary();
/// Indicates whether the provided \p ModulePath should be written into
@@ -492,15 +500,15 @@ private:
const auto &VMI = GUIDToValueIdMap.find(ValGUID);
return VMI != GUIDToValueIdMap.end();
}
+ void assignValueId(GlobalValue::GUID ValGUID) {
+ unsigned &ValueId = GUIDToValueIdMap[ValGUID];
+ if (ValueId == 0)
+ ValueId = ++GlobalValueId;
+ }
unsigned getValueId(GlobalValue::GUID ValGUID) {
- const auto &VMI = GUIDToValueIdMap.find(ValGUID);
- // If this GUID doesn't have an entry, assign one.
- if (VMI == GUIDToValueIdMap.end()) {
- GUIDToValueIdMap[ValGUID] = ++GlobalValueId;
- return GlobalValueId;
- } else {
- return VMI->second;
- }
+ auto VMI = GUIDToValueIdMap.find(ValGUID);
+ assert(VMI != GUIDToValueIdMap.end());
+ return VMI->second;
}
std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; }
};
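
The assign/get split above replaces lazy insertion in getValueId. A minimal model of the same design, where registration happens in a first pass so later lookups can assert instead of mutating the map:

```cpp
#include <cassert>
#include <cstdint>
#include <map>

// Id 0 is reserved as "not yet assigned", matching the
// `if (ValueId == 0)` test above.
struct ValueIdTable {
  std::map<uint64_t, unsigned> Map;
  unsigned NextId = 0;
  void assign(uint64_t GUID) {
    unsigned &Id = Map[GUID]; // value-initialized to 0 on first touch
    if (Id == 0)
      Id = ++NextId;
  }
  unsigned get(uint64_t GUID) const {
    auto It = Map.find(GUID);
    assert(It != Map.end() && "GUID was never assigned an id");
    return It->second;
  }
};
```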
@@ -1047,13 +1055,10 @@ static unsigned getEncodedUnnamedAddr(const GlobalValue &GV) {
void ModuleBitcodeWriter::writeComdats() {
SmallVector<unsigned, 64> Vals;
for (const Comdat *C : VE.getComdats()) {
- // COMDAT: [selection_kind, name]
+ // COMDAT: [strtab offset, strtab size, selection_kind]
+ Vals.push_back(StrtabBuilder.add(C->getName()));
+ Vals.push_back(C->getName().size());
Vals.push_back(getEncodedComdatSelectionKind(*C));
- size_t Size = C->getName().size();
- assert(isUInt<32>(Size));
- Vals.push_back(Size);
- for (char Chr : C->getName())
- Vals.push_back((unsigned char)Chr);
Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0);
Vals.clear();
}
@@ -1062,7 +1067,7 @@ void ModuleBitcodeWriter::writeComdats() {
/// Write a record that will eventually hold the word offset of the
/// module-level VST. For now the offset is 0, which will be backpatched
/// after the real VST is written. Saves the bit offset to backpatch.
-void BitcodeWriterBase::writeValueSymbolTableForwardDecl() {
+void ModuleBitcodeWriter::writeValueSymbolTableForwardDecl() {
// Write a placeholder value in for the offset of the real VST,
// which is written after the function blocks so that it can include
// the offset of each function. The placeholder offset will be
@@ -1165,6 +1170,8 @@ void ModuleBitcodeWriter::writeModuleInfo() {
// Add an abbrev for common globals with no visibility or thread localness.
auto Abbv = std::make_shared<BitCodeAbbrev>();
Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
Log2_32_Ceil(MaxGlobalType+1)));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddrSpace << 2
@@ -1188,15 +1195,42 @@ void ModuleBitcodeWriter::writeModuleInfo() {
SimpleGVarAbbrev = Stream.EmitAbbrev(std::move(Abbv));
}
- // Emit the global variable information.
SmallVector<unsigned, 64> Vals;
+ // Emit the module's source file name.
+ {
+ StringEncoding Bits = getStringEncoding(M.getSourceFileName().data(),
+ M.getSourceFileName().size());
+ BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8);
+ if (Bits == SE_Char6)
+ AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6);
+ else if (Bits == SE_Fixed7)
+ AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7);
+
+ // MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(AbbrevOpToUse);
+ unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ for (const auto P : M.getSourceFileName())
+ Vals.push_back((unsigned char)P);
+
+ // Emit the finished record.
+ Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev);
+ Vals.clear();
+ }
+
+ // Emit the global variable information.
for (const GlobalVariable &GV : M.globals()) {
unsigned AbbrevToUse = 0;
- // GLOBALVAR: [type, isconst, initid,
+ // GLOBALVAR: [strtab offset, strtab size, type, isconst, initid,
// linkage, alignment, section, visibility, threadlocal,
// unnamed_addr, externally_initialized, dllstorageclass,
// comdat]
+ Vals.push_back(StrtabBuilder.add(GV.getName()));
+ Vals.push_back(GV.getName().size());
Vals.push_back(VE.getTypeID(GV.getValueType()));
Vals.push_back(GV.getType()->getAddressSpace() << 2 | 2 | GV.isConstant());
Vals.push_back(GV.isDeclaration() ? 0 :
@@ -1226,9 +1260,12 @@ void ModuleBitcodeWriter::writeModuleInfo() {
// Emit the function proto information.
for (const Function &F : M) {
- // FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment,
- // section, visibility, gc, unnamed_addr, prologuedata,
- // dllstorageclass, comdat, prefixdata, personalityfn]
+ // FUNCTION: [strtab offset, strtab size, type, callingconv, isproto,
+ // linkage, paramattrs, alignment, section, visibility, gc,
+ // unnamed_addr, prologuedata, dllstorageclass, comdat,
+ // prefixdata, personalityfn]
+ Vals.push_back(StrtabBuilder.add(F.getName()));
+ Vals.push_back(F.getName().size());
Vals.push_back(VE.getTypeID(F.getFunctionType()));
Vals.push_back(F.getCallingConv());
Vals.push_back(F.isDeclaration());
@@ -1255,8 +1292,10 @@ void ModuleBitcodeWriter::writeModuleInfo() {
// Emit the alias information.
for (const GlobalAlias &A : M.aliases()) {
- // ALIAS: [alias type, aliasee val#, linkage, visibility, dllstorageclass,
- // threadlocal, unnamed_addr]
+ // ALIAS: [strtab offset, strtab size, alias type, aliasee val#, linkage,
+ // visibility, dllstorageclass, threadlocal, unnamed_addr]
+ Vals.push_back(StrtabBuilder.add(A.getName()));
+ Vals.push_back(A.getName().size());
Vals.push_back(VE.getTypeID(A.getValueType()));
Vals.push_back(A.getType()->getAddressSpace());
Vals.push_back(VE.getValueID(A.getAliasee()));
@@ -1272,7 +1311,10 @@ void ModuleBitcodeWriter::writeModuleInfo() {
// Emit the ifunc information.
for (const GlobalIFunc &I : M.ifuncs()) {
- // IFUNC: [ifunc type, address space, resolver val#, linkage, visibility]
+ // IFUNC: [strtab offset, strtab size, ifunc type, address space, resolver
+ // val#, linkage, visibility]
+ Vals.push_back(StrtabBuilder.add(I.getName()));
+ Vals.push_back(I.getName().size());
Vals.push_back(VE.getTypeID(I.getValueType()));
Vals.push_back(I.getType()->getAddressSpace());
Vals.push_back(VE.getValueID(I.getResolver()));
@@ -1282,34 +1324,6 @@ void ModuleBitcodeWriter::writeModuleInfo() {
Vals.clear();
}
- // Emit the module's source file name.
- {
- StringEncoding Bits = getStringEncoding(M.getSourceFileName().data(),
- M.getSourceFileName().size());
- BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8);
- if (Bits == SE_Char6)
- AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6);
- else if (Bits == SE_Fixed7)
- AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7);
-
- // MODULE_CODE_SOURCE_FILENAME: [namechar x N]
- auto Abbv = std::make_shared<BitCodeAbbrev>();
- Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(AbbrevOpToUse);
- unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv));
-
- for (const auto P : M.getSourceFileName())
- Vals.push_back((unsigned char)P);
-
- // Emit the finished record.
- Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev);
- Vals.clear();
- }
-
- // If we have a VST, write the VSTOFFSET record placeholder.
- if (M.getValueSymbolTable().empty())
- return;
writeValueSymbolTableForwardDecl();
}
@@ -1757,9 +1771,8 @@ void ModuleBitcodeWriter::writeDIExpression(const DIExpression *N,
SmallVectorImpl<uint64_t> &Record,
unsigned Abbrev) {
Record.reserve(N->getElements().size() + 1);
-
- const uint64_t HasOpFragmentFlag = 1 << 1;
- Record.push_back((uint64_t)N->isDistinct() | HasOpFragmentFlag);
+ const uint64_t Version = 2 << 1;
+ Record.push_back((uint64_t)N->isDistinct() | Version);
Record.append(N->elements_begin(), N->elements_end());
Stream.EmitRecord(bitc::METADATA_EXPRESSION, Record, Abbrev);
@@ -2839,77 +2852,59 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.clear();
}
-/// Emit names for globals/functions etc. \p IsModuleLevel is true when
-/// we are writing the module-level VST, where we are including a function
-/// bitcode index and need to backpatch the VST forward declaration record.
-void ModuleBitcodeWriter::writeValueSymbolTable(
- const ValueSymbolTable &VST, bool IsModuleLevel,
- DenseMap<const Function *, uint64_t> *FunctionToBitcodeIndex) {
- if (VST.empty()) {
- // writeValueSymbolTableForwardDecl should have returned early as
- // well. Ensure this handling remains in sync by asserting that
- // the placeholder offset is not set.
- assert(!IsModuleLevel || !hasVSTOffsetPlaceholder());
- return;
- }
+/// Write a GlobalValue VST to the module. The purpose of this data structure is
+/// to allow clients to efficiently find the function body.
+void ModuleBitcodeWriter::writeGlobalValueSymbolTable(
+ DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex) {
+ // Get the offset of the VST we are writing, and backpatch it into
+ // the VST forward declaration record.
+ uint64_t VSTOffset = Stream.GetCurrentBitNo();
+ // The BitcodeStartBit was the stream offset of the identification block.
+ VSTOffset -= bitcodeStartBit();
+ assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned");
+ // Note that we add 1 here because the offset is relative to one word
+ // before the start of the identification block, which was historically
+ // always the start of the regular bitcode header.
+ Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32 + 1);
+
+ Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
+ unsigned FnEntryAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ for (const Function &F : M) {
+ uint64_t Record[2];
- if (IsModuleLevel && hasVSTOffsetPlaceholder()) {
- // Get the offset of the VST we are writing, and backpatch it into
- // the VST forward declaration record.
- uint64_t VSTOffset = Stream.GetCurrentBitNo();
- // The BitcodeStartBit was the stream offset of the identification block.
- VSTOffset -= bitcodeStartBit();
- assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned");
+ if (F.isDeclaration())
+ continue;
+
+ Record[0] = VE.getValueID(&F);
+
+ // Save the word offset of the function (from the start of the
+ // actual bitcode written to the stream).
+ uint64_t BitcodeIndex = FunctionToBitcodeIndex[&F] - bitcodeStartBit();
+ assert((BitcodeIndex & 31) == 0 && "function block not 32-bit aligned");
// Note that we add 1 here because the offset is relative to one word
// before the start of the identification block, which was historically
// always the start of the regular bitcode header.
- Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32 + 1);
- }
-
- Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+ Record[1] = BitcodeIndex / 32 + 1;
- // For the module-level VST, add abbrev Ids for the VST_CODE_FNENTRY
- // records, which are not used in the per-function VSTs.
- unsigned FnEntry8BitAbbrev;
- unsigned FnEntry7BitAbbrev;
- unsigned FnEntry6BitAbbrev;
- unsigned GUIDEntryAbbrev;
- if (IsModuleLevel && hasVSTOffsetPlaceholder()) {
- // 8-bit fixed-width VST_CODE_FNENTRY function strings.
- auto Abbv = std::make_shared<BitCodeAbbrev>();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
- FnEntry8BitAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+ Stream.EmitRecord(bitc::VST_CODE_FNENTRY, Record, FnEntryAbbrev);
+ }
- // 7-bit fixed width VST_CODE_FNENTRY function strings.
- Abbv = std::make_shared<BitCodeAbbrev>();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
- FnEntry7BitAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+ Stream.ExitBlock();
+}
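
The backpatched value mirrors the reader-side decoding shown earlier; in isolation, with illustrative names:

```cpp
#include <cstdint>

// The writer stores the VST's 32-bit-word offset plus one, measured from
// one word before the start of the bitcode proper; this is exactly the
// bias the reader undoes with its `Record[1] - 1`.
uint64_t vstOffsetRecordValue(uint64_t VSTBitOffset,
                              uint64_t BitcodeStartBit) {
  uint64_t RelBits = VSTBitOffset - BitcodeStartBit; // 32-bit aligned
  return RelBits / 32 + 1;
}
```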
- // 6-bit char6 VST_CODE_FNENTRY function strings.
- Abbv = std::make_shared<BitCodeAbbrev>();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
- FnEntry6BitAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+/// Emit names for arguments, instructions and basic blocks in a function.
+void ModuleBitcodeWriter::writeFunctionLevelValueSymbolTable(
+ const ValueSymbolTable &VST) {
+ if (VST.empty())
+ return;
- // FIXME: Change the name of this record as it is now used by
- // the per-module index as well.
- Abbv = std::make_shared<BitCodeAbbrev>();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_COMBINED_ENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // refguid
- GUIDEntryAbbrev = Stream.EmitAbbrev(std::move(Abbv));
- }
+ Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
// FIXME: Set up the abbrev, we know how many values there are!
// FIXME: We know if the type names can use 7-bit ascii.
@@ -2923,38 +2918,13 @@ void ModuleBitcodeWriter::writeValueSymbolTable(
unsigned AbbrevToUse = VST_ENTRY_8_ABBREV;
NameVals.push_back(VE.getValueID(Name.getValue()));
- Function *F = dyn_cast<Function>(Name.getValue());
-
// VST_CODE_ENTRY: [valueid, namechar x N]
- // VST_CODE_FNENTRY: [valueid, funcoffset, namechar x N]
// VST_CODE_BBENTRY: [bbid, namechar x N]
unsigned Code;
if (isa<BasicBlock>(Name.getValue())) {
Code = bitc::VST_CODE_BBENTRY;
if (Bits == SE_Char6)
AbbrevToUse = VST_BBENTRY_6_ABBREV;
- } else if (F && !F->isDeclaration()) {
- // Must be the module-level VST, where we pass in the Index and
- // have a VSTOffsetPlaceholder. The function-level VST should not
- // contain any Function symbols.
- assert(FunctionToBitcodeIndex);
- assert(hasVSTOffsetPlaceholder());
-
- // Save the word offset of the function (from the start of the
- // actual bitcode written to the stream).
- uint64_t BitcodeIndex = (*FunctionToBitcodeIndex)[F] - bitcodeStartBit();
- assert((BitcodeIndex & 31) == 0 && "function block not 32-bit aligned");
- // Note that we add 1 here because the offset is relative to one word
- // before the start of the identification block, which was historically
- // always the start of the regular bitcode header.
- NameVals.push_back(BitcodeIndex / 32 + 1);
-
- Code = bitc::VST_CODE_FNENTRY;
- AbbrevToUse = FnEntry8BitAbbrev;
- if (Bits == SE_Char6)
- AbbrevToUse = FnEntry6BitAbbrev;
- else if (Bits == SE_Fixed7)
- AbbrevToUse = FnEntry7BitAbbrev;
} else {
Code = bitc::VST_CODE_ENTRY;
if (Bits == SE_Char6)
@@ -2970,47 +2940,7 @@ void ModuleBitcodeWriter::writeValueSymbolTable(
Stream.EmitRecord(Code, NameVals, AbbrevToUse);
NameVals.clear();
}
- // Emit any GUID valueIDs created for indirect call edges into the
- // module-level VST.
- if (IsModuleLevel && hasVSTOffsetPlaceholder())
- for (const auto &GI : valueIds()) {
- NameVals.push_back(GI.second);
- NameVals.push_back(GI.first);
- Stream.EmitRecord(bitc::VST_CODE_COMBINED_ENTRY, NameVals,
- GUIDEntryAbbrev);
- NameVals.clear();
- }
- Stream.ExitBlock();
-}
-
-/// Emit function names and summary offsets for the combined index
-/// used by ThinLTO.
-void IndexBitcodeWriter::writeCombinedValueSymbolTable() {
- assert(hasVSTOffsetPlaceholder() && "Expected non-zero VSTOffsetPlaceholder");
- // Get the offset of the VST we are writing, and backpatch it into
- // the VST forward declaration record.
- uint64_t VSTOffset = Stream.GetCurrentBitNo();
- assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned");
- Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32);
-
- Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
-
- auto Abbv = std::make_shared<BitCodeAbbrev>();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_COMBINED_ENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // refguid
- unsigned EntryAbbrev = Stream.EmitAbbrev(std::move(Abbv));
- SmallVector<uint64_t, 64> NameVals;
- for (const auto &GVI : valueIds()) {
- // VST_CODE_COMBINED_ENTRY: [valueid, refguid]
- NameVals.push_back(GVI.second);
- NameVals.push_back(GVI.first);
-
- // Emit the finished record.
- Stream.EmitRecord(bitc::VST_CODE_COMBINED_ENTRY, NameVals, EntryAbbrev);
- NameVals.clear();
- }
Stream.ExitBlock();
}
@@ -3114,7 +3044,7 @@ void ModuleBitcodeWriter::writeFunction(
// Emit names for all the instructions etc.
if (auto *Symtab = F.getValueSymbolTable())
- writeValueSymbolTable(*Symtab);
+ writeFunctionLevelValueSymbolTable(*Symtab);
if (NeedsMetadataAttachment)
writeFunctionMetadataAttachment(F);
@@ -3502,6 +3432,11 @@ void ModuleBitcodeWriter::writePerModuleGlobalValueSummary() {
return;
}
+ for (const auto &GVI : valueIds()) {
+ Stream.EmitRecord(bitc::FS_VALUE_GUID,
+ ArrayRef<uint64_t>{GVI.second, GVI.first});
+ }
+
// Abbrev for FS_PERMODULE.
auto Abbv = std::make_shared<BitCodeAbbrev>();
Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE));
@@ -3594,6 +3529,39 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
Stream.EnterSubblock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID, 3);
Stream.EmitRecord(bitc::FS_VERSION, ArrayRef<uint64_t>{INDEX_VERSION});
+ // Create value IDs for undefined references.
+ for (const auto &I : *this) {
+ if (auto *VS = dyn_cast<GlobalVarSummary>(I.second)) {
+ for (auto &RI : VS->refs())
+ assignValueId(RI.getGUID());
+ continue;
+ }
+
+ auto *FS = dyn_cast<FunctionSummary>(I.second);
+ if (!FS)
+ continue;
+ for (auto &RI : FS->refs())
+ assignValueId(RI.getGUID());
+
+ for (auto &EI : FS->calls()) {
+ GlobalValue::GUID GUID = EI.first.getGUID();
+ if (!hasValueId(GUID)) {
+ // For SamplePGO, the indirect call targets for local functions will
+ // have their original names annotated in the profile. We try to find
+ // the corresponding PGOFuncName and use it as the GUID.
+ GUID = Index.getGUIDFromOriginalID(GUID);
+ if (GUID == 0 || !hasValueId(GUID))
+ continue;
+ }
+ assignValueId(GUID);
+ }
+ }
+
+ for (const auto &GVI : valueIds()) {
+ Stream.EmitRecord(bitc::FS_VALUE_GUID,
+ ArrayRef<uint64_t>{GVI.second, GVI.first});
+ }
+
// Abbrev for FS_COMBINED.
auto Abbv = std::make_shared<BitCodeAbbrev>();
Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED));
@@ -3808,10 +3776,7 @@ void ModuleBitcodeWriter::write() {
Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
size_t BlockStartPos = Buffer.size();
- SmallVector<unsigned, 1> Vals;
- unsigned CurVersion = 1;
- Vals.push_back(CurVersion);
- Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
+ writeModuleVersion();
// Emit blockinfo, which defines the standard abbreviations etc.
writeBlockInfo();
@@ -3857,8 +3822,7 @@ void ModuleBitcodeWriter::write() {
if (Index)
writePerModuleGlobalValueSummary();
- writeValueSymbolTable(M.getValueSymbolTable(),
- /* IsModuleLevel */ true, &FunctionToBitcodeIndex);
+ writeGlobalValueSymbolTable(FunctionToBitcodeIndex);
writeModuleHash(BlockStartPos);
@@ -3946,13 +3910,45 @@ BitcodeWriter::BitcodeWriter(SmallVectorImpl<char> &Buffer)
writeBitcodeHeader(*Stream);
}
-BitcodeWriter::~BitcodeWriter() = default;
+BitcodeWriter::~BitcodeWriter() { assert(WroteStrtab); }
+
+void BitcodeWriter::writeBlob(unsigned Block, unsigned Record, StringRef Blob) {
+ Stream->EnterSubblock(Block, 3);
+
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(Record));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ auto AbbrevNo = Stream->EmitAbbrev(std::move(Abbv));
+
+ Stream->EmitRecordWithBlob(AbbrevNo, ArrayRef<uint64_t>{Record}, Blob);
+
+ Stream->ExitBlock();
+}
+
+void BitcodeWriter::writeStrtab() {
+ assert(!WroteStrtab);
+
+ std::vector<char> Strtab;
+ StrtabBuilder.finalizeInOrder();
+ Strtab.resize(StrtabBuilder.getSize());
+ StrtabBuilder.write((uint8_t *)Strtab.data());
+
+ writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB,
+ {Strtab.data(), Strtab.size()});
+
+ WroteStrtab = true;
+}
+
+void BitcodeWriter::copyStrtab(StringRef Strtab) {
+ writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, Strtab);
+ WroteStrtab = true;
+}
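
A toy stand-in for StringTableBuilder showing the blob layout the new records rely on (illustrative, with no tail merging or reordering):

```cpp
#include <cstdint>
#include <string>
#include <utility>

// Names are appended back to back; each caller keeps the returned
// (offset, size) pair to emit in its record instead of inline characters.
struct ToyStrtabBuilder {
  std::string Blob;
  std::pair<uint64_t, uint64_t> add(const std::string &Name) {
    uint64_t Offset = Blob.size();
    Blob += Name;
    return {Offset, Name.size()};
  }
};
```

Because the records holding (offset, size) pairs are emitted before the table itself, offsets handed out by add() must remain stable, which is presumably why the writer calls finalizeInOrder() rather than the size-optimizing finalize().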
void BitcodeWriter::writeModule(const Module *M,
bool ShouldPreserveUseListOrder,
const ModuleSummaryIndex *Index,
bool GenerateHash, ModuleHash *ModHash) {
- ModuleBitcodeWriter ModuleWriter(M, Buffer, *Stream,
+ ModuleBitcodeWriter ModuleWriter(M, Buffer, StrtabBuilder, *Stream,
ShouldPreserveUseListOrder, Index,
GenerateHash, ModHash);
ModuleWriter.write();
@@ -3976,6 +3972,7 @@ void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out,
BitcodeWriter Writer(Buffer);
Writer.writeModule(M, ShouldPreserveUseListOrder, Index, GenerateHash,
ModHash);
+ Writer.writeStrtab();
if (TT.isOSDarwin() || TT.isOSBinFormatMachO())
emitDarwinBCHeaderAndTrailer(Buffer, TT);
@@ -3987,13 +3984,7 @@ void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out,
void IndexBitcodeWriter::write() {
Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
- SmallVector<unsigned, 1> Vals;
- unsigned CurVersion = 1;
- Vals.push_back(CurVersion);
- Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
-
- // If we have a VST, write the VSTOFFSET record placeholder.
- writeValueSymbolTableForwardDecl();
+ writeModuleVersion();
// Write the module paths in the combined index.
writeModStrings();
@@ -4001,10 +3992,6 @@ void IndexBitcodeWriter::write() {
// Write the summary combined index records.
writeCombinedGlobalValueSummary();
- // Need a special VST writer for the combined index (we don't have a
- // real VST and real values when this is invoked).
- writeCombinedValueSymbolTable();
-
Stream.ExitBlock();
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 6c18d56b8272..028c79f3ab6d 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -834,9 +834,9 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
OS << " <- ";
// The second operand is only an offset if it's an immediate.
- bool Deref = MI->getOperand(0).isReg() && MI->getOperand(1).isImm();
- int64_t Offset = Deref ? MI->getOperand(1).getImm() : 0;
-
+ bool Deref = false;
+ bool MemLoc = MI->getOperand(0).isReg() && MI->getOperand(1).isImm();
+ int64_t Offset = MemLoc ? MI->getOperand(1).getImm() : 0;
for (unsigned i = 0; i < Expr->getNumElements(); ++i) {
uint64_t Op = Expr->getElement(i);
if (Op == dwarf::DW_OP_LLVM_fragment) {
@@ -844,7 +844,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
break;
} else if (Deref) {
// We currently don't support extra Offsets or derefs after the first
- // one. Bail out early instead of emitting an incorrect comment
+ // one. Bail out early instead of emitting an incorrect comment.
OS << " [complex expression]";
AP.OutStreamer->emitRawComment(OS.str());
return true;
@@ -899,12 +899,12 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
AP.OutStreamer->emitRawComment(OS.str());
return true;
}
- if (Deref)
+ if (MemLoc || Deref)
OS << '[';
OS << PrintReg(Reg, AP.MF->getSubtarget().getRegisterInfo());
}
- if (Deref)
+ if (MemLoc || Deref)
OS << '+' << Offset << ']';
// NOTE: Want this comment at start of line, don't emit with AddComment.
@@ -1356,7 +1356,7 @@ bool AsmPrinter::doFinalization(Module &M) {
OutContext.getOrCreateSymbol(StringRef("__morestack_addr"));
OutStreamer->EmitLabel(AddrSymbol);
- unsigned PtrSize = M.getDataLayout().getPointerSize(0);
+ unsigned PtrSize = MAI->getCodePointerSize();
OutStreamer->EmitSymbolValue(GetExternalSymbolSymbol("__morestack"),
PtrSize);
}
@@ -2246,7 +2246,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) {
// chu[nk1 chu][nk2 chu] ... [nkN-1 chunkN]
ExtraBits = Realigned.getRawData()[0] &
(((uint64_t)-1) >> (64 - ExtraBitsSize));
- Realigned = Realigned.lshr(ExtraBitsSize);
+ Realigned.lshrInPlace(ExtraBitsSize);
} else
ExtraBits = Realigned.getRawData()[BitWidth / 64];
}
@@ -2781,7 +2781,7 @@ void AsmPrinter::emitXRayTable() {
// before the function's end, we assume that this is happening after
// the last return instruction.
- auto WordSizeBytes = TM.getPointerSize();
+ auto WordSizeBytes = MAI->getCodePointerSize();
MCSymbol *Tmp = OutContext.createTempSymbol("xray_synthetic_", true);
OutStreamer->EmitCodeAlignment(16);
OutStreamer->EmitSymbolValue(Tmp, WordSizeBytes, false);
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 383b8cddb1a0..2571f6869651 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -1136,7 +1136,7 @@ TypeIndex CodeViewDebug::lowerTypeArray(const DICompositeType *Ty) {
DITypeRef ElementTypeRef = Ty->getBaseType();
TypeIndex ElementTypeIndex = getTypeIndex(ElementTypeRef);
// IndexType is size_t, which depends on the bitness of the target.
- TypeIndex IndexType = Asm->MAI->getPointerSize() == 8
+ TypeIndex IndexType = Asm->TM.getPointerSize() == 8
? TypeIndex(SimpleTypeKind::UInt64Quad)
: TypeIndex(SimpleTypeKind::UInt32Long);
@@ -1342,8 +1342,8 @@ TypeIndex CodeViewDebug::lowerTypeMemberPointer(const DIDerivedType *Ty) {
assert(Ty->getTag() == dwarf::DW_TAG_ptr_to_member_type);
TypeIndex ClassTI = getTypeIndex(Ty->getClassType());
TypeIndex PointeeTI = getTypeIndex(Ty->getBaseType(), Ty->getClassType());
- PointerKind PK = Asm->MAI->getPointerSize() == 8 ? PointerKind::Near64
- : PointerKind::Near32;
+ PointerKind PK = Asm->TM.getPointerSize() == 8 ? PointerKind::Near64
+ : PointerKind::Near32;
bool IsPMF = isa<DISubroutineType>(Ty->getBaseType());
PointerMode PM = IsPMF ? PointerMode::PointerToMemberFunction
: PointerMode::PointerToDataMember;
@@ -1458,7 +1458,8 @@ TypeIndex CodeViewDebug::lowerTypeMemberFunction(const DISubroutineType *Ty,
}
TypeIndex CodeViewDebug::lowerTypeVFTableShape(const DIDerivedType *Ty) {
- unsigned VSlotCount = Ty->getSizeInBits() / (8 * Asm->MAI->getPointerSize());
+ unsigned VSlotCount =
+ Ty->getSizeInBits() / (8 * Asm->MAI->getCodePointerSize());
SmallVector<VFTableSlotKind, 4> Slots(VSlotCount, VFTableSlotKind::Near);
VFTableShapeRecord VFTSR(Slots);
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
index b510e0ef36ac..31c2b3b5e752 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -31,6 +31,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "dwarfdebug"
+
//===----------------------------------------------------------------------===//
// DIEAbbrevData Implementation
//===----------------------------------------------------------------------===//
@@ -79,15 +81,22 @@ void DIEAbbrev::Emit(const AsmPrinter *AP) const {
dwarf::AttributeString(AttrData.getAttribute()).data());
// Emit form type.
+#ifndef NDEBUG
+ // Could be an assertion, but this way we can see the failing form code
+ // easily, which helps track down where it came from.
+ if (!dwarf::isValidFormForVersion(AttrData.getForm(),
+ AP->getDwarfVersion())) {
+ DEBUG(dbgs() << "Invalid form " << format("0x%x", AttrData.getForm())
+ << " for DWARF version " << AP->getDwarfVersion() << "\n");
+ llvm_unreachable("Invalid form for specified DWARF version");
+ }
+#endif
AP->EmitULEB128(AttrData.getForm(),
dwarf::FormEncodingString(AttrData.getForm()).data());
// Emit value for DW_FORM_implicit_const.
- if (AttrData.getForm() == dwarf::DW_FORM_implicit_const) {
- assert(AP->getDwarfVersion() >= 5 &&
- "DW_FORM_implicit_const is supported starting from DWARFv5");
+ if (AttrData.getForm() == dwarf::DW_FORM_implicit_const)
AP->EmitSLEB128(AttrData.getValue());
- }
}
// Mark end of abbreviation.
@@ -518,7 +527,7 @@ unsigned DIELabel::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_sec_offset) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getPointerSize();
+ return AP->MAI->getCodePointerSize();
}
LLVM_DUMP_METHOD
@@ -540,7 +549,7 @@ unsigned DIEDelta::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_sec_offset) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getPointerSize();
+ return AP->MAI->getCodePointerSize();
}
LLVM_DUMP_METHOD
@@ -682,7 +691,7 @@ unsigned DIEEntry::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
return getULEB128Size(Entry->getOffset());
case dwarf::DW_FORM_ref_addr:
if (AP->getDwarfVersion() == 2)
- return AP->getPointerSize();
+ return AP->MAI->getCodePointerSize();
switch (AP->OutStreamer->getContext().getDwarfFormat()) {
case dwarf::DWARF32:
return 4;
@@ -808,7 +817,7 @@ unsigned DIELocList::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
return 4;
if (Form == dwarf::DW_FORM_sec_offset)
return 4;
- return AP->getPointerSize();
+ return AP->MAI->getCodePointerSize();
}
/// EmitValue - Emit label value.
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index a550ff2fb90f..738e062cb93f 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -547,18 +547,19 @@ DIE *DwarfCompileUnit::constructVariableDIEImpl(const DbgVariable &DV,
DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
for (auto &Fragment : DV.getFrameIndexExprs()) {
unsigned FrameReg = 0;
+ const DIExpression *Expr = Fragment.Expr;
const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
int Offset = TFI->getFrameIndexReference(*Asm->MF, Fragment.FI, FrameReg);
- DwarfExpr.addFragmentOffset(Fragment.Expr);
+ DwarfExpr.addFragmentOffset(Expr);
SmallVector<uint64_t, 8> Ops;
Ops.push_back(dwarf::DW_OP_plus);
Ops.push_back(Offset);
- Ops.push_back(dwarf::DW_OP_deref);
- Ops.append(Fragment.Expr->elements_begin(), Fragment.Expr->elements_end());
- DIExpressionCursor Expr(Ops);
+ Ops.append(Expr->elements_begin(), Expr->elements_end());
+ DIExpressionCursor Cursor(Ops);
+ DwarfExpr.setMemoryLocationKind();
DwarfExpr.addMachineRegExpression(
- *Asm->MF->getSubtarget().getRegisterInfo(), Expr, FrameReg);
- DwarfExpr.addExpression(std::move(Expr));
+ *Asm->MF->getSubtarget().getRegisterInfo(), Cursor, FrameReg);
+ DwarfExpr.addExpression(std::move(Cursor));
}
addBlock(*VariableDie, dwarf::DW_AT_location, DwarfExpr.finalize());
@@ -779,12 +780,13 @@ void DwarfCompileUnit::addAddress(DIE &Die, dwarf::Attribute Attribute,
const MachineLocation &Location) {
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
+ if (Location.isIndirect())
+ DwarfExpr.setMemoryLocationKind();
SmallVector<uint64_t, 8> Ops;
- if (Location.isIndirect()) {
+ if (Location.isIndirect() && Location.getOffset()) {
Ops.push_back(dwarf::DW_OP_plus);
Ops.push_back(Location.getOffset());
- Ops.push_back(dwarf::DW_OP_deref);
}
DIExpressionCursor Cursor(Ops);
const TargetRegisterInfo &TRI = *Asm->MF->getSubtarget().getRegisterInfo();
@@ -807,12 +809,13 @@ void DwarfCompileUnit::addComplexAddress(const DbgVariable &DV, DIE &Die,
DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
const DIExpression *DIExpr = DV.getSingleExpression();
DwarfExpr.addFragmentOffset(DIExpr);
+ if (Location.isIndirect())
+ DwarfExpr.setMemoryLocationKind();
SmallVector<uint64_t, 8> Ops;
- if (Location.isIndirect()) {
+ if (Location.isIndirect() && Location.getOffset()) {
Ops.push_back(dwarf::DW_OP_plus);
Ops.push_back(Location.getOffset());
- Ops.push_back(dwarf::DW_OP_deref);
}
Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
DIExpressionCursor Cursor(Ops);
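The common thread in these DwarfCompileUnit hunks: an indirect MachineLocation is now lowered as a proper DWARF memory location description (via setMemoryLocationKind()) instead of a register expression with a trailing DW_OP_deref, and the offset prefix is only pushed when it is nonzero. Roughly, for a variable living at register plus offset (our notation, not literal emitter output):

    // before: DW_OP_breg<Reg> 0, DW_OP_plus_uconst <off>, DW_OP_deref
    // after:  DW_OP_breg<Reg> <off>   // memory location; the deref is implicit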
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 5ce111309208..d72656bcc58d 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1517,13 +1517,12 @@ static void emitDebugLocValue(const AsmPrinter &AP, const DIBasicType *BT,
DwarfExpr.addUnsignedConstant(Value.getInt());
} else if (Value.isLocation()) {
MachineLocation Location = Value.getLoc();
-
+ if (Location.isIndirect())
+ DwarfExpr.setMemoryLocationKind();
SmallVector<uint64_t, 8> Ops;
- // FIXME: Should this condition be Location.isIndirect() instead?
- if (Location.getOffset()) {
+ if (Location.isIndirect() && Location.getOffset()) {
Ops.push_back(dwarf::DW_OP_plus);
Ops.push_back(Location.getOffset());
- Ops.push_back(dwarf::DW_OP_deref);
}
Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
DIExpressionCursor Cursor(Ops);
@@ -1578,7 +1577,7 @@ void DwarfDebug::emitDebugLoc() {
// Start the dwarf loc section.
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfLocSection());
- unsigned char Size = Asm->getDataLayout().getPointerSize();
+ unsigned char Size = Asm->MAI->getCodePointerSize();
for (const auto &List : DebugLocs.getLists()) {
Asm->OutStreamer->EmitLabel(List.Label);
const DwarfCompileUnit *CU = List.CU;
@@ -1708,7 +1707,7 @@ void DwarfDebug::emitDebugARanges() {
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfARangesSection());
- unsigned PtrSize = Asm->getDataLayout().getPointerSize();
+ unsigned PtrSize = Asm->MAI->getCodePointerSize();
// Build a list of CUs used.
std::vector<DwarfCompileUnit *> CUs;
@@ -1791,7 +1790,7 @@ void DwarfDebug::emitDebugRanges() {
Asm->getObjFileLowering().getDwarfRangesSection());
// Size for our labels.
- unsigned char Size = Asm->getDataLayout().getPointerSize();
+ unsigned char Size = Asm->MAI->getCodePointerSize();
// Grab the specific ranges for the compile units in the module.
for (const auto &I : CUMap) {
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index debe88f3b1ee..f65dc151f301 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -23,9 +23,12 @@
using namespace llvm;
void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
- assert(DwarfReg >= 0 && "invalid negative dwarf register number");
- if (DwarfReg < 32) {
- emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
+ assert(DwarfReg >= 0 && "invalid negative dwarf register number");
+ assert((LocationKind == Unknown || LocationKind == Register) &&
+ "location description already locked down");
+ LocationKind = Register;
+ if (DwarfReg < 32) {
+ emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
} else {
emitOp(dwarf::DW_OP_regx, Comment);
emitUnsigned(DwarfReg);
@@ -34,6 +37,7 @@ void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
void DwarfExpression::addBReg(int DwarfReg, int Offset) {
assert(DwarfReg >= 0 && "invalid negative dwarf register number");
+ assert(LocationKind != Register && "location description already locked down");
if (DwarfReg < 32) {
emitOp(dwarf::DW_OP_breg0 + DwarfReg);
} else {
@@ -156,18 +160,23 @@ void DwarfExpression::addStackValue() {
}
void DwarfExpression::addSignedConstant(int64_t Value) {
+ assert(LocationKind == Implicit || LocationKind == Unknown);
+ LocationKind = Implicit;
emitOp(dwarf::DW_OP_consts);
emitSigned(Value);
- addStackValue();
}
void DwarfExpression::addUnsignedConstant(uint64_t Value) {
+ assert(LocationKind == Implicit || LocationKind == Unknown);
+ LocationKind = Implicit;
emitOp(dwarf::DW_OP_constu);
emitUnsigned(Value);
- addStackValue();
}
void DwarfExpression::addUnsignedConstant(const APInt &Value) {
+ assert(LocationKind == Implicit || LocationKind == Unknown);
+ LocationKind = Implicit;
+
unsigned Size = Value.getBitWidth();
const uint64_t *Data = Value.getRawData();
@@ -178,7 +187,8 @@ void DwarfExpression::addUnsignedConstant(const APInt &Value) {
addUnsignedConstant(*Data++);
if (Offset == 0 && Size <= 64)
break;
- addOpPiece(std::min(Size-Offset, 64u), Offset);
+ addStackValue();
+ addOpPiece(std::min(Size - Offset, 64u), Offset);
Offset += 64;
}
}
@@ -206,7 +216,7 @@ bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
}
// Handle simple register locations.
- if (!HasComplexExpression) {
+ if (LocationKind != Memory && !HasComplexExpression) {
for (auto &Reg : DwarfRegs) {
if (Reg.DwarfRegNo >= 0)
addReg(Reg.DwarfRegNo, Reg.Comment);
@@ -216,62 +226,65 @@ bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
return true;
}
+ // Don't emit locations that cannot be expressed without DW_OP_stack_value.
+ if (DwarfVersion < 4)
+ if (std::any_of(ExprCursor.begin(), ExprCursor.end(),
+ [](DIExpression::ExprOperand Op) -> bool {
+ return Op.getOp() == dwarf::DW_OP_stack_value;
+ })) {
+ DwarfRegs.clear();
+ return false;
+ }
+
assert(DwarfRegs.size() == 1);
auto Reg = DwarfRegs[0];
- bool FBReg = isFrameRegister(TRI, MachineReg);
+ bool FBReg = isFrameRegister(TRI, MachineReg);
+ int SignedOffset = 0;
assert(Reg.Size == 0 && "subregister has same size as superregister");
// Pattern-match combinations for which more efficient representations exist.
- switch (Op->getOp()) {
- default: {
- if (FBReg)
- addFBReg(0);
- else
- addReg(Reg.DwarfRegNo, 0);
- break;
+ // [Reg, Offset, DW_OP_plus] --> [DW_OP_breg, Offset].
+ // [Reg, Offset, DW_OP_minus] --> [DW_OP_breg, -Offset].
+ // If Reg is a subregister we need to mask it out before subtracting.
+ if (Op && ((Op->getOp() == dwarf::DW_OP_plus) ||
+ (Op->getOp() == dwarf::DW_OP_minus && !SubRegisterSizeInBits))) {
+ int Offset = Op->getArg(0);
+ SignedOffset = (Op->getOp() == dwarf::DW_OP_plus) ? Offset : -Offset;
+ ExprCursor.take();
}
- case dwarf::DW_OP_plus:
- case dwarf::DW_OP_minus: {
- // [DW_OP_reg,Offset,DW_OP_plus, DW_OP_deref] --> [DW_OP_breg, Offset].
- // [DW_OP_reg,Offset,DW_OP_minus,DW_OP_deref] --> [DW_OP_breg,-Offset].
- auto N = ExprCursor.peekNext();
- if (N && N->getOp() == dwarf::DW_OP_deref) {
- int Offset = Op->getArg(0);
- int SignedOffset = (Op->getOp() == dwarf::DW_OP_plus) ? Offset : -Offset;
- if (FBReg)
- addFBReg(SignedOffset);
- else
- addBReg(Reg.DwarfRegNo, SignedOffset);
+ if (FBReg)
+ addFBReg(SignedOffset);
+ else
+ addBReg(Reg.DwarfRegNo, SignedOffset);
+ DwarfRegs.clear();
+ return true;
+}
- ExprCursor.consume(2);
+/// Assuming a well-formed expression, match "DW_OP_deref* DW_OP_LLVM_fragment?".
+static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
+ while (ExprCursor) {
+ auto Op = ExprCursor.take();
+ switch (Op->getOp()) {
+ case dwarf::DW_OP_deref:
+ case dwarf::DW_OP_LLVM_fragment:
break;
+ default:
+ return false;
}
- addReg(Reg.DwarfRegNo, 0);
- break;
- }
- case dwarf::DW_OP_deref:
- // [DW_OP_reg,DW_OP_deref] --> [DW_OP_breg].
- if (FBReg)
- addFBReg(0);
- else
- addBReg(Reg.DwarfRegNo, 0);
- ExprCursor.take();
- break;
}
- DwarfRegs.clear();
return true;
}
void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor,
unsigned FragmentOffsetInBits) {
+ // If we need to mask out a subregister, do it now, unless the next
+ // operation would emit an OpPiece anyway.
+ auto N = ExprCursor.peek();
+ if (SubRegisterSizeInBits && N && (N->getOp() != dwarf::DW_OP_LLVM_fragment))
+ maskSubRegister();
+
while (ExprCursor) {
auto Op = ExprCursor.take();
-
- // If we need to mask out a subregister, do it now, unless the next
- // operation would emit an OpPiece anyway.
- if (SubRegisterSizeInBits && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
- maskSubRegister();
-
switch (Op->getOp()) {
case dwarf::DW_OP_LLVM_fragment: {
unsigned SizeInBits = Op->getArg(1);
@@ -281,50 +294,74 @@ void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor,
// location.
assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
- // If \a addMachineReg already emitted DW_OP_piece operations to represent
+ // If addMachineReg already emitted DW_OP_piece operations to represent
// a super-register by splicing together sub-registers, subtract the size
// of the pieces that was already emitted.
SizeInBits -= OffsetInBits - FragmentOffset;
- // If \a addMachineReg requested a DW_OP_bit_piece to stencil out a
+ // If addMachineReg requested a DW_OP_bit_piece to stencil out a
// sub-register that is smaller than the current fragment's size, use it.
if (SubRegisterSizeInBits)
SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);
-
+
+ // Emit a DW_OP_stack_value for implicit location descriptions.
+ if (LocationKind == Implicit)
+ addStackValue();
+
+ // Emit the DW_OP_piece.
addOpPiece(SizeInBits, SubRegisterOffsetInBits);
setSubRegisterPiece(0, 0);
- break;
+ // Reset the location description kind.
+ LocationKind = Unknown;
+ return;
}
case dwarf::DW_OP_plus:
+ assert(LocationKind != Register);
emitOp(dwarf::DW_OP_plus_uconst);
emitUnsigned(Op->getArg(0));
break;
case dwarf::DW_OP_minus:
- // There is no OP_minus_uconst.
+ assert(LocationKind != Register);
+ // There is no DW_OP_minus_uconst.
emitOp(dwarf::DW_OP_constu);
emitUnsigned(Op->getArg(0));
emitOp(dwarf::DW_OP_minus);
break;
- case dwarf::DW_OP_deref:
- emitOp(dwarf::DW_OP_deref);
+ case dwarf::DW_OP_deref: {
+ assert(LocationKind != Register);
+ if (LocationKind != Memory && isMemoryLocation(ExprCursor))
+ // Turning this into a memory location description makes the deref
+ // implicit.
+ LocationKind = Memory;
+ else
+ emitOp(dwarf::DW_OP_deref);
break;
+ }
case dwarf::DW_OP_constu:
+ assert(LocationKind != Register);
emitOp(dwarf::DW_OP_constu);
emitUnsigned(Op->getArg(0));
break;
case dwarf::DW_OP_stack_value:
- addStackValue();
+ assert(LocationKind == Unknown || LocationKind == Implicit);
+ LocationKind = Implicit;
break;
case dwarf::DW_OP_swap:
+ assert(LocationKind != Register);
emitOp(dwarf::DW_OP_swap);
break;
case dwarf::DW_OP_xderef:
+ assert(LocationKind != Register);
emitOp(dwarf::DW_OP_xderef);
break;
default:
llvm_unreachable("unhandled opcode found in expression");
}
}
+
+ if (LocationKind == Implicit)
+ // Turn this into an implicit location description.
+ addStackValue();
}
/// add masking operations to stencil out a subregister.
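The rewrite above replaces the ad-hoc [Reg, Offset, DW_OP_plus/minus, DW_OP_deref] pattern matching with a small state machine over three location-description kinds (Register, Memory, Implicit) plus the isMemoryLocation() look-ahead, which decides whether a DW_OP_deref can be absorbed by declaring the whole expression a memory location. A self-contained mimic of that matcher over a plain opcode list (a simplified sketch: the real cursor also carries operand values, and the enum values here are placeholders):

    #include <cstdint>
    #include <vector>

    enum : uint64_t { OpDeref = 1, OpFragment = 2, OpPlus = 3 };

    // True iff what remains is only derefs, optionally ending in a
    // fragment marker -- exactly the shape a DWARF memory location
    // description expresses without any explicit DW_OP_deref.
    static bool isMemoryLocation(const std::vector<uint64_t> &Ops) {
      for (uint64_t Op : Ops)
        if (Op != OpDeref && Op != OpFragment)
          return false;
      return true;
    }

    // isMemoryLocation({OpDeref})         -> true: fold into kind Memory
    // isMemoryLocation({OpDeref, OpPlus}) -> false: emit DW_OP_deref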
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h
index e8dc211eb3c2..de8613200067 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -72,6 +72,8 @@ public:
}
/// Determine whether there are any operations left in this expression.
operator bool() const { return Start != End; }
+ DIExpression::expr_op_iterator begin() const { return Start; }
+ DIExpression::expr_op_iterator end() const { return End; }
/// Retrieve the fragment information, if any.
Optional<DIExpression::FragmentInfo> getFragmentInfo() const {
@@ -102,6 +104,9 @@ protected:
unsigned SubRegisterSizeInBits = 0;
unsigned SubRegisterOffsetInBits = 0;
+ /// The kind of location description being produced.
+ enum { Unknown = 0, Register, Memory, Implicit } LocationKind = Unknown;
+
/// Push a DW_OP_piece / DW_OP_bit_piece for emitting later, if one is needed
/// to represent a subregister.
void setSubRegisterPiece(unsigned SizeInBits, unsigned OffsetInBits) {
@@ -122,7 +127,8 @@ protected:
/// current function.
virtual bool isFrameRegister(const TargetRegisterInfo &TRI, unsigned MachineReg) = 0;
- /// Emit a DW_OP_reg operation.
+ /// Emit a DW_OP_reg operation. Note that this is only legal inside a DWARF
+ /// register location description.
void addReg(int DwarfReg, const char *Comment = nullptr);
/// Emit a DW_OP_breg operation.
void addBReg(int DwarfReg, int Offset);
@@ -185,11 +191,18 @@ public:
/// Emit an unsigned constant.
void addUnsignedConstant(const APInt &Value);
+ /// Lock this down to become a memory location description.
+ void setMemoryLocationKind() {
+ assert(LocationKind == Unknown);
+ LocationKind = Memory;
+ }
+
/// Emit a machine register location. As an optimization this may also consume
/// the prefix of a DwarfExpression if a more efficient representation for
/// combining the register location and the first operation exists.
///
- /// \param FragmentOffsetInBits If this is one fragment out of a fragmented
+ /// \param FragmentOffsetInBits If this is one fragment out of a
+ /// fragmented
/// location, this is the offset of the
/// fragment inside the entire variable.
/// \return false if no DWARF register exists
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index bad5b09553cd..bac0c204d04f 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -27,6 +27,7 @@
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/MC/MachineLocation.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
@@ -73,8 +74,8 @@ bool DIEDwarfExpression::isFrameRegister(const TargetRegisterInfo &TRI,
DwarfUnit::DwarfUnit(dwarf::Tag UnitTag, const DICompileUnit *Node,
AsmPrinter *A, DwarfDebug *DW, DwarfFile *DWU)
- : DIEUnit(A->getDwarfVersion(), A->getPointerSize(), UnitTag), CUNode(Node),
- Asm(A), DD(DW), DU(DWU), IndexTyDie(nullptr) {
+ : DIEUnit(A->getDwarfVersion(), A->MAI->getCodePointerSize(), UnitTag),
+ CUNode(Node), Asm(A), DD(DW), DU(DWU), IndexTyDie(nullptr) {
}
DwarfTypeUnit::DwarfTypeUnit(DwarfCompileUnit &CU, AsmPrinter *A,
@@ -471,12 +472,13 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
// variable's location.
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
+ if (Location.isIndirect())
+ DwarfExpr.setMemoryLocationKind();
SmallVector<uint64_t, 9> Ops;
- if (Location.isIndirect()) {
+ if (Location.isIndirect() && Location.getOffset()) {
Ops.push_back(dwarf::DW_OP_plus);
Ops.push_back(Location.getOffset());
- Ops.push_back(dwarf::DW_OP_deref);
}
// If we started with a pointer to the __Block_byref... struct, then
// the first thing we need to do is dereference the pointer (DW_OP_deref).
@@ -1546,7 +1548,7 @@ void DwarfUnit::emitCommonHeader(bool UseOffsets, dwarf::UnitType UT) {
Asm->OutStreamer->AddComment("DWARF Unit Type");
Asm->EmitInt8(UT);
Asm->OutStreamer->AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
+ Asm->EmitInt8(Asm->MAI->getCodePointerSize());
}
// We share one abbreviations table across all units so it's always at the
@@ -1562,7 +1564,7 @@ void DwarfUnit::emitCommonHeader(bool UseOffsets, dwarf::UnitType UT) {
if (Version <= 4) {
Asm->OutStreamer->AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
+ Asm->EmitInt8(Asm->MAI->getCodePointerSize());
}
}
diff --git a/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp b/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 2bdd189557b4..c862cfd28add 100644
--- a/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -570,8 +570,14 @@ bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) {
ValueToValueMapTy VMap;
BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F);
- for (BasicBlock *Pred : OtherPreds)
- Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
+ for (BasicBlock *Pred : OtherPreds) {
+ // If the target is a loop to itself, then the terminator of the split
(That is: when the target loops back to itself, the terminator that must be rewritten lives in the split-off BodyBlock, not in the original predecessor.)
+ // block needs to be updated.
+ if (Pred == Target)
+ BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
+ else
+ Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
+ }
// Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that
// they are clones, so the number of PHIs are the same.
@@ -5059,16 +5065,14 @@ bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
if (!ShlC)
return false;
uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
- auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt);
- DemandBits |= ShlDemandBits;
+ DemandBits.setLowBits(BitWidth - ShiftAmt);
break;
}
case llvm::Instruction::Trunc: {
EVT TruncVT = TLI->getValueType(*DL, I->getType());
unsigned TruncBitWidth = TruncVT.getSizeInBits();
- auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth);
- DemandBits |= TruncBits;
+ DemandBits.setLowBits(TruncBitWidth);
break;
}
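Both CodeGenPrepare hunks swap a mask built from a temporary (getAllOnesValue().lshr() / .zext()) for APInt::setLowBits(), which mutates the existing value. A checkable sketch of the equivalence for the shift case (assuming ShiftAmt < BitWidth, which getLimitedValue(BitWidth - 1) guarantees above):

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    void check(unsigned BitWidth, unsigned ShiftAmt) {
      APInt Old = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt);
      APInt New(BitWidth, 0);
      New.setLowBits(BitWidth - ShiftAmt); // same mask, no temporary
      assert(Old == New);
    }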
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 766187378446..5fb8dfc95d3f 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -381,18 +381,19 @@ bool IRTranslator::translateInsertValue(const User &U,
uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
unsigned Res = getOrCreateVReg(U);
- const Value &Inserted = *U.getOperand(1);
- MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
- Offset);
+ unsigned Inserted = getOrCreateVReg(*U.getOperand(1));
+ MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), Inserted, Offset);
return true;
}
bool IRTranslator::translateSelect(const User &U,
MachineIRBuilder &MIRBuilder) {
- MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
- getOrCreateVReg(*U.getOperand(1)),
- getOrCreateVReg(*U.getOperand(2)));
+ unsigned Res = getOrCreateVReg(U);
+ unsigned Tst = getOrCreateVReg(*U.getOperand(0));
+ unsigned Op0 = getOrCreateVReg(*U.getOperand(1));
+ unsigned Op1 = getOrCreateVReg(*U.getOperand(2));
+ MIRBuilder.buildSelect(Res, Tst, Op0, Op1);
return true;
}
@@ -984,9 +985,11 @@ bool IRTranslator::translateInsertElement(const User &U,
ValToVReg[&U] = Elt;
return true;
}
- MIRBuilder.buildInsertVectorElement(
- getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
- getOrCreateVReg(*U.getOperand(1)), getOrCreateVReg(*U.getOperand(2)));
+ unsigned Res = getOrCreateVReg(U);
+ unsigned Val = getOrCreateVReg(*U.getOperand(0));
+ unsigned Elt = getOrCreateVReg(*U.getOperand(1));
+ unsigned Idx = getOrCreateVReg(*U.getOperand(2));
+ MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
return true;
}
@@ -999,9 +1002,10 @@ bool IRTranslator::translateExtractElement(const User &U,
ValToVReg[&U] = Elt;
return true;
}
- MIRBuilder.buildExtractVectorElement(getOrCreateVReg(U),
- getOrCreateVReg(*U.getOperand(0)),
- getOrCreateVReg(*U.getOperand(1)));
+ unsigned Res = getOrCreateVReg(U);
+ unsigned Val = getOrCreateVReg(*U.getOperand(0));
+ unsigned Idx = getOrCreateVReg(*U.getOperand(1));
+ MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
return true;
}
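The pattern repeated through these IRTranslator hunks hoists every getOrCreateVReg() call into a named local before the build*() call. getOrCreateVReg() creates a virtual register on first use, and C++ does not specify the evaluation order of call arguments, so the inline form could assign vreg numbers in a host-compiler-dependent order; naming the results pins it down. Schematically:

    build(getOrCreateVReg(A), getOrCreateVReg(B)); // order unspecified
    unsigned VA = getOrCreateVReg(A);              // always created first
    unsigned VB = getOrCreateVReg(B);
    build(VA, VB);                                 // deterministic numbering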
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index fb9d01ef8542..942680b6fff3 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -68,23 +68,6 @@ bool InstructionSelector::constrainSelectedInstRegOperands(
return true;
}
-Optional<int64_t>
-InstructionSelector::getConstantVRegVal(unsigned VReg,
- const MachineRegisterInfo &MRI) const {
- MachineInstr *MI = MRI.getVRegDef(VReg);
- if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
- return None;
-
- if (MI->getOperand(1).isImm())
- return MI->getOperand(1).getImm();
-
- if (MI->getOperand(1).isCImm() &&
- MI->getOperand(1).getCImm()->getBitWidth() <= 64)
- return MI->getOperand(1).getCImm()->getSExtValue();
-
- return None;
-}
-
bool InstructionSelector::isOperandImmEqual(
const MachineOperand &MO, int64_t Value,
const MachineRegisterInfo &MRI) const {
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
index 657ddb307919..74ed58e8d049 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -24,6 +24,8 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <iterator>
+
#define DEBUG_TYPE "legalizer"
using namespace llvm;
@@ -161,7 +163,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
// convergence for performance reasons.
bool Changed = false;
MachineBasicBlock::iterator NextMI;
- for (auto &MBB : MF)
+ for (auto &MBB : MF) {
for (auto MI = MBB.begin(); MI != MBB.end(); MI = NextMI) {
// Get the next Instruction before we try to legalize, because there's a
// good chance MI will be deleted.
@@ -171,18 +173,21 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
// and are assumed to be legal.
if (!isPreISelGenericOpcode(MI->getOpcode()))
continue;
+ unsigned NumNewInsns = 0;
SmallVector<MachineInstr *, 4> WorkList;
- Helper.MIRBuilder.recordInsertions(
- [&](MachineInstr *MI) { WorkList.push_back(MI); });
+ Helper.MIRBuilder.recordInsertions([&](MachineInstr *MI) {
+ ++NumNewInsns;
+ WorkList.push_back(MI);
+ });
WorkList.push_back(&*MI);
+ bool Changed = false;
LegalizerHelper::LegalizeResult Res;
unsigned Idx = 0;
do {
Res = Helper.legalizeInstrStep(*WorkList[Idx]);
// Error out if we couldn't legalize this instruction. We may want to
- // fall
- // back to DAG ISel instead in the future.
+ // fall back to DAG ISel instead in the future.
if (Res == LegalizerHelper::UnableToLegalize) {
Helper.MIRBuilder.stopRecordingInsertions();
if (Res == LegalizerHelper::UnableToLegalize) {
@@ -194,10 +199,21 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
}
Changed |= Res == LegalizerHelper::Legalized;
++Idx;
+
+#ifndef NDEBUG
+ if (NumNewInsns) {
+ DEBUG(dbgs() << ".. .. Emitted " << NumNewInsns << " insns\n");
+ for (auto I = WorkList.end() - NumNewInsns, E = WorkList.end();
+ I != E; ++I)
+ DEBUG(dbgs() << ".. .. New MI: "; (*I)->print(dbgs()));
+ NumNewInsns = 0;
+ }
+#endif
} while (Idx < WorkList.size());
Helper.MIRBuilder.stopRecordingInsertions();
}
+ }
MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
@@ -207,7 +223,11 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
// good chance MI will be deleted.
NextMI = std::next(MI);
- Changed |= combineExtracts(*MI, MRI, TII);
+ // combineExtracts erases MI.
+ if (combineExtracts(*MI, MRI, TII)) {
+ Changed = true;
+ continue;
+ }
Changed |= combineMerges(*MI, MRI, TII);
}
}
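The last Legalizer hunk fixes an iterator hazard rather than a cosmetic issue: combineExtracts() erases MI when it succeeds, so the old `Changed |= combineExtracts(*MI, ...)` followed by `combineMerges(*MI, ...)` could touch a dead instruction. The safe shape, with the successor captured before anything can die:

    NextMI = std::next(MI);                  // capture first
    if (combineExtracts(*MI, MRI, TII)) {
      Changed = true;
      continue;                              // MI is gone; don't touch it
    }
    Changed |= combineMerges(*MI, MRI, TII); // MI still alive here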
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 20358f7ee6c2..58778077bc0e 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -24,7 +24,7 @@
#include <sstream>
-#define DEBUG_TYPE "legalize-mir"
+#define DEBUG_TYPE "legalizer"
using namespace llvm;
@@ -35,24 +35,34 @@ LegalizerHelper::LegalizerHelper(MachineFunction &MF)
LegalizerHelper::LegalizeResult
LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
+ DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
+
auto Action = LI.getAction(MI, MRI);
switch (std::get<0>(Action)) {
case LegalizerInfo::Legal:
+ DEBUG(dbgs() << ".. Already legal\n");
return AlreadyLegal;
case LegalizerInfo::Libcall:
+ DEBUG(dbgs() << ".. Convert to libcall\n");
return libcall(MI);
case LegalizerInfo::NarrowScalar:
+ DEBUG(dbgs() << ".. Narrow scalar\n");
return narrowScalar(MI, std::get<1>(Action), std::get<2>(Action));
case LegalizerInfo::WidenScalar:
+ DEBUG(dbgs() << ".. Widen scalar\n");
return widenScalar(MI, std::get<1>(Action), std::get<2>(Action));
case LegalizerInfo::Lower:
+ DEBUG(dbgs() << ".. Lower\n");
return lower(MI, std::get<1>(Action), std::get<2>(Action));
case LegalizerInfo::FewerElements:
+ DEBUG(dbgs() << ".. Reduce number of elements\n");
return fewerElementsVector(MI, std::get<1>(Action), std::get<2>(Action));
case LegalizerInfo::Custom:
+ DEBUG(dbgs() << ".. Custom legalization\n");
return LI.legalizeCustom(MI, MRI, MIRBuilder) ? Legalized
: UnableToLegalize;
default:
+ DEBUG(dbgs() << ".. Unable to legalize\n");
return UnableToLegalize;
}
}
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 8d1a263395a0..54ef7e5c5a1b 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -592,7 +592,7 @@ MachineInstrBuilder MachineIRBuilder::buildInsertVectorElement(unsigned Res,
LLT EltTy = MRI->getType(Elt);
LLT IdxTy = MRI->getType(Idx);
assert(ResTy.isVector() && ValTy.isVector() && "invalid operand type");
- assert(EltTy.isScalar() && IdxTy.isScalar() && "invalid operand type");
+ assert(IdxTy.isScalar() && "invalid operand type");
assert(ResTy.getNumElements() == ValTy.getNumElements() && "type mismatch");
assert(ResTy.getElementType() == EltTy && "type mismatch");
#endif
@@ -612,7 +612,8 @@ MachineInstrBuilder MachineIRBuilder::buildExtractVectorElement(unsigned Res,
LLT ValTy = MRI->getType(Val);
LLT IdxTy = MRI->getType(Idx);
assert(ValTy.isVector() && "invalid operand type");
- assert(ResTy.isScalar() && IdxTy.isScalar() && "invalid operand type");
+ assert((ResTy.isScalar() || ResTy.isPointer()) && "invalid operand type");
+ assert(IdxTy.isScalar() && "invalid operand type");
assert(ValTy.getElementType() == ResTy && "type mismatch");
#endif
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 606a59680a3d..3c93f8123b0d 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -93,3 +94,19 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
R << Msg << ": " << ore::MNV("Inst", MI);
reportGISelFailure(MF, TPC, MORE, R);
}
+
+Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
+ const MachineRegisterInfo &MRI) {
+ MachineInstr *MI = MRI.getVRegDef(VReg);
+ if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
+ return None;
+
+ if (MI->getOperand(1).isImm())
+ return MI->getOperand(1).getImm();
+
+ if (MI->getOperand(1).isCImm() &&
+ MI->getOperand(1).getCImm()->getBitWidth() <= 64)
+ return MI->getOperand(1).getCImm()->getSExtValue();
+
+ return None;
+}
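getConstantVRegVal() moves verbatim from InstructionSelector (deleted earlier in this patch) to a free function in GlobalISel's Utils, so it can be shared outside the selector. Typical use (a sketch; Optional is llvm::Optional):

    if (Optional<int64_t> Cst = llvm::getConstantVRegVal(VReg, MRI)) {
      int64_t Imm = *Cst; // sign-extended G_CONSTANT value, width <= 64
      // ... fold Imm ...
    }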
diff --git a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
index a1cb0a0695bf..b7ab404070b1 100644
--- a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -888,20 +888,10 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
// Debug values are not allowed to affect codegen.
if (MI->isDebugValue()) {
// Modify DBG_VALUE now that the value is in a spill slot.
- bool IsIndirect = MI->isIndirectDebugValue();
- uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
- const MDNode *Var = MI->getDebugVariable();
- const MDNode *Expr = MI->getDebugExpression();
- DebugLoc DL = MI->getDebugLoc();
- DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
MachineBasicBlock *MBB = MI->getParent();
- assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
- "Expected inlined-at fields to agree");
- BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
- .addFrameIndex(StackSlot)
- .addImm(Offset)
- .addMetadata(Var)
- .addMetadata(Expr);
+ DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
+ buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
+ MBB->erase(MI);
continue;
}
diff --git a/contrib/llvm/lib/CodeGen/LowLevelType.cpp b/contrib/llvm/lib/CodeGen/LowLevelType.cpp
index c4b9068fa905..1c682e72fa49 100644
--- a/contrib/llvm/lib/CodeGen/LowLevelType.cpp
+++ b/contrib/llvm/lib/CodeGen/LowLevelType.cpp
@@ -21,10 +21,10 @@ using namespace llvm;
LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
if (auto VTy = dyn_cast<VectorType>(&Ty)) {
auto NumElements = VTy->getNumElements();
- auto ScalarSizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ LLT ScalarTy = getLLTForType(*VTy->getElementType(), DL);
if (NumElements == 1)
- return LLT::scalar(ScalarSizeInBits);
- return LLT::vector(NumElements, ScalarSizeInBits);
+ return ScalarTy;
+ return LLT::vector(NumElements, ScalarTy);
} else if (auto PTy = dyn_cast<PointerType>(&Ty)) {
return LLT::pointer(PTy->getAddressSpace(), DL.getTypeSizeInBits(&Ty));
} else if (Ty.isSized()) {
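getLLTForType() now builds the element type recursively and hands a full LLT to LLT::vector(), so element kinds other than plain scalars survive. In particular, a vector of pointers keeps its pointer-ness instead of decaying to a vector of same-sized scalars:

    LLT EltTy = LLT::pointer(/*AddrSpace=*/0, /*SizeInBits=*/64);
    LLT VecTy = LLT::vector(/*NumElements=*/4, EltTy); // <4 x p0>, not <4 x s64>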
diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
index c0a8b95ed8a0..4bd5fbfe38e6 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
@@ -2351,3 +2351,31 @@ MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
BB.insert(I, MI);
return MachineInstrBuilder(MF, MI);
}
+
+MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I,
+ const MachineInstr &Orig,
+ int FrameIndex) {
+ const MDNode *Var = Orig.getDebugVariable();
+ auto *Expr = cast_or_null<DIExpression>(Orig.getDebugExpression());
+ bool IsIndirect = Orig.isIndirectDebugValue();
+ uint64_t Offset = IsIndirect ? Orig.getOperand(1).getImm() : 0;
+ DebugLoc DL = Orig.getDebugLoc();
+ assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
+ // If the DBG_VALUE already was a memory location, add an extra
+ // DW_OP_deref. Otherwise just turning this from a register into a
+ // memory/indirect location is sufficient.
+ if (IsIndirect) {
+ SmallVector<uint64_t, 8> Ops;
+ Ops.push_back(dwarf::DW_OP_deref);
+ if (Expr)
+ Ops.append(Expr->elements_begin(), Expr->elements_end());
+ Expr = DIExpression::get(Expr->getContext(), Ops);
+ }
+ return BuildMI(BB, I, DL, Orig.getDesc())
+ .addFrameIndex(FrameIndex)
+ .addImm(Offset)
+ .addMetadata(Var)
+ .addMetadata(Expr);
+}
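buildDbgValueForSpill() centralizes the DBG_VALUE rewrite that InlineSpiller (above) and RegAllocFast (below) previously open-coded. In terms of the location descriptions involved, the deref rule is (rough MIR notation):

    // register-valued DBG_VALUE:
    //   DBG_VALUE %vreg, !var, !DIExpression()
    //   -> DBG_VALUE %fi, 0, !var, !DIExpression()      (FI adds the indirection)
    // already-indirect DBG_VALUE:
    //   DBG_VALUE %vreg, <off>, !var, !DIExpression()
    //   -> DBG_VALUE %fi, <off>, !var, !DIExpression(DW_OP_deref)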
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index d392c044bd71..84bd670105e1 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -2030,6 +2030,8 @@ namespace {
void MachineVerifier::verifyStackFrame() {
unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
+ if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
+ return;
SmallVector<StackStateOfBB, 8> SPState;
SPState.resize(MF->getNumBlockIDs());
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index fd759bc372b2..283d84629f8e 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -304,19 +304,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
LiveDbgValueMap[LRI->VirtReg];
for (unsigned li = 0, le = LRIDbgValues.size(); li != le; ++li) {
MachineInstr *DBG = LRIDbgValues[li];
- const MDNode *Var = DBG->getDebugVariable();
- const MDNode *Expr = DBG->getDebugExpression();
- bool IsIndirect = DBG->isIndirectDebugValue();
- uint64_t Offset = IsIndirect ? DBG->getOperand(1).getImm() : 0;
- DebugLoc DL = DBG->getDebugLoc();
- assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
- "Expected inlined-at fields to agree");
- MachineInstr *NewDV =
- BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::DBG_VALUE))
- .addFrameIndex(FI)
- .addImm(Offset)
- .addMetadata(Var)
- .addMetadata(Expr);
+ MachineInstr *NewDV = buildDbgValueForSpill(*MBB, MI, *DBG, FI);
assert(NewDV->getParent() == MBB && "dangling parent pointer");
(void)NewDV;
DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV);
diff --git a/contrib/llvm/lib/CodeGen/SafeStack.cpp b/contrib/llvm/lib/CodeGen/SafeStack.cpp
index fa68411284e7..7fa379d80c6c 100644
--- a/contrib/llvm/lib/CodeGen/SafeStack.cpp
+++ b/contrib/llvm/lib/CodeGen/SafeStack.cpp
@@ -550,7 +550,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
// Replace alloc with the new location.
replaceDbgDeclare(Arg, BasePointer, BasePointer->getNextNode(), DIB,
- /*Deref=*/true, -Offset);
+ /*Deref=*/false, -Offset);
Arg->replaceAllUsesWith(NewArg);
IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
IRB.CreateMemCpy(Off, Arg, Size, Arg->getParamAlignment());
@@ -565,7 +565,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
if (Size == 0)
Size = 1; // Don't create zero-sized stack objects.
- replaceDbgDeclareForAlloca(AI, BasePointer, DIB, /*Deref=*/true, -Offset);
+ replaceDbgDeclareForAlloca(AI, BasePointer, DIB, /*Deref=*/false, -Offset);
replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);
// Replace uses of the alloca with the new location.
@@ -655,7 +655,7 @@ void SafeStack::moveDynamicAllocasToUnsafeStack(
if (AI->hasName() && isa<Instruction>(NewAI))
NewAI->takeName(AI);
- replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
+ replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/false);
AI->replaceAllUsesWith(NewAI);
AI->eraseFromParent();
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4d468551ae24..4702d63cb617 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2146,7 +2146,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (N->getFlags()->hasNoUnsignedWrap())
return N0;
- if (DAG.MaskedValueIsZero(N1, ~APInt::getSignBit(BitWidth))) {
+ if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) {
// N1 is either 0 or the minimum signed value. If the sub is NSW, then
// N1 must be 0 because negating the minimum signed value is undefined.
if (N->getFlags()->hasNoSignedWrap())
@@ -3705,7 +3705,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
// fold (and (sra)) -> (and (srl)) when possible.
- if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
+ if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (zext_inreg (extload x)) -> (zextload x)
@@ -4225,8 +4225,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
return Load;
// Simplify the operands using demanded-bits information.
- if (!VT.isVector() &&
- SimplifyDemandedBits(SDValue(N, 0)))
+ if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
@@ -5058,8 +5057,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
return Tmp;
// Simplify the expression using non-local knowledge.
- if (!VT.isVector() &&
- SimplifyDemandedBits(SDValue(N, 0)))
+ if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
return SDValue();
@@ -5350,7 +5348,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(c2 - c1, DL, N1.getValueType()));
} else {
- Mask = Mask.lshr(c1 - c2);
+ Mask.lshrInPlace(c1 - c2);
SDLoc DL(N);
Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
DAG.getConstant(c1 - c2, DL, N1.getValueType()));
@@ -5660,7 +5658,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
DAG.getConstant(ShiftAmt, DL0,
getShiftAmountTy(SmallVT)));
AddToWorklist(SmallShift.getNode());
- APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
+ APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt);
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
@@ -8300,11 +8298,11 @@ static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG,
switch (N0.getOpcode()) {
case ISD::AND:
FPOpcode = ISD::FABS;
- SignMask = ~APInt::getSignBit(SourceVT.getSizeInBits());
+ SignMask = ~APInt::getSignMask(SourceVT.getSizeInBits());
break;
case ISD::XOR:
FPOpcode = ISD::FNEG;
- SignMask = APInt::getSignBit(SourceVT.getSizeInBits());
+ SignMask = APInt::getSignMask(SourceVT.getSizeInBits());
break;
// TODO: ISD::OR --> ISD::FNABS?
default:
@@ -8415,7 +8413,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
assert(VT.getSizeInBits() == 128);
SDValue SignBit = DAG.getConstant(
- APInt::getSignBit(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64);
+ APInt::getSignMask(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64);
SDValue FlipBit;
if (N0.getOpcode() == ISD::FNEG) {
FlipBit = SignBit;
@@ -8435,7 +8433,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
AddToWorklist(FlipBits.getNode());
return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits);
}
- APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
+ APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
if (N0.getOpcode() == ISD::FNEG)
return DAG.getNode(ISD::XOR, DL, VT,
NewConv, DAG.getConstant(SignBit, DL, VT));
@@ -8483,7 +8481,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
}
if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
- APInt SignBit = APInt::getSignBit(VT.getSizeInBits() / 2);
+ APInt SignBit = APInt::getSignMask(VT.getSizeInBits() / 2);
SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
AddToWorklist(Cst.getNode());
SDValue X = DAG.getBitcast(VT, N0.getOperand(1));
@@ -8504,7 +8502,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
AddToWorklist(FlipBits.getNode());
return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits);
}
- APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
+ APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
X = DAG.getNode(ISD::AND, SDLoc(X), VT,
X, DAG.getConstant(SignBit, SDLoc(X), VT));
AddToWorklist(X.getNode());
@@ -8687,7 +8685,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
APInt ThisVal = OpVal.trunc(DstBitSize);
Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
- OpVal = OpVal.lshr(DstBitSize);
+ OpVal.lshrInPlace(DstBitSize);
}
// For big endian targets, swap the order of the pieces of each element.
@@ -10315,11 +10313,11 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
if (N0.getValueType().isVector()) {
// For a vector, get a mask such as 0x80... per scalar element
// and splat it.
- SignMask = APInt::getSignBit(N0.getScalarValueSizeInBits());
+ SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits());
SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
} else {
// For a scalar, just generate 0x80...
- SignMask = APInt::getSignBit(IntVT.getSizeInBits());
+ SignMask = APInt::getSignMask(IntVT.getSizeInBits());
}
SDLoc DL0(N0);
Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int,
@@ -10420,11 +10418,11 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
if (N0.getValueType().isVector()) {
// For a vector, get a mask such as 0x7f... per scalar element
// and splat it.
- SignMask = ~APInt::getSignBit(N0.getScalarValueSizeInBits());
+ SignMask = ~APInt::getSignMask(N0.getScalarValueSizeInBits());
SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
} else {
// For a scalar, just generate 0x7f...
- SignMask = ~APInt::getSignBit(IntVT.getSizeInBits());
+ SignMask = ~APInt::getSignMask(IntVT.getSizeInBits());
}
SDLoc DL(N0);
Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
@@ -12375,6 +12373,27 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
return LHS.OffsetFromBase < RHS.OffsetFromBase;
});
+ // Store Merge attempts to merge the lowest stores first. This generally
+ // works out: when a merge succeeds, the remaining stores are re-checked
+ // after the merged collection is removed. However, when a non-mergeable
+ // store is found first, e.g. {p[-2], p[0], p[1], p[2], p[3]}, we would
+ // fail and miss the subsequent mergeable cases. To prevent this, prune
+ // such stores from the front of StoreNodes here.
+
+ unsigned StartIdx = 0;
+ while ((StartIdx + 1 < StoreNodes.size()) &&
+ StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes !=
+ StoreNodes[StartIdx + 1].OffsetFromBase)
+ ++StartIdx;
+
+ // Bail if we don't have enough candidates to merge.
+ if (StartIdx + 1 >= StoreNodes.size())
+ return false;
+
+ if (StartIdx)
+ StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx);
+
// Scan the memory operations on the chain and find the first non-consecutive
// store memory address.
unsigned NumConsecutiveStores = 0;
@@ -12485,39 +12504,52 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
// When extracting multiple vector elements, try to store them
// in one vector store rather than a sequence of scalar stores.
if (IsExtractVecSrc) {
- LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
- unsigned FirstStoreAS = FirstInChain->getAddressSpace();
- unsigned FirstStoreAlign = FirstInChain->getAlignment();
- unsigned NumStoresToMerge = 0;
- bool IsVec = MemVT.isVector();
- for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
- StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
- unsigned StoreValOpcode = St->getValue().getOpcode();
- // This restriction could be loosened.
- // Bail out if any stored values are not elements extracted from a vector.
- // It should be possible to handle mixed sources, but load sources need
- // more careful handling (see the block of code below that handles
- // consecutive loads).
- if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT &&
- StoreValOpcode != ISD::EXTRACT_SUBVECTOR)
- return false;
+ bool RV = false;
+ while (StoreNodes.size() >= 2) {
+ LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
+ unsigned FirstStoreAS = FirstInChain->getAddressSpace();
+ unsigned FirstStoreAlign = FirstInChain->getAlignment();
+ unsigned NumStoresToMerge = 0;
+ bool IsVec = MemVT.isVector();
+ for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
+ StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
+ unsigned StoreValOpcode = St->getValue().getOpcode();
+ // This restriction could be loosened.
+ // Bail out if any stored values are not elements extracted from a
+ // vector. It should be possible to handle mixed sources, but load
+ // sources need more careful handling (see the block of code below that
+ // handles consecutive loads).
+ if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT &&
+ StoreValOpcode != ISD::EXTRACT_SUBVECTOR)
+ return false;
- // Find a legal type for the vector store.
- unsigned Elts = i + 1;
- if (IsVec) {
- // When merging vector stores, get the total number of elements.
- Elts *= MemVT.getVectorNumElements();
+ // Find a legal type for the vector store.
+ unsigned Elts = i + 1;
+ if (IsVec) {
+ // When merging vector stores, get the total number of elements.
+ Elts *= MemVT.getVectorNumElements();
+ }
+ EVT Ty =
+ EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
+ bool IsFast;
+ if (TLI.isTypeLegal(Ty) &&
+ TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
+ FirstStoreAlign, &IsFast) &&
+ IsFast)
+ NumStoresToMerge = i + 1;
}
- EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
- bool IsFast;
- if (TLI.isTypeLegal(Ty) &&
- TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
- FirstStoreAlign, &IsFast) && IsFast)
- NumStoresToMerge = i + 1;
- }
- return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumStoresToMerge,
- false, true);
+ bool Merged = MergeStoresOfConstantsOrVecElts(
+ StoreNodes, MemVT, NumStoresToMerge, false, true);
+ if (!Merged)
+ break;
+ // Remove merged stores for next iteration.
+ StoreNodes.erase(StoreNodes.begin(),
+ StoreNodes.begin() + NumStoresToMerge);
+ RV = true;
+ NumConsecutiveStores -= NumStoresToMerge;
+ }
+ return RV;
}
// Below we handle the case of multiple consecutive stores that
@@ -15122,9 +15154,9 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
// Extract the sub element from the constant bit mask.
if (DAG.getDataLayout().isBigEndian()) {
- Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits);
+ Bits.lshrInPlace((Split - SubIdx - 1) * NumSubBits);
} else {
- Bits = Bits.lshr(SubIdx * NumSubBits);
+ Bits.lshrInPlace(SubIdx * NumSubBits);
}
if (Split > 1)
@@ -16004,7 +16036,7 @@ SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags) {
/// Return true if base is a frame index, which is known not to alias with
/// anything but itself. Provides base object and offset as results.
-static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
+static bool findBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
const GlobalValue *&GV, const void *&CV) {
// Assume it is a primitive operation.
Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;
@@ -16057,53 +16089,56 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
return false;
// Gather base node and offset information.
- SDValue Base1, Base2;
- int64_t Offset1, Offset2;
- const GlobalValue *GV1, *GV2;
- const void *CV1, *CV2;
- bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
+ SDValue Base0, Base1;
+ int64_t Offset0, Offset1;
+ const GlobalValue *GV0, *GV1;
+ const void *CV0, *CV1;
+ bool IsFrameIndex0 = findBaseOffset(Op0->getBasePtr(),
+ Base0, Offset0, GV0, CV0);
+ bool IsFrameIndex1 = findBaseOffset(Op1->getBasePtr(),
Base1, Offset1, GV1, CV1);
- bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
- Base2, Offset2, GV2, CV2);
- // If they have a same base address then check to see if they overlap.
- if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
- return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
- (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
+ // If they have the same base address, then check to see if they overlap.
+ unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3;
+ unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3;
+ if (Base0 == Base1 || (GV0 && (GV0 == GV1)) || (CV0 && (CV0 == CV1)))
+ return !((Offset0 + NumBytes0) <= Offset1 ||
+ (Offset1 + NumBytes1) <= Offset0);
// It is possible for different frame indices to alias each other, mostly
// when tail call optimization reuses return address slots for arguments.
// To catch this case, look up the actual index of frame indices to compute
// the real alias relationship.
- if (isFrameIndex1 && isFrameIndex2) {
+ if (IsFrameIndex0 && IsFrameIndex1) {
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+ Offset0 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base0)->getIndex());
Offset1 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
- Offset2 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
- return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
- (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
+ return !((Offset0 + NumBytes0) <= Offset1 ||
+ (Offset1 + NumBytes1) <= Offset0);
}
// Otherwise, if we know what the bases are, and they aren't identical, then
// we know they cannot alias.
- if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
+ if ((IsFrameIndex0 || CV0 || GV0) && (IsFrameIndex1 || CV1 || GV1))
return false;
// If we know required SrcValue1 and SrcValue2 have relatively large alignment
// compared to the size and offset of the access, we may be able to prove they
- // do not alias. This check is conservative for now to catch cases created by
+ // do not alias. This check is conservative for now to catch cases created by
// splitting vector types.
- if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) &&
- (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) &&
- (Op0->getMemoryVT().getSizeInBits() >> 3 ==
- Op1->getMemoryVT().getSizeInBits() >> 3) &&
- (Op0->getOriginalAlignment() > (Op0->getMemoryVT().getSizeInBits() >> 3))) {
- int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment();
- int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment();
+ int64_t SrcValOffset0 = Op0->getSrcValueOffset();
+ int64_t SrcValOffset1 = Op1->getSrcValueOffset();
+ unsigned OrigAlignment0 = Op0->getOriginalAlignment();
+ unsigned OrigAlignment1 = Op1->getOriginalAlignment();
+ if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
+ NumBytes0 == NumBytes1 && OrigAlignment0 > NumBytes0) {
+ int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
+ int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
// There is no overlap between these relatively aligned accesses of similar
- // size, return no alias.
- if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 ||
- (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1)
+ // size. Return no alias.
+ if ((OffAlign0 + NumBytes0) <= OffAlign1 ||
+ (OffAlign1 + NumBytes1) <= OffAlign0)
return false;
}
@@ -16115,19 +16150,17 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
UseAA = false;
#endif
+
if (UseAA &&
Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
// Use alias analysis information.
- int64_t MinOffset = std::min(Op0->getSrcValueOffset(),
- Op1->getSrcValueOffset());
- int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) +
- Op0->getSrcValueOffset() - MinOffset;
- int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
- Op1->getSrcValueOffset() - MinOffset;
+ int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
+ int64_t Overlap0 = NumBytes0 + SrcValOffset0 - MinOffset;
+ int64_t Overlap1 = NumBytes1 + SrcValOffset1 - MinOffset;
AliasResult AAResult =
- AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1,
+ AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap0,
UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
- MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2,
+ MemoryLocation(Op1->getMemOperand()->getValue(), Overlap1,
UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
if (AAResult == NoAlias)
return false;
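Two mechanical APInt migrations run through this whole file: APInt::getSignBit() becomes getSignMask() (same value, a mask with only the sign bit set, under a name that no longer reads like a bit index), and `X = X.lshr(N)` becomes `X.lshrInPlace(N)`, shifting without materializing a temporary. For instance:

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    APInt SignMask = APInt::getSignMask(32); // 0x80000000
    APInt Demand = ~APInt::getSignMask(32);  // 0x7fffffff
    Demand.lshrInPlace(4);                   // now 0x07ffffff, in place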
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 0584ab9f60d1..6fb26fc3b73d 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1164,9 +1164,11 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
"Expected inlined-at fields to agree");
if (Op->isReg()) {
Op->setIsDebug(true);
+ // A dbg.declare describes the address of a source variable, so lower it
+ // into an indirect DBG_VALUE.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
- DI->getVariable(), DI->getExpression());
+ TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
+ Op->getReg(), 0, DI->getVariable(), DI->getExpression());
} else
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE))
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index fc7cd020fe2e..3bae3bf9ab7c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1343,7 +1343,7 @@ void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
// Convert to an integer of the same size.
if (TLI.isTypeLegal(IVT)) {
State.IntValue = DAG.getNode(ISD::BITCAST, DL, IVT, Value);
- State.SignMask = APInt::getSignBit(NumBits);
+ State.SignMask = APInt::getSignMask(NumBits);
State.SignBit = NumBits - 1;
return;
}
@@ -2984,7 +2984,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
EVT NVT = Node->getValueType(0);
APFloat apf(DAG.EVTToAPFloatSemantics(VT),
APInt::getNullValue(VT.getSizeInBits()));
- APInt x = APInt::getSignBit(NVT.getSizeInBits());
+ APInt x = APInt::getSignMask(NVT.getSizeInBits());
(void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
Tmp1 = DAG.getConstantFP(apf, dl, VT);
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(VT),
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 6f2b1b94ce46..c1cb5d9b5235 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -72,7 +72,7 @@ bool DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break;
case ISD::ConstantFP: R = SoftenFloatRes_ConstantFP(N, ResNo); break;
case ISD::EXTRACT_VECTOR_ELT:
- R = SoftenFloatRes_EXTRACT_VECTOR_ELT(N); break;
+ R = SoftenFloatRes_EXTRACT_VECTOR_ELT(N, ResNo); break;
case ISD::FABS: R = SoftenFloatRes_FABS(N, ResNo); break;
case ISD::FMINNUM: R = SoftenFloatRes_FMINNUM(N); break;
case ISD::FMAXNUM: R = SoftenFloatRes_FMAXNUM(N); break;
@@ -171,7 +171,10 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(SDNode *N, unsigned ResNo) {
}
}
-SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
+SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N, unsigned ResNo) {
+ // When LegalInHWReg, keep the extracted value in register.
+ if (isLegalInHWReg(N->getValueType(ResNo)))
+ return SDValue(N, ResNo);
SDValue NewOp = BitConvertVectorToIntegerVector(N->getOperand(0));
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
NewOp.getValueType().getVectorElementType(),
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 0a2b680e1c66..154af46c9446 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -925,9 +925,9 @@ SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
assert(Op.getValueType().isVector() && "Only applies to vectors!");
unsigned EltWidth = Op.getScalarValueSizeInBits();
EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
- unsigned NumElts = Op.getValueType().getVectorNumElements();
+ auto EltCnt = Op.getValueType().getVectorElementCount();
return DAG.getNode(ISD::BITCAST, SDLoc(Op),
- EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op);
+ EVT::getVectorVT(*DAG.getContext(), EltNVT, EltCnt), Op);
}
SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op,
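BitConvertVectorToIntegerVector now rebuilds the result type from getVectorElementCount() rather than a bare element count. An ElementCount carries both the count and a scalable flag, so this presumably keeps a scalable vector's vscale-ness intact across the bitcast; the sketch below mirrors the hunk:

    auto EltCnt = Op.getValueType().getVectorElementCount(); // {NumElts, Scalable}
    EVT NewVT = EVT::getVectorVT(*DAG.getContext(), EltNVT, EltCnt);
    // e.g. <vscale x 4 x f32> -> <vscale x 4 x i32>, not <4 x i32>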
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 80c939700518..af55a22972a6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -428,7 +428,7 @@ private:
SDValue SoftenFloatRes_BITCAST(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_BUILD_PAIR(SDNode *N);
SDValue SoftenFloatRes_ConstantFP(SDNode *N, unsigned ResNo);
- SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N);
+ SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_FABS(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_FMINNUM(SDNode *N);
SDValue SoftenFloatRes_FMAXNUM(SDNode *N);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 78fddb5ce8f5..1a7d7b7af5fa 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1293,12 +1293,9 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
if ((NumElements & 1) == 0 &&
SrcVT.getSizeInBits() * 2 < DestVT.getSizeInBits()) {
LLVMContext &Ctx = *DAG.getContext();
- EVT NewSrcVT = EVT::getVectorVT(
- Ctx, EVT::getIntegerVT(
- Ctx, SrcVT.getScalarSizeInBits() * 2),
- NumElements);
- EVT SplitSrcVT =
- EVT::getVectorVT(Ctx, SrcVT.getVectorElementType(), NumElements / 2);
+ EVT NewSrcVT = SrcVT.widenIntegerVectorElementType(Ctx);
+ EVT SplitSrcVT = SrcVT.getHalfNumVectorElementsVT(Ctx);
+
EVT SplitLoVT, SplitHiVT;
std::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT);
if (TLI.isTypeLegal(SrcVT) && !TLI.isTypeLegal(SplitSrcVT) &&
@@ -3012,8 +3009,8 @@ SDValue DAGTypeLegalizer::WidenVSELECTAndMask(SDNode *N) {
// Don't touch if this will be scalarized.
EVT FinalVT = VSelVT;
while (getTypeAction(FinalVT) == TargetLowering::TypeSplitVector)
- FinalVT = EVT::getVectorVT(Ctx, FinalVT.getVectorElementType(),
- FinalVT.getVectorNumElements() / 2);
+ FinalVT = FinalVT.getHalfNumVectorElementsVT(Ctx);
+
if (FinalVT.getVectorNumElements() == 1)
return SDValue();
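
Aside (not part of the patch): the new EVT helpers fold away hand-rolled type math. Judging from the code they replace here (and in GetSplitDestVTs further down), getHalfNumVectorElementsVT is assumed to behave like:

    // Same element type, half as many elements; only valid for an even count.
    EVT halfNumElementsVT(LLVMContext &Ctx, EVT VT) {
      unsigned NumElts = VT.getVectorNumElements();
      assert((NumElts & 1) == 0 && "Splitting vector, but not in half!");
      return EVT::getVectorVT(Ctx, VT.getVectorElementType(), NumElts / 2);
    }
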
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 003ea5030bfc..523f409e6b2c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -639,12 +639,15 @@ void SelectionDAG::DeallocateNode(SDNode *N) {
// If we have operands, deallocate them.
removeOperands(N);
+ NodeAllocator.Deallocate(AllNodes.remove(N));
+
// Set the opcode to DELETED_NODE to help catch bugs when node
// memory is reallocated.
+ // FIXME: There are places in SDag that have grown a dependency on the opcode
+ // value in the released node.
+ __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
N->NodeType = ISD::DELETED_NODE;
- NodeAllocator.Deallocate(AllNodes.remove(N));
-
// If any of the SDDbgValue nodes refer to this SDNode, invalidate
// them and forget about that node.
DbgInfo->erase(N);
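
Aside (not part of the patch): with the Deallocate call hoisted above the sentinel store, the node's memory is already poisoned under AddressSanitizer when NodeType is written, hence the explicit unpoison of just that field. A standalone model of the idiom; the macro fallback mirrors what LLVM's Support/Compiler.h is assumed to provide in non-ASan builds:

    #ifndef __has_feature
    #define __has_feature(x) 0
    #endif
    #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
    #include <sanitizer/asan_interface.h>
    #else
    #define __asan_unpoison_memory_region(p, size)  /* no-op without ASan */
    #endif

    // Unpoison exactly the bytes still written after the object is freed.
    void writeSentinel(unsigned *Slot) {
      __asan_unpoison_memory_region(Slot, sizeof(*Slot));
      *Slot = 0xDEADu;  // e.g. ISD::DELETED_NODE in the patch above
    }
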
@@ -1826,7 +1829,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
- return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
+ return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
@@ -1839,7 +1842,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
- return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
+ return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
@@ -1955,7 +1958,7 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
unsigned BitWidth = Op.getScalarValueSizeInBits();
- return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
+ return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
}
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
@@ -2330,8 +2333,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
Depth + 1);
- KnownZero = KnownZero.lshr(*ShAmt);
- KnownOne = KnownOne.lshr(*ShAmt);
+ KnownZero.lshrInPlace(*ShAmt);
+ KnownOne.lshrInPlace(*ShAmt);
// High bits are known zero.
KnownZero.setHighBits(ShAmt->getZExtValue());
}
@@ -2340,15 +2343,15 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
Depth + 1);
- KnownZero = KnownZero.lshr(*ShAmt);
- KnownOne = KnownOne.lshr(*ShAmt);
+ KnownZero.lshrInPlace(*ShAmt);
+ KnownOne.lshrInPlace(*ShAmt);
// If we know the value of the sign bit, then we know it is copied across
// the high bits by the shift amount.
- APInt SignBit = APInt::getSignBit(BitWidth);
- SignBit = SignBit.lshr(*ShAmt); // Adjust to where it is now in the mask.
- if (KnownZero.intersects(SignBit)) {
+ APInt SignMask = APInt::getSignMask(BitWidth);
+ SignMask.lshrInPlace(*ShAmt); // Adjust to where it is now in the mask.
+ if (KnownZero.intersects(SignMask)) {
KnownZero.setHighBits(ShAmt->getZExtValue());// New bits are known zero.
- } else if (KnownOne.intersects(SignBit)) {
+ } else if (KnownOne.intersects(SignMask)) {
KnownOne.setHighBits(ShAmt->getZExtValue()); // New bits are known one.
}
}
@@ -2361,14 +2364,14 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
// present in the input.
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
- APInt InSignBit = APInt::getSignBit(EBits);
+ APInt InSignMask = APInt::getSignMask(EBits);
APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
// If the sign extended bits are demanded, we know that the sign
// bit is demanded.
- InSignBit = InSignBit.zext(BitWidth);
+ InSignMask = InSignMask.zext(BitWidth);
if (NewBits.getBoolValue())
- InputDemandedBits |= InSignBit;
+ InputDemandedBits |= InSignMask;
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
Depth + 1);
@@ -2377,10 +2380,10 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
- if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
+ if (KnownZero.intersects(InSignMask)) { // Input sign bit known clear
KnownZero |= NewBits;
KnownOne &= ~NewBits;
- } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
+ } else if (KnownOne.intersects(InSignMask)) { // Input sign bit known set
KnownOne |= NewBits;
KnownZero &= ~NewBits;
} else { // Input sign bit unknown
@@ -2745,7 +2748,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
// a set bit that isn't the sign bit (otherwise it could be INT_MIN).
KnownOne2.clearBit(BitWidth - 1);
if (KnownOne2.getBoolValue()) {
- KnownZero = APInt::getSignBit(BitWidth);
+ KnownZero = APInt::getSignMask(BitWidth);
break;
}
break;
@@ -2833,7 +2836,7 @@ SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
computeKnownBits(N0, N0Zero, N0One);
bool overflow;
- (~N0Zero).uadd_ov(~N1Zero, overflow);
+ (void)(~N0Zero).uadd_ov(~N1Zero, overflow);
if (!overflow)
return OFK_Never;
}
@@ -2874,7 +2877,7 @@ bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
// one bit set.
if (Val.getOpcode() == ISD::SRL) {
auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0));
- if (C && C->getAPIntValue().isSignBit())
+ if (C && C->getAPIntValue().isSignMask())
return true;
}
@@ -2967,7 +2970,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
return std::max(Tmp, Tmp2);
case ISD::SRA:
- Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
+ Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
// SRA X, C -> adds C sign bits.
if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
APInt ShiftVal = C->getAPIntValue();
@@ -3130,40 +3133,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
// result. Otherwise it gives either negative or > bitwidth result
return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
}
- case ISD::INSERT_VECTOR_ELT: {
- SDValue InVec = Op.getOperand(0);
- SDValue InVal = Op.getOperand(1);
- SDValue EltNo = Op.getOperand(2);
- unsigned NumElts = InVec.getValueType().getVectorNumElements();
-
- ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
- if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
- // If we know the element index, split the demand between the
- // source vector and the inserted element.
- unsigned EltIdx = CEltNo->getZExtValue();
-
- // If we demand the inserted element then get its sign bits.
- Tmp = UINT_MAX;
- if (DemandedElts[EltIdx])
- Tmp = ComputeNumSignBits(InVal, Depth + 1);
-
- // If we demand the source vector then get its sign bits, and determine
- // the minimum.
- APInt VectorElts = DemandedElts;
- VectorElts.clearBit(EltIdx);
- if (!!VectorElts) {
- Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
- Tmp = std::min(Tmp, Tmp2);
- }
- } else {
- // Unknown element index, so ignore DemandedElts and demand them all.
- Tmp = ComputeNumSignBits(InVec, Depth + 1);
- Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
- Tmp = std::min(Tmp, Tmp2);
- }
- assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
- return Tmp;
- }
case ISD::EXTRACT_VECTOR_ELT: {
SDValue InVec = Op.getOperand(0);
SDValue EltNo = Op.getOperand(1);
@@ -7607,14 +7576,11 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
// Currently all types are split in half.
EVT LoVT, HiVT;
- if (!VT.isVector()) {
+ if (!VT.isVector())
LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
- } else {
- unsigned NumElements = VT.getVectorNumElements();
- assert(!(NumElements & 1) && "Splitting vector, but not in half!");
- LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
- NumElements/2);
- }
+ else
+ LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
+
return std::make_pair(LoVT, HiVT);
}
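
Aside (not part of the patch): two mechanical APInt migrations run through every SelectionDAG.cpp hunk above: getSignBit/isSignBit are renamed to getSignMask/isSignMask, and copying shifts (X = X.lshr(N)) become in-place shifts (X.lshrInPlace(N)). A small demo, assuming a post-rename libLLVM:

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    void signMaskDemo() {
      APInt Known = APInt::getSignMask(32);  // 0x80000000, was getSignBit()
      Known.lshrInPlace(4);                  // was: Known = Known.lshr(4)
      assert(Known == APInt(32, 0x08000000u));
      assert(APInt::getSignMask(32).isSignMask());  // was isSignBit()
    }
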
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 8708f58f1e63..2c58953ee908 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1151,7 +1151,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
return DAG.getFrameIndex(SI->second,
- TLI.getPointerTy(DAG.getDataLayout()));
+ TLI.getFrameIndexTy(DAG.getDataLayout()));
}
// If this is an instruction which fast-isel has deferred, select it now.
@@ -4674,7 +4674,7 @@ static unsigned getUnderlyingArgReg(const SDValue &N) {
/// At the end of instruction selection, they will be inserted into the entry BB.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
const Value *V, DILocalVariable *Variable, DIExpression *Expr,
- DILocation *DL, int64_t Offset, bool IsIndirect, const SDValue &N) {
+ DILocation *DL, int64_t Offset, bool IsDbgDeclare, const SDValue &N) {
const Argument *Arg = dyn_cast<Argument>(V);
if (!Arg)
return false;
@@ -4688,6 +4688,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction()))
return false;
+ bool IsIndirect = false;
Optional<MachineOperand> Op;
// Some arguments' frame index is recorded during argument lowering.
if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
@@ -4701,15 +4702,19 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
if (PR)
Reg = PR;
}
- if (Reg)
+ if (Reg) {
Op = MachineOperand::CreateReg(Reg, false);
+ IsIndirect = IsDbgDeclare;
+ }
}
if (!Op) {
// Check if ValueMap has reg number.
DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
- if (VMI != FuncInfo.ValueMap.end())
+ if (VMI != FuncInfo.ValueMap.end()) {
Op = MachineOperand::CreateReg(VMI->second, false);
+ IsIndirect = IsDbgDeclare;
+ }
}
if (!Op && N.getNode())
@@ -4955,8 +4960,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
} else if (isa<Argument>(Address)) {
// Address is an argument, so try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
- EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
- N);
+ EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true, N);
return nullptr;
} else {
SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
@@ -4966,7 +4970,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
} else {
// If Address is an argument then try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
- if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
+ if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true,
N)) {
// If the variable is pinned by an alloca in a dominating bb then
// use StaticAllocaMap.
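
Aside (not part of the patch): the IsIndirect flag is no longer passed in by callers; it is derived inside EmitFuncArgumentDbgValue. A plain restatement of the new rule (names here are mine, not LLVM's):

    // A dbg.declare describes the variable's storage address. If that
    // address ends up in a (virtual or physical) register, the DBG_VALUE
    // must be indirect; frame-index operands stay direct.
    bool computeIsIndirect(bool IsDbgDeclare, bool OperandIsRegister) {
      return IsDbgDeclare && OperandIsRegister;
    }
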
@@ -5613,7 +5617,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue Ops[2];
Ops[0] = getRoot();
Ops[1] =
- DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()), true);
+ DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true);
unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
@@ -6626,7 +6630,7 @@ static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
unsigned Align = DL.getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
- SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy(DL));
+ SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
MachinePointerInfo::getFixedStack(MF, SSFI));
OpInfo.CallOperand = StackSlot;
@@ -7389,7 +7393,7 @@ static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
} else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
Ops.push_back(Builder.DAG.getTargetFrameIndex(
- FI->getIndex(), TLI.getPointerTy(Builder.DAG.getDataLayout())));
+ FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
} else
Ops.push_back(OpVal);
}
@@ -7657,7 +7661,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
- DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy(DL));
+ DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
ArgListEntry Entry;
Entry.Node = DemoteStackSlot;
Entry.Ty = StackSlotPtrType;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index c6acc09b6602..9e34590cc39c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -928,7 +928,7 @@ private:
/// instruction selection, they will be inserted into the entry BB.
bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
DIExpression *Expr, DILocation *DL,
- int64_t Offset, bool IsIndirect,
+ int64_t Offset, bool IsDbgDeclare,
const SDValue &N);
/// Return the next block after MBB, or nullptr if there is none.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 2756e276c6a9..93c6738f650d 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -574,7 +574,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// using the bits from the RHS. Below, we use knowledge about the RHS to
// simplify the LHS, here we're using information from the LHS to simplify
// the RHS.
- if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+ if (ConstantSDNode *RHSC = isConstOrConstSplat(Op.getOperand(1))) {
SDValue Op0 = Op.getOperand(0);
APInt LHSZero, LHSOne;
// Do not increment Depth here; that can cause an infinite loop.
@@ -715,7 +715,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// If the RHS is a constant, see if we can simplify it.
// for XOR, we prefer to force bits to 1 if they will make a -1.
// If we can't force bits, try to shrink the constant.
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+ if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
APInt Expanded = C->getAPIntValue() | (~NewMask);
// If we can expand it to have all bits set, do it.
if (Expanded.isAllOnesValue()) {
@@ -778,7 +778,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// If (1) we only need the sign-bit, (2) the setcc operands are the same
// width as the setcc result, and (3) the result of a setcc conforms to 0 or
// -1, we may be able to bypass the setcc.
- if (NewMask.isSignBit() && Op0.getScalarValueSizeInBits() == BitWidth &&
+ if (NewMask.isSignMask() && Op0.getScalarValueSizeInBits() == BitWidth &&
getBooleanContents(Op.getValueType()) ==
BooleanContent::ZeroOrNegativeOneBooleanContent) {
// If we're testing X < 0, then this compare isn't needed - just use X!
@@ -839,7 +839,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
SDValue InnerOp = InOp.getNode()->getOperand(0);
EVT InnerVT = InnerOp.getValueType();
unsigned InnerBits = InnerVT.getSizeInBits();
- if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
+ if (ShAmt < InnerBits && NewMask.getActiveBits() <= InnerBits &&
isTypeDesirableForOp(ISD::SHL, InnerVT)) {
EVT ShTy = getShiftAmountTy(InnerVT, DL);
if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
@@ -861,12 +861,12 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
InnerOp.getOpcode() == ISD::SRL &&
InnerOp.hasOneUse() &&
isa<ConstantSDNode>(InnerOp.getOperand(1))) {
- uint64_t InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
+ unsigned InnerShAmt = cast<ConstantSDNode>(InnerOp.getOperand(1))
->getZExtValue();
if (InnerShAmt < ShAmt &&
InnerShAmt < InnerBits &&
- NewMask.lshr(InnerBits - InnerShAmt + ShAmt) == 0 &&
- NewMask.trunc(ShAmt) == 0) {
+ NewMask.getActiveBits() <= (InnerBits - InnerShAmt + ShAmt) &&
+ NewMask.countTrailingZeros() >= ShAmt) {
SDValue NewSA =
TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
Op.getOperand(1).getValueType());
@@ -929,8 +929,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero = KnownZero.lshr(ShAmt);
- KnownOne = KnownOne.lshr(ShAmt);
+ KnownZero.lshrInPlace(ShAmt);
+ KnownOne.lshrInPlace(ShAmt);
KnownZero.setHighBits(ShAmt); // High bits known zero.
}
@@ -964,21 +964,21 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// demand the input sign bit.
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
if (HighBits.intersects(NewMask))
- InDemandedMask |= APInt::getSignBit(VT.getScalarSizeInBits());
+ InDemandedMask |= APInt::getSignMask(VT.getScalarSizeInBits());
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero = KnownZero.lshr(ShAmt);
- KnownOne = KnownOne.lshr(ShAmt);
+ KnownZero.lshrInPlace(ShAmt);
+ KnownOne.lshrInPlace(ShAmt);
// Handle the sign bit, adjusted to where it is now in the mask.
- APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
+ APInt SignMask = APInt::getSignMask(BitWidth).lshr(ShAmt);
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
- if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
+ if (KnownZero.intersects(SignMask) || (HighBits & ~NewMask) == HighBits) {
SDNodeFlags Flags;
Flags.setExact(cast<BinaryWithFlagsSDNode>(Op)->Flags.hasExact());
return TLO.CombineTo(Op,
@@ -996,7 +996,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
Op.getOperand(0), NewSA));
}
- if (KnownOne.intersects(SignBit))
+ if (KnownOne.intersects(SignMask))
// New bits are known one.
KnownOne |= HighBits;
}
@@ -1040,7 +1040,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
return TLO.CombineTo(Op, Op.getOperand(0));
APInt InSignBit =
- APInt::getSignBit(ExVT.getScalarSizeInBits()).zext(BitWidth);
+ APInt::getSignMask(ExVT.getScalarSizeInBits()).zext(BitWidth);
APInt InputDemandedBits =
APInt::getLowBitsSet(BitWidth,
ExVT.getScalarSizeInBits()) &
@@ -1205,20 +1205,23 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
getShiftAmountTy(Op.getValueType(), DL));
}
- APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
- OperandBitWidth - BitWidth);
- HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
-
- if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
- // None of the shifted in bits are needed. Add a truncate of the
- // shift input, then shift it.
- SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
- Op.getValueType(),
- In.getOperand(0));
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
- Op.getValueType(),
- NewTrunc,
- Shift));
+ if (ShAmt->getZExtValue() < BitWidth) {
+ APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
+ OperandBitWidth - BitWidth);
+ HighBits.lshrInPlace(ShAmt->getZExtValue());
+ HighBits = HighBits.trunc(BitWidth);
+
+ if (!(HighBits & NewMask)) {
+ // None of the shifted in bits are needed. Add a truncate of the
+ // shift input, then shift it.
+ SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
+ Op.getValueType(),
+ In.getOperand(0));
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
+ Op.getValueType(),
+ NewTrunc,
+ Shift));
+ }
}
break;
}
@@ -1247,7 +1250,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (!TLO.LegalOperations() &&
!Op.getValueType().isVector() &&
!Op.getOperand(0).getValueType().isVector() &&
- NewMask == APInt::getSignBit(Op.getValueSizeInBits()) &&
+ NewMask == APInt::getSignMask(Op.getValueSizeInBits()) &&
Op.getOperand(0).getValueType().isFloatingPoint()) {
bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
@@ -2055,7 +2058,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
} else {
ShiftBits = C1.countTrailingZeros();
}
- NewC = NewC.lshr(ShiftBits);
+ NewC.lshrInPlace(ShiftBits);
if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
isLegalICmpImmediate(NewC.getSExtValue())) {
auto &DL = DAG.getDataLayout();
@@ -3353,7 +3356,7 @@ bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
SDValue Bias = DAG.getConstant(127, dl, IntVT);
- SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()), dl,
+ SDValue SignMask = DAG.getConstant(APInt::getSignMask(VT.getSizeInBits()), dl,
IntVT);
SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, dl, IntVT);
SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
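
Aside (not part of the patch): besides switching the RHS matchers to isConstOrConstSplat so splat-vector constants get simplified too, the hunks above rewrite two demanded-bits tests into forms that avoid building shifted/truncated temporaries. The pairs are equivalent, as this self-check sketch illustrates:

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    void demandedBitsDemo() {
      APInt NewMask(64, 0x00FF0000u);
      unsigned InnerBits = 32, ShAmt = 8;
      // "no demanded bits at or above InnerBits"
      assert((NewMask.lshr(InnerBits) == 0) ==
             (NewMask.getActiveBits() <= InnerBits));
      // "no demanded bits below ShAmt"
      assert((NewMask.trunc(ShAmt) == 0) ==
             (NewMask.countTrailingZeros() >= ShAmt));
    }
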
diff --git a/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp b/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
index cbce2dc89deb..bbb19b5e998d 100644
--- a/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -579,7 +579,7 @@ DWARFContext::getInliningInfoForAddress(uint64_t Address,
return InliningInfo;
}
- uint32_t CallFile = 0, CallLine = 0, CallColumn = 0;
+ uint32_t CallFile = 0, CallLine = 0, CallColumn = 0, CallDiscriminator = 0;
for (uint32_t i = 0, n = InlinedChain.size(); i != n; i++) {
DWARFDie &FunctionDIE = InlinedChain[i];
DILineInfo Frame;
@@ -605,10 +605,12 @@ DWARFContext::getInliningInfoForAddress(uint64_t Address,
Spec.FLIKind, Frame.FileName);
Frame.Line = CallLine;
Frame.Column = CallColumn;
+ Frame.Discriminator = CallDiscriminator;
}
// Get call file/line/column of a current DIE.
if (i + 1 < n) {
- FunctionDIE.getCallerFrame(CallFile, CallLine, CallColumn);
+ FunctionDIE.getCallerFrame(CallFile, CallLine, CallColumn,
+ CallDiscriminator);
}
}
InliningInfo.addFrame(Frame);
diff --git a/contrib/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp b/contrib/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp
index 4308cc2e2639..24039eb35209 100644
--- a/contrib/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp
@@ -290,10 +290,12 @@ uint64_t DWARFDie::getDeclLine() const {
}
void DWARFDie::getCallerFrame(uint32_t &CallFile, uint32_t &CallLine,
- uint32_t &CallColumn) const {
+ uint32_t &CallColumn,
+ uint32_t &CallDiscriminator) const {
CallFile = toUnsigned(find(DW_AT_call_file), 0);
CallLine = toUnsigned(find(DW_AT_call_line), 0);
CallColumn = toUnsigned(find(DW_AT_call_column), 0);
+ CallDiscriminator = toUnsigned(find(DW_AT_GNU_discriminator), 0);
}
void DWARFDie::dump(raw_ostream &OS, unsigned RecurseDepth,
@@ -350,32 +352,6 @@ void DWARFDie::dump(raw_ostream &OS, unsigned RecurseDepth,
}
}
-void DWARFDie::getInlinedChainForAddress(
- const uint64_t Address, SmallVectorImpl<DWARFDie> &InlinedChain) const {
- if (isNULL())
- return;
- DWARFDie DIE(*this);
- while (DIE) {
- // Append current DIE to inlined chain only if it has correct tag
- // (e.g. it is not a lexical block).
- if (DIE.isSubroutineDIE())
- InlinedChain.push_back(DIE);
-
- // Try to get child which also contains provided address.
- DWARFDie Child = DIE.getFirstChild();
- while (Child) {
- if (Child.addressRangeContainsAddress(Address)) {
- // Assume there is only one such child.
- break;
- }
- Child = Child.getSibling();
- }
- DIE = Child;
- }
- // Reverse the obtained chain to make the root of inlined chain last.
- std::reverse(InlinedChain.begin(), InlinedChain.end());
-}
-
DWARFDie DWARFDie::getParent() const {
if (isValid())
return U->getParent(Die);
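
Aside (not part of the patch): a usage sketch for the widened getCallerFrame signature above; the new out-parameter reads DW_AT_GNU_discriminator and defaults to 0 when the attribute is absent:

    #include "llvm/DebugInfo/DWARF/DWARFDie.h"
    #include <cstdint>

    void readCallSite(const llvm::DWARFDie &FunctionDIE) {
      uint32_t CallFile = 0, CallLine = 0, CallColumn = 0, CallDiscriminator = 0;
      FunctionDIE.getCallerFrame(CallFile, CallLine, CallColumn,
                                 CallDiscriminator);
    }
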
diff --git a/contrib/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp b/contrib/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
index 4ee8e8f46d2e..c3f467745402 100644
--- a/contrib/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARF/DWARFUnit.cpp
@@ -343,37 +343,63 @@ void DWARFUnit::collectAddressRanges(DWARFAddressRangesVector &CURanges) {
clearDIEs(true);
}
-DWARFDie
-DWARFUnit::getSubprogramForAddress(uint64_t Address) {
- extractDIEsIfNeeded(false);
- for (const DWARFDebugInfoEntry &D : DieArray) {
- DWARFDie DIE(this, &D);
- if (DIE.isSubprogramDIE() &&
- DIE.addressRangeContainsAddress(Address)) {
- return DIE;
+void DWARFUnit::updateAddressDieMap(DWARFDie Die) {
+ if (Die.isSubroutineDIE()) {
+ for (const auto &R : Die.getAddressRanges()) {
+ // Ignore 0-sized ranges.
+ if (R.first == R.second)
+ continue;
+ auto B = AddrDieMap.upper_bound(R.first);
+ if (B != AddrDieMap.begin() && R.first < (--B)->second.first) {
+ // The range is a sub-range of an existing range; we need to split the
+ // existing range.
+ if (R.second < B->second.first)
+ AddrDieMap[R.second] = B->second;
+ if (R.first > B->first)
+ AddrDieMap[B->first].first = R.first;
+ }
+ AddrDieMap[R.first] = std::make_pair(R.second, Die);
}
}
- return DWARFDie();
+ // Parent DIEs are added to the AddrDieMap before their child DIEs to
+ // simplify the logic for updating AddrDieMap. A child's range is always
+ // equal to or contained within its parent's range. With this assumption,
+ // adding one range into the map splits an existing range into at most 3
+ // sub-ranges.
+ for (DWARFDie Child = Die.getFirstChild(); Child; Child = Child.getSibling())
+ updateAddressDieMap(Child);
+}
+
+DWARFDie DWARFUnit::getSubroutineForAddress(uint64_t Address) {
+ extractDIEsIfNeeded(false);
+ if (AddrDieMap.empty())
+ updateAddressDieMap(getUnitDIE());
+ auto R = AddrDieMap.upper_bound(Address);
+ if (R == AddrDieMap.begin())
+ return DWARFDie();
+ // upper_bound's previous item contains Address.
+ --R;
+ if (Address >= R->second.first)
+ return DWARFDie();
+ return R->second.second;
}
void
DWARFUnit::getInlinedChainForAddress(uint64_t Address,
SmallVectorImpl<DWARFDie> &InlinedChain) {
- // First, find a subprogram that contains the given address (the root
- // of inlined chain).
- DWARFDie SubprogramDIE;
+ assert(InlinedChain.empty());
// Try to look for subprogram DIEs in the DWO file.
parseDWO();
- if (DWO)
- SubprogramDIE = DWO->getUnit()->getSubprogramForAddress(Address);
- else
- SubprogramDIE = getSubprogramForAddress(Address);
-
- // Get inlined chain rooted at this subprogram DIE.
- if (SubprogramDIE)
- SubprogramDIE.getInlinedChainForAddress(Address, InlinedChain);
- else
- InlinedChain.clear();
+ // First, find the subroutine that contains the given address (the leaf
+ // of the inlined chain).
+ DWARFDie SubroutineDIE =
+ (DWO ? DWO->getUnit() : this)->getSubroutineForAddress(Address);
+
+ while (SubroutineDIE) {
+ if (SubroutineDIE.isSubroutineDIE())
+ InlinedChain.push_back(SubroutineDIE);
+ SubroutineDIE = SubroutineDIE.getParent();
+ }
}
const DWARFUnitIndex &llvm::getDWARFUnitIndex(DWARFContext &Context,
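
Aside (not part of the patch): AddrDieMap is an interval map built on std::map, keyed by a range's start address and storing (end address, DIE). The lookup above works because upper_bound finds the first range starting strictly after Address; stepping back one entry yields the only candidate that can contain it. A standalone model:

    #include <cstdint>
    #include <map>
    #include <utility>

    using Payload = const char *;  // stands in for DWARFDie here
    std::map<uint64_t, std::pair<uint64_t, Payload>> AddrMap;

    Payload lookup(uint64_t Address) {
      auto R = AddrMap.upper_bound(Address);  // first range starting after Address
      if (R == AddrMap.begin())
        return nullptr;                       // every range starts after Address
      --R;                                    // last range starting <= Address
      if (Address >= R->second.first)
        return nullptr;                       // Address lies past that range's end
      return R->second.second;
    }
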
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index e29e9fc2c702..10b4e98b6079 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1580,7 +1580,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
GenericValue Elt;
Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
- Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
+ Elt.IntVal.lshrInPlace(ShiftAmt);
// it could be DstBitSize == SrcBitSize, so check it
if (DstBitSize < SrcBitSize)
Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
diff --git a/contrib/llvm/lib/IR/Attributes.cpp b/contrib/llvm/lib/IR/Attributes.cpp
index 2b7359dab807..d690111ef210 100644
--- a/contrib/llvm/lib/IR/Attributes.cpp
+++ b/contrib/llvm/lib/IR/Attributes.cpp
@@ -984,20 +984,23 @@ AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index,
}
AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index,
- AttributeSet AS) const {
- if (!AS.hasAttributes())
+ const AttrBuilder &B) const {
+ if (!B.hasAttributes())
return *this;
+ if (!pImpl)
+ return AttributeList::get(C, {{Index, AttributeSet::get(C, B)}});
+
#ifndef NDEBUG
// FIXME it is not obvious how this should work for alignment. For now, say
// we can't change a known alignment.
unsigned OldAlign = getParamAlignment(Index);
- unsigned NewAlign = AS.getAlignment();
+ unsigned NewAlign = B.getAlignment();
assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
"Attempt to change alignment!");
#endif
- SmallVector<std::pair<unsigned, AttributeSet>, 4> AttrSet;
+ SmallVector<IndexAttrPair, 4> AttrVec;
uint64_t NumAttrs = pImpl->getNumSlots();
unsigned I;
@@ -1005,31 +1008,25 @@ AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index,
for (I = 0; I < NumAttrs; ++I) {
if (getSlotIndex(I) >= Index)
break;
- AttrSet.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I));
+ AttrVec.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I));
}
+ AttrBuilder NewAttrs;
if (I < NumAttrs && getSlotIndex(I) == Index) {
- // We need to merge two AttributeSets.
- AttributeSet Merged = AttributeSet::get(
- C, AttrBuilder(pImpl->getSlotNode(I)).merge(AttrBuilder(AS)));
- AttrSet.emplace_back(Index, Merged);
+ // We need to merge the attribute sets.
+ NewAttrs.merge(pImpl->getSlotNode(I));
++I;
- } else {
- // Otherwise, there were no attributes at this position in the original
- // list. Add the set as is.
- AttrSet.emplace_back(Index, AS);
}
+ NewAttrs.merge(B);
+
+ // Add the new or merged attribute set at this index.
+ AttrVec.emplace_back(Index, AttributeSet::get(C, NewAttrs));
// Add the remaining entries.
for (; I < NumAttrs; ++I)
- AttrSet.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I));
-
- return get(C, AttrSet);
-}
+ AttrVec.emplace_back(getSlotIndex(I), pImpl->getSlotNode(I));
-AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index,
- const AttrBuilder &B) const {
- return get(C, Index, AttributeSet::get(C, B));
+ return get(C, AttrVec);
}
AttributeList AttributeList::removeAttribute(LLVMContext &C, unsigned Index,
@@ -1046,46 +1043,7 @@ AttributeList AttributeList::removeAttribute(LLVMContext &C, unsigned Index,
AttributeList AttributeList::removeAttributes(LLVMContext &C, unsigned Index,
AttributeList Attrs) const {
- if (!pImpl)
- return AttributeList();
- if (!Attrs.pImpl) return *this;
-
- // FIXME it is not obvious how this should work for alignment.
- // For now, say we can't pass in alignment, which no current use does.
- assert(!Attrs.hasAttribute(Index, Attribute::Alignment) &&
- "Attempt to change alignment!");
-
- // Add the attribute slots before the one we're trying to add.
- SmallVector<AttributeList, 4> AttrSet;
- uint64_t NumAttrs = pImpl->getNumSlots();
- AttributeList AL;
- uint64_t LastIndex = 0;
- for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
- if (getSlotIndex(I) >= Index) {
- if (getSlotIndex(I) == Index) AL = getSlotAttributes(LastIndex++);
- break;
- }
- LastIndex = I + 1;
- AttrSet.push_back(getSlotAttributes(I));
- }
-
- // Now remove the attribute from the correct slot. There may already be an
- // AttributeList there.
- AttrBuilder B(AL, Index);
-
- for (unsigned I = 0, E = Attrs.pImpl->getNumSlots(); I != E; ++I)
- if (Attrs.getSlotIndex(I) == Index) {
- B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Index);
- break;
- }
-
- AttrSet.push_back(AttributeList::get(C, Index, B));
-
- // Add the remaining attribute slots.
- for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
- AttrSet.push_back(getSlotAttributes(I));
-
- return get(C, AttrSet);
+ return removeAttributes(C, Index, AttrBuilder(Attrs.getAttributes(Index)));
}
AttributeList AttributeList::removeAttributes(LLVMContext &C, unsigned Index,
@@ -1098,31 +1056,30 @@ AttributeList AttributeList::removeAttributes(LLVMContext &C, unsigned Index,
assert(!Attrs.hasAlignmentAttr() && "Attempt to change alignment!");
// Add the attribute slots before the one we're trying to add.
- SmallVector<AttributeList, 4> AttrSet;
+ SmallVector<IndexAttrPair, 4> AttrSets;
uint64_t NumAttrs = pImpl->getNumSlots();
- AttributeList AL;
+ AttrBuilder B;
uint64_t LastIndex = 0;
for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
if (getSlotIndex(I) >= Index) {
- if (getSlotIndex(I) == Index) AL = getSlotAttributes(LastIndex++);
+ if (getSlotIndex(I) == Index)
+ B = AttrBuilder(pImpl->getSlotNode(LastIndex++));
break;
}
LastIndex = I + 1;
- AttrSet.push_back(getSlotAttributes(I));
+ AttrSets.push_back({getSlotIndex(I), pImpl->getSlotNode(I)});
}
- // Now remove the attribute from the correct slot. There may already be an
- // AttributeList there.
- AttrBuilder B(AL, Index);
+ // Remove the attributes from the existing set and add them.
B.remove(Attrs);
-
- AttrSet.push_back(AttributeList::get(C, Index, B));
+ if (B.hasAttributes())
+ AttrSets.push_back({Index, AttributeSet::get(C, B)});
// Add the remaining attribute slots.
for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
- AttrSet.push_back(getSlotAttributes(I));
+ AttrSets.push_back({getSlotIndex(I), pImpl->getSlotNode(I)});
- return get(C, AttrSet);
+ return get(C, AttrSets);
}
AttributeList AttributeList::removeAttributes(LLVMContext &C,
@@ -1406,18 +1363,7 @@ AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
}
AttrBuilder &AttrBuilder::removeAttributes(AttributeList A, uint64_t Index) {
- unsigned Slot = ~0U;
- for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I)
- if (A.getSlotIndex(I) == Index) {
- Slot = I;
- break;
- }
-
- assert(Slot != ~0U && "Couldn't find index in AttributeList!");
-
- for (AttributeList::iterator I = A.begin(Slot), E = A.end(Slot); I != E;
- ++I) {
- Attribute Attr = *I;
+ for (Attribute Attr : A.getAttributes(Index)) {
if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
removeAttribute(Attr.getKindAsEnum());
} else {
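
Aside (not part of the patch): the rewritten addAttributes/removeAttributes paths share one shape: seed an AttrBuilder from the slot at Index (if any), merge or remove, then rebuild a single AttributeSet for that slot. A sketch using only calls visible in this patch, including the AttrBuilder(AttributeSet) construction seen in the removeAttributes forwarding above:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    AttributeSet mergeIntoSlot(LLVMContext &C, AttributeSet Existing,
                               const AttrBuilder &Incoming) {
      AttrBuilder NewAttrs(Existing);  // start from what the slot already holds
      NewAttrs.merge(Incoming);        // fold in the attributes being added
      return AttributeSet::get(C, NewAttrs);
    }
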
diff --git a/contrib/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm/lib/IR/ConstantFold.cpp
index bba230677ebf..80b117015ede 100644
--- a/contrib/llvm/lib/IR/ConstantFold.cpp
+++ b/contrib/llvm/lib/IR/ConstantFold.cpp
@@ -223,7 +223,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
APInt V = CI->getValue();
if (ByteStart)
- V = V.lshr(ByteStart*8);
+ V.lshrInPlace(ByteStart*8);
V = V.trunc(ByteSize*8);
return ConstantInt::get(CI->getContext(), V);
}
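
Aside (not part of the patch): the lshrInPlace switch above is the same in-place-shift migration seen in SelectionDAG.cpp. The byte-extraction idiom it lands in, as a standalone function:

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    // Extract ByteSize bytes starting at ByteStart from V (little-endian
    // view): shift the window down in place, then truncate to its width.
    APInt extractBytes(APInt V, unsigned ByteStart, unsigned ByteSize) {
      if (ByteStart)
        V.lshrInPlace(ByteStart * 8);
      return V.trunc(ByteSize * 8);
    }
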
diff --git a/contrib/llvm/lib/IR/ConstantRange.cpp b/contrib/llvm/lib/IR/ConstantRange.cpp
index 8dfd6c8036c4..0cc38b025209 100644
--- a/contrib/llvm/lib/IR/ConstantRange.cpp
+++ b/contrib/llvm/lib/IR/ConstantRange.cpp
@@ -29,8 +29,6 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-/// Initialize a full (the default) or empty set for the specified type.
-///
ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
if (Full)
Lower = Upper = APInt::getMaxValue(BitWidth);
@@ -38,8 +36,6 @@ ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
Lower = Upper = APInt::getMinValue(BitWidth);
}
-/// Initialize a range to hold the single specified value.
-///
ConstantRange::ConstantRange(APInt V)
: Lower(std::move(V)), Upper(Lower + 1) {}
@@ -232,35 +228,23 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
return Result;
}
-/// isFullSet - Return true if this set contains all of the elements possible
-/// for this data-type
bool ConstantRange::isFullSet() const {
return Lower == Upper && Lower.isMaxValue();
}
-/// isEmptySet - Return true if this set contains no members.
-///
bool ConstantRange::isEmptySet() const {
return Lower == Upper && Lower.isMinValue();
}
-/// isWrappedSet - Return true if this set wraps around the top of the range,
-/// for example: [100, 8)
-///
bool ConstantRange::isWrappedSet() const {
return Lower.ugt(Upper);
}
-/// isSignWrappedSet - Return true if this set wraps around the INT_MIN of
-/// its bitwidth, for example: i8 [120, 140).
-///
bool ConstantRange::isSignWrappedSet() const {
return contains(APInt::getSignedMaxValue(getBitWidth())) &&
contains(APInt::getSignedMinValue(getBitWidth()));
}
-/// getSetSize - Return the number of elements in this set.
-///
APInt ConstantRange::getSetSize() const {
if (isFullSet()) {
APInt Size(getBitWidth()+1, 0);
@@ -272,12 +256,6 @@ APInt ConstantRange::getSetSize() const {
return (Upper - Lower).zext(getBitWidth()+1);
}
-/// isSizeStrictlySmallerThanOf - Compare set size of this range with the range
-/// CR.
-/// This function is faster than comparing results of getSetSize for the two
-/// ranges, because we don't need to extend bitwidth of APInts we're operating
-/// with.
-///
bool
ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const {
assert(getBitWidth() == Other.getBitWidth());
@@ -288,58 +266,44 @@ ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const {
return (Upper - Lower).ult(Other.Upper - Other.Lower);
}
-/// getUnsignedMax - Return the largest unsigned value contained in the
-/// ConstantRange.
-///
APInt ConstantRange::getUnsignedMax() const {
if (isFullSet() || isWrappedSet())
return APInt::getMaxValue(getBitWidth());
return getUpper() - 1;
}
-/// getUnsignedMin - Return the smallest unsigned value contained in the
-/// ConstantRange.
-///
APInt ConstantRange::getUnsignedMin() const {
if (isFullSet() || (isWrappedSet() && getUpper() != 0))
return APInt::getMinValue(getBitWidth());
return getLower();
}
-/// getSignedMax - Return the largest signed value contained in the
-/// ConstantRange.
-///
APInt ConstantRange::getSignedMax() const {
APInt SignedMax(APInt::getSignedMaxValue(getBitWidth()));
if (!isWrappedSet()) {
- if (getLower().sle(getUpper() - 1))
- return getUpper() - 1;
- return SignedMax;
+ APInt UpperMinusOne = getUpper() - 1;
+ if (getLower().sle(UpperMinusOne))
+ return UpperMinusOne;
+ return APInt::getSignedMaxValue(getBitWidth());
}
if (getLower().isNegative() == getUpper().isNegative())
- return SignedMax;
+ return APInt::getSignedMaxValue(getBitWidth());
return getUpper() - 1;
}
-/// getSignedMin - Return the smallest signed value contained in the
-/// ConstantRange.
-///
APInt ConstantRange::getSignedMin() const {
- APInt SignedMin(APInt::getSignedMinValue(getBitWidth()));
if (!isWrappedSet()) {
if (getLower().sle(getUpper() - 1))
return getLower();
- return SignedMin;
+ return APInt::getSignedMinValue(getBitWidth());
}
if ((getUpper() - 1).slt(getLower())) {
- if (getUpper() != SignedMin)
- return SignedMin;
+ if (!getUpper().isMinSignedValue())
+ return APInt::getSignedMinValue(getBitWidth());
}
return getLower();
}
-/// contains - Return true if the specified value is in the set.
-///
bool ConstantRange::contains(const APInt &V) const {
if (Lower == Upper)
return isFullSet();
@@ -349,10 +313,6 @@ bool ConstantRange::contains(const APInt &V) const {
return Lower.ule(V) || V.ult(Upper);
}
-/// contains - Return true if the argument is a subset of this range.
-/// Two equal sets contain each other. The empty set contained by all other
-/// sets.
-///
bool ConstantRange::contains(const ConstantRange &Other) const {
if (isFullSet() || Other.isEmptySet()) return true;
if (isEmptySet() || Other.isFullSet()) return false;
@@ -371,8 +331,6 @@ bool ConstantRange::contains(const ConstantRange &Other) const {
return Other.getUpper().ule(Upper) && Lower.ule(Other.getLower());
}
-/// subtract - Subtract the specified constant from the endpoints of this
-/// constant range.
ConstantRange ConstantRange::subtract(const APInt &Val) const {
assert(Val.getBitWidth() == getBitWidth() && "Wrong bit width");
// If the set is empty or full, don't modify the endpoints.
@@ -381,17 +339,10 @@ ConstantRange ConstantRange::subtract(const APInt &Val) const {
return ConstantRange(Lower - Val, Upper - Val);
}
-/// \brief Subtract the specified range from this range (aka relative complement
-/// of the sets).
ConstantRange ConstantRange::difference(const ConstantRange &CR) const {
return intersectWith(CR.inverse());
}
-/// intersectWith - Return the range that results from the intersection of this
-/// range with another range. The resultant range is guaranteed to include all
-/// elements contained in both input ranges, and to have the smallest possible
-/// set size that does so. Because there may be two intersections with the
-/// same set size, A.intersectWith(B) might not be equal to B.intersectWith(A).
ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
assert(getBitWidth() == CR.getBitWidth() &&
"ConstantRange types don't agree!");
@@ -466,13 +417,6 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
return CR;
}
-
-/// unionWith - Return the range that results from the union of this range with
-/// another range. The resultant range is guaranteed to include the elements of
-/// both sets, but may contain more. For example, [3, 9) union [12,15) is
-/// [3, 15), which includes 9, 10, and 11, which were not included in either
-/// set before.
-///
ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
assert(getBitWidth() == CR.getBitWidth() &&
"ConstantRange types don't agree!");
@@ -593,10 +537,6 @@ ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
};
}
-/// zeroExtend - Return a new range in the specified integer type, which must
-/// be strictly larger than the current type. The returned range will
-/// correspond to the possible range of values as if the source range had been
-/// zero extended.
ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
@@ -613,10 +553,6 @@ ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
return ConstantRange(Lower.zext(DstTySize), Upper.zext(DstTySize));
}
-/// signExtend - Return a new range in the specified integer type, which must
-/// be strictly larger than the current type. The returned range will
-/// correspond to the possible range of values as if the source range had been
-/// sign extended.
ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
@@ -635,10 +571,6 @@ ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
return ConstantRange(Lower.sext(DstTySize), Upper.sext(DstTySize));
}
-/// truncate - Return a new range in the specified integer type, which must be
-/// strictly smaller than the current type. The returned range will
-/// correspond to the possible range of values as if the source range had been
-/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
assert(getBitWidth() > DstTySize && "Not a value truncation");
if (isEmptySet())
@@ -690,8 +622,6 @@ ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
return ConstantRange(DstTySize, /*isFullSet=*/true);
}
-/// zextOrTrunc - make this range have the bit width given by \p DstTySize. The
-/// value is zero extended, truncated, or left alone to make it that width.
ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const {
unsigned SrcTySize = getBitWidth();
if (SrcTySize > DstTySize)
@@ -701,8 +631,6 @@ ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const {
return *this;
}
-/// sextOrTrunc - make this range have the bit width given by \p DstTySize. The
-/// value is sign extended, truncated, or left alone to make it that width.
ConstantRange ConstantRange::sextOrTrunc(uint32_t DstTySize) const {
unsigned SrcTySize = getBitWidth();
if (SrcTySize > DstTySize)
@@ -999,8 +927,6 @@ ConstantRange ConstantRange::inverse() const {
return ConstantRange(Upper, Lower);
}
-/// print - Print out the bounds to a stream...
-///
void ConstantRange::print(raw_ostream &OS) const {
if (isFullSet())
OS << "full-set";
@@ -1011,8 +937,6 @@ void ConstantRange::print(raw_ostream &OS) const {
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-/// dump - Allow printing from a debugger easily...
-///
LLVM_DUMP_METHOD void ConstantRange::dump() const {
print(dbgs());
}
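
Aside (not part of the patch): beyond dropping doc comments duplicated in the header, the getSignedMax/getSignedMin hunks avoid recomputing Upper - 1 and saturate explicitly. A demo of the saturation case the removed doc comment used as its example (i8 [120, 140)):

    #include "llvm/IR/ConstantRange.h"
    #include <cassert>
    using namespace llvm;

    void signedMaxDemo() {
      // Non-wrapped, no sign crossing: max is Upper - 1.
      ConstantRange CR(APInt(8, 100), APInt(8, 120));  // i8 [100, 120)
      assert(CR.getSignedMax() == APInt(8, 119));
      // i8 [120, 140) crosses INT8_MAX, so the signed max saturates.
      ConstantRange SW(APInt(8, 120), APInt(8, 140));
      assert(SW.getSignedMax() == APInt::getSignedMaxValue(8));  // 127
    }
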
diff --git a/contrib/llvm/lib/IR/Constants.cpp b/contrib/llvm/lib/IR/Constants.cpp
index c5f93c9f4db0..ffc8f2e4303b 100644
--- a/contrib/llvm/lib/IR/Constants.cpp
+++ b/contrib/llvm/lib/IR/Constants.cpp
@@ -518,27 +518,19 @@ ConstantInt *ConstantInt::getFalse(LLVMContext &Context) {
}
Constant *ConstantInt::getTrue(Type *Ty) {
- VectorType *VTy = dyn_cast<VectorType>(Ty);
- if (!VTy) {
- assert(Ty->isIntegerTy(1) && "True must be i1 or vector of i1.");
- return ConstantInt::getTrue(Ty->getContext());
- }
- assert(VTy->getElementType()->isIntegerTy(1) &&
- "True must be vector of i1 or i1.");
- return ConstantVector::getSplat(VTy->getNumElements(),
- ConstantInt::getTrue(Ty->getContext()));
+ assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1.");
+ ConstantInt *TrueC = ConstantInt::getTrue(Ty->getContext());
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), TrueC);
+ return TrueC;
}
Constant *ConstantInt::getFalse(Type *Ty) {
- VectorType *VTy = dyn_cast<VectorType>(Ty);
- if (!VTy) {
- assert(Ty->isIntegerTy(1) && "False must be i1 or vector of i1.");
- return ConstantInt::getFalse(Ty->getContext());
- }
- assert(VTy->getElementType()->isIntegerTy(1) &&
- "False must be vector of i1 or i1.");
- return ConstantVector::getSplat(VTy->getNumElements(),
- ConstantInt::getFalse(Ty->getContext()));
+ assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1.");
+ ConstantInt *FalseC = ConstantInt::getFalse(Ty->getContext());
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), FalseC);
+ return FalseC;
}
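
Aside (not part of the patch): a usage sketch for the consolidated getTrue/getFalse, which now share a single scalar-type assertion and splat for vector types:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    void boolConstants(LLVMContext &Ctx) {
      Constant *T = ConstantInt::getTrue(Type::getInt1Ty(Ctx));  // i1 true
      Constant *V = ConstantInt::getTrue(
          VectorType::get(Type::getInt1Ty(Ctx), 4));             // <4 x i1> splat
      (void)T; (void)V;
    }
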
// Get a ConstantInt from an APInt.
diff --git a/contrib/llvm/lib/IR/Core.cpp b/contrib/llvm/lib/IR/Core.cpp
index b5ed30b85c8a..50292b6e20bf 100644
--- a/contrib/llvm/lib/IR/Core.cpp
+++ b/contrib/llvm/lib/IR/Core.cpp
@@ -863,6 +863,19 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count) {
return LLVMMDNodeInContext(LLVMGetGlobalContext(), Vals, Count);
}
+LLVMValueRef LLVMMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD) {
+ return wrap(MetadataAsValue::get(*unwrap(C), unwrap(MD)));
+}
+
+LLVMMetadataRef LLVMValueAsMetadata(LLVMValueRef Val) {
+ auto *V = unwrap(Val);
+ if (auto *C = dyn_cast<Constant>(V))
+ return wrap(ConstantAsMetadata::get(C));
+ if (auto *MAV = dyn_cast<MetadataAsValue>(V))
+ return wrap(MAV->getMetadata());
+ return wrap(ValueAsMetadata::get(V));
+}
+
const char *LLVMGetMDString(LLVMValueRef V, unsigned *Length) {
if (const auto *MD = dyn_cast<MetadataAsValue>(unwrap(V)))
if (const MDString *S = dyn_cast<MDString>(MD->getMetadata())) {
@@ -1883,13 +1896,8 @@ void LLVMRemoveStringAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A,
const char *V) {
Function *Func = unwrap<Function>(Fn);
- AttributeList::AttrIndex Idx =
- AttributeList::AttrIndex(AttributeList::FunctionIndex);
- AttrBuilder B;
-
- B.addAttribute(A, V);
- AttributeList Set = AttributeList::get(Func->getContext(), Idx, B);
- Func->addAttributes(Idx, Set);
+ Attribute Attr = Attribute::get(Func->getContext(), A, V);
+ Func->addAttribute(AttributeList::FunctionIndex, Attr);
}
/*--.. Operations on parameters ............................................--*/
@@ -1949,9 +1957,7 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) {
Argument *A = unwrap<Argument>(Arg);
- AttrBuilder B;
- B.addAlignmentAttr(align);
- A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B));
+ A->addAttr(Attribute::getWithAlignment(A->getContext(), align));
}
/*--.. Operations on basic blocks ..........................................--*/
@@ -2158,11 +2164,8 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
unsigned align) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
- AttrBuilder B;
- B.addAlignmentAttr(align);
- Call.setAttributes(Call.getAttributes().addAttributes(
- Call->getContext(), index,
- AttributeList::get(Call->getContext(), index, B)));
+ Attribute AlignAttr = Attribute::getWithAlignment(Call->getContext(), align);
+ Call.addAttribute(index, AlignAttr);
}
void LLVMAddCallSiteAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
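
Aside (not part of the patch): a round-trip sketch for the two new C API bridges, assuming their declarations are exported through llvm-c/Core.h:

    #include "llvm-c/Core.h"

    void metadataRoundTrip(LLVMContextRef Ctx, LLVMValueRef Val) {
      LLVMMetadataRef MD = LLVMValueAsMetadata(Val);        // Value -> Metadata
      LLVMValueRef Wrapped = LLVMMetadataAsValue(Ctx, MD);  // Metadata -> Value
      (void)Wrapped;
    }

Per the implementation above, a Constant comes back as ConstantAsMetadata, and an existing MetadataAsValue is unwrapped rather than double-wrapped.
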
diff --git a/contrib/llvm/lib/IR/DataLayout.cpp b/contrib/llvm/lib/IR/DataLayout.cpp
index 6f90ce598568..93bacdd2e80f 100644
--- a/contrib/llvm/lib/IR/DataLayout.cpp
+++ b/contrib/llvm/lib/IR/DataLayout.cpp
@@ -608,11 +608,8 @@ unsigned DataLayout::getPointerSize(unsigned AS) const {
unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
assert(Ty->isPtrOrPtrVectorTy() &&
"This should only be called with a pointer or pointer vector type");
-
- if (Ty->isPointerTy())
- return getTypeSizeInBits(Ty);
-
- return getTypeSizeInBits(Ty->getScalarType());
+ Ty = Ty->getScalarType();
+ return getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace());
}
/*!
@@ -624,7 +621,7 @@ unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
== false) for the requested type \a Ty.
*/
unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
- int AlignType = -1;
+ AlignTypeEnum AlignType;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
@@ -673,8 +670,7 @@ unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
llvm_unreachable("Bad type for getAlignment!!!");
}
- return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
- abi_or_pref, Ty);
+ return getAlignmentInfo(AlignType, getTypeSizeInBits(Ty), abi_or_pref, Ty);
}
unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
diff --git a/contrib/llvm/lib/IR/Function.cpp b/contrib/llvm/lib/IR/Function.cpp
index c4bb9e83acd7..e1f5fdea44e4 100644
--- a/contrib/llvm/lib/IR/Function.cpp
+++ b/contrib/llvm/lib/IR/Function.cpp
@@ -138,13 +138,18 @@ bool Argument::onlyReadsMemory() const {
Attrs.hasParamAttribute(getArgNo(), Attribute::ReadNone);
}
-void Argument::addAttr(AttributeList AS) {
- assert(AS.getNumSlots() <= 1 &&
- "Trying to add more than one attribute set to an argument!");
- AttrBuilder B(AS, AS.getSlotIndex(0));
- getParent()->addAttributes(
- getArgNo() + 1,
- AttributeList::get(Parent->getContext(), getArgNo() + 1, B));
+void Argument::addAttrs(AttrBuilder &B) {
+ AttributeList AL = getParent()->getAttributes();
+ AL = AL.addAttributes(Parent->getContext(), getArgNo() + 1, B);
+ getParent()->setAttributes(AL);
+}
+
+void Argument::addAttr(Attribute::AttrKind Kind) {
+ getParent()->addAttribute(getArgNo() + 1, Kind);
+}
+
+void Argument::addAttr(Attribute Attr) {
+ getParent()->addAttribute(getArgNo() + 1, Attr);
}
void Argument::removeAttr(AttributeList AS) {
@@ -156,6 +161,10 @@ void Argument::removeAttr(AttributeList AS) {
AttributeList::get(Parent->getContext(), getArgNo() + 1, B));
}
+void Argument::removeAttr(Attribute::AttrKind Kind) {
+ getParent()->removeAttribute(getArgNo() + 1, Kind);
+}
+
bool Argument::hasAttribute(Attribute::AttrKind Kind) const {
return getParent()->hasParamAttribute(getArgNo(), Kind);
}
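
Aside (not part of the patch): a usage sketch for the reworked Argument attribute helpers, which now funnel through the parent Function instead of building one-slot AttributeLists:

    #include "llvm/IR/Argument.h"
    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    void markArg(Argument &A) {
      A.addAttr(Attribute::NonNull);     // add by kind
      A.addAttr(Attribute::getWithAlignment(A.getParent()->getContext(), 16));
      A.removeAttr(Attribute::NonNull);  // remove by kind
    }
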
diff --git a/contrib/llvm/lib/IR/Instructions.cpp b/contrib/llvm/lib/IR/Instructions.cpp
index c10c144122e2..76582e334d1f 100644
--- a/contrib/llvm/lib/IR/Instructions.cpp
+++ b/contrib/llvm/lib/IR/Instructions.cpp
@@ -1855,7 +1855,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
return false;
// Mask must be vector of i32.
- VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
+ auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
return false;
@@ -1863,10 +1863,10 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
return true;
- if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
+ if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
for (Value *Op : MV->operands()) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ if (auto *CI = dyn_cast<ConstantInt>(Op)) {
if (CI->uge(V1Size*2))
return false;
} else if (!isa<UndefValue>(Op)) {
@@ -1876,8 +1876,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
return true;
}
- if (const ConstantDataSequential *CDS =
- dyn_cast<ConstantDataSequential>(Mask)) {
+ if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
if (CDS->getElementAsInteger(i) >= V1Size*2)
@@ -1889,7 +1888,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
// used as the shuffle mask. When this occurs, the shuffle mask will
// fall into this case and fail. To avoid this error, do this bit of
// ugliness to allow such a mask to pass.
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask))
+ if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
if (CE->getOpcode() == Instruction::UserOp1)
return true;
@@ -1898,7 +1897,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) {
assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
- if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask))
+ if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
return CDS->getElementAsInteger(i);
Constant *C = Mask->getAggregateElement(i);
if (isa<UndefValue>(C))
@@ -1910,7 +1909,7 @@ void ShuffleVectorInst::getShuffleMask(Constant *Mask,
SmallVectorImpl<int> &Result) {
unsigned NumElts = Mask->getType()->getVectorNumElements();
- if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) {
+ if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
for (unsigned i = 0; i != NumElts; ++i)
Result.push_back(CDS->getElementAsInteger(i));
return;
diff --git a/contrib/llvm/lib/MC/MCDwarf.cpp b/contrib/llvm/lib/MC/MCDwarf.cpp
index cc32e90ad36e..1a320b0165fa 100644
--- a/contrib/llvm/lib/MC/MCDwarf.cpp
+++ b/contrib/llvm/lib/MC/MCDwarf.cpp
@@ -168,7 +168,7 @@ EmitDwarfLineTable(MCObjectStreamer *MCOS, MCSection *Section,
// and the current Label.
const MCAsmInfo *asmInfo = MCOS->getContext().getAsmInfo();
MCOS->EmitDwarfAdvanceLineAddr(LineDelta, LastLabel, Label,
- asmInfo->getPointerSize());
+ asmInfo->getCodePointerSize());
Discriminator = 0;
LastLine = LineEntry.getLine();
@@ -188,7 +188,7 @@ EmitDwarfLineTable(MCObjectStreamer *MCOS, MCSection *Section,
const MCAsmInfo *AsmInfo = Ctx.getAsmInfo();
MCOS->EmitDwarfAdvanceLineAddr(INT64_MAX, LastLabel, SectionEnd,
- AsmInfo->getPointerSize());
+ AsmInfo->getCodePointerSize());
}
//
@@ -594,7 +594,7 @@ static void EmitGenDwarfAranges(MCStreamer *MCOS,
// Figure the padding after the header before the table of address and size
// pairs whose values are PointerSize'ed.
const MCAsmInfo *asmInfo = context.getAsmInfo();
- int AddrSize = asmInfo->getPointerSize();
+ int AddrSize = asmInfo->getCodePointerSize();
int Pad = 2 * AddrSize - (Length & (2 * AddrSize - 1));
if (Pad == 2 * AddrSize)
Pad = 0;
@@ -677,7 +677,7 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
// The DWARF v5 header has unit type, address size, abbrev offset.
// Earlier versions have abbrev offset, address size.
const MCAsmInfo &AsmInfo = *context.getAsmInfo();
- int AddrSize = AsmInfo.getPointerSize();
+ int AddrSize = AsmInfo.getCodePointerSize();
if (context.getDwarfVersion() >= 5) {
MCOS->EmitIntValue(dwarf::DW_UT_compile, 1);
MCOS->EmitIntValue(AddrSize, 1);
@@ -823,7 +823,7 @@ static void EmitGenDwarfRanges(MCStreamer *MCOS) {
auto &Sections = context.getGenDwarfSectionSyms();
const MCAsmInfo *AsmInfo = context.getAsmInfo();
- int AddrSize = AsmInfo->getPointerSize();
+ int AddrSize = AsmInfo->getCodePointerSize();
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfRangesSection());
@@ -981,7 +981,7 @@ static unsigned getSizeForEncoding(MCStreamer &streamer,
default: llvm_unreachable("Unknown Encoding");
case dwarf::DW_EH_PE_absptr:
case dwarf::DW_EH_PE_signed:
- return context.getAsmInfo()->getPointerSize();
+ return context.getAsmInfo()->getCodePointerSize();
case dwarf::DW_EH_PE_udata2:
case dwarf::DW_EH_PE_sdata2:
return 2;
@@ -1318,7 +1318,7 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(const MCSymbol *personality,
if (CIEVersion >= 4) {
// Address Size
- Streamer.EmitIntValue(context.getAsmInfo()->getPointerSize(), 1);
+ Streamer.EmitIntValue(context.getAsmInfo()->getCodePointerSize(), 1);
// Segment Descriptor Size
Streamer.EmitIntValue(0, 1);
@@ -1384,7 +1384,7 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(const MCSymbol *personality,
InitialCFAOffset = CFAOffset;
// Padding
- Streamer.EmitValueToAlignment(IsEH ? 4 : MAI->getPointerSize());
+ Streamer.EmitValueToAlignment(IsEH ? 4 : MAI->getCodePointerSize());
Streamer.EmitLabel(sectionEnd);
return *sectionStart;
@@ -1453,7 +1453,7 @@ void FrameEmitterImpl::EmitFDE(const MCSymbol &cieStart,
// The size of a .eh_frame section has to be a multiple of the alignment
// since a null CIE is interpreted as the end. Old systems overaligned
// .eh_frame, so we do too and account for it in the last FDE.
- unsigned Align = LastInSection ? asmInfo->getPointerSize() : PCSize;
+ unsigned Align = LastInSection ? asmInfo->getCodePointerSize() : PCSize;
Streamer.EmitValueToAlignment(Align);
Streamer.EmitLabel(fdeEnd);
@@ -1514,6 +1514,7 @@ void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB,
MCContext &Context = Streamer.getContext();
const MCObjectFileInfo *MOFI = Context.getObjectFileInfo();
+ const MCAsmInfo *AsmInfo = Context.getAsmInfo();
FrameEmitterImpl Emitter(IsEH, Streamer);
ArrayRef<MCDwarfFrameInfo> FrameArray = Streamer.getDwarfFrameInfos();
@@ -1525,7 +1526,7 @@ void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB,
if (Frame.CompactUnwindEncoding == 0) continue;
if (!SectionEmitted) {
Streamer.SwitchSection(MOFI->getCompactUnwindSection());
- Streamer.EmitValueToAlignment(Context.getAsmInfo()->getPointerSize());
+ Streamer.EmitValueToAlignment(AsmInfo->getCodePointerSize());
SectionEmitted = true;
}
NeedsEHFrameSection |=
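// Hedged sketch of why the MCDwarf hunks above switch from getPointerSize()
// to getCodePointerSize(): DWARF line tables, aranges, CIEs and compact
// unwind all record the size of an address in the *text* segment, which on
// Harvard-style targets can differ from the data-pointer width. The struct
// and function below are illustrative stand-ins, not the MCAsmInfo API.
#include <cstdint>
#include <vector>

struct AsmInfoSketch {
  unsigned DataPointerSize = 2; // e.g. a 16-bit data address space
  unsigned CodePointerSize = 4; // code addresses may be wider
};

// Emit the one-byte "address size" field of a DWARF CIE (version >= 4)
// from the code-pointer size, as FrameEmitterImpl::EmitCIE now does.
void emitCIEAddressSize(std::vector<uint8_t> &Out, const AsmInfoSketch &AI) {
  Out.push_back(static_cast<uint8_t>(AI.CodePointerSize));
}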
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index e65ce9f0b936..42e8ad340281 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -1755,8 +1755,8 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
case DK_8BYTE:
return parseDirectiveValue(IDVal, 8);
case DK_DC_A:
- return parseDirectiveValue(IDVal,
- getContext().getAsmInfo()->getPointerSize());
+ return parseDirectiveValue(
+ IDVal, getContext().getAsmInfo()->getCodePointerSize());
case DK_OCTA:
return parseDirectiveOctaValue(IDVal);
case DK_SINGLE:
diff --git a/contrib/llvm/lib/Object/Archive.cpp b/contrib/llvm/lib/Object/Archive.cpp
index f2021f796d12..c4924f85a907 100644
--- a/contrib/llvm/lib/Object/Archive.cpp
+++ b/contrib/llvm/lib/Object/Archive.cpp
@@ -1,4 +1,4 @@
-//===- Archive.cpp - ar File Format implementation --------------*- C++ -*-===//
+//===- Archive.cpp - ar File Format implementation ------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,12 +11,29 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Object/Archive.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Support/Chrono.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <system_error>
using namespace llvm;
using namespace object;
@@ -25,7 +42,7 @@ using namespace llvm::support::endian;
static const char *const Magic = "!<arch>\n";
static const char *const ThinMagic = "!<thin>\n";
-void Archive::anchor() { }
+void Archive::anchor() {}
static Error
malformedError(Twine Msg) {
@@ -61,8 +78,8 @@ ArchiveMemberHeader::ArchiveMemberHeader(const Archive *Parent,
if (Err) {
std::string Buf;
raw_string_ostream OS(Buf);
- OS.write_escaped(llvm::StringRef(ArMemHdr->Terminator,
- sizeof(ArMemHdr->Terminator)));
+ OS.write_escaped(StringRef(ArMemHdr->Terminator,
+ sizeof(ArMemHdr->Terminator)));
OS.flush();
std::string Msg("terminator characters in archive member \"" + Buf +
"\" not the correct \"`\\n\" values for the archive "
@@ -97,13 +114,13 @@ Expected<StringRef> ArchiveMemberHeader::getRawName() const {
EndCond = ' ';
else
EndCond = '/';
- llvm::StringRef::size_type end =
- llvm::StringRef(ArMemHdr->Name, sizeof(ArMemHdr->Name)).find(EndCond);
- if (end == llvm::StringRef::npos)
+ StringRef::size_type end =
+ StringRef(ArMemHdr->Name, sizeof(ArMemHdr->Name)).find(EndCond);
+ if (end == StringRef::npos)
end = sizeof(ArMemHdr->Name);
assert(end <= sizeof(ArMemHdr->Name) && end > 0);
// Don't include the EndCond if there is one.
- return llvm::StringRef(ArMemHdr->Name, end);
+ return StringRef(ArMemHdr->Name, end);
}
// This gets the name looking up long names. Size is the size of the archive
@@ -205,12 +222,12 @@ Expected<StringRef> ArchiveMemberHeader::getName(uint64_t Size) const {
Expected<uint32_t> ArchiveMemberHeader::getSize() const {
uint32_t Ret;
- if (llvm::StringRef(ArMemHdr->Size,
- sizeof(ArMemHdr->Size)).rtrim(" ").getAsInteger(10, Ret)) {
+ if (StringRef(ArMemHdr->Size,
+ sizeof(ArMemHdr->Size)).rtrim(" ").getAsInteger(10, Ret)) {
std::string Buf;
raw_string_ostream OS(Buf);
- OS.write_escaped(llvm::StringRef(ArMemHdr->Size,
- sizeof(ArMemHdr->Size)).rtrim(" "));
+ OS.write_escaped(StringRef(ArMemHdr->Size,
+ sizeof(ArMemHdr->Size)).rtrim(" "));
OS.flush();
uint64_t Offset = reinterpret_cast<const char *>(ArMemHdr) -
Parent->getData().data();
@@ -227,8 +244,8 @@ Expected<sys::fs::perms> ArchiveMemberHeader::getAccessMode() const {
sizeof(ArMemHdr->AccessMode)).rtrim(' ').getAsInteger(8, Ret)) {
std::string Buf;
raw_string_ostream OS(Buf);
- OS.write_escaped(llvm::StringRef(ArMemHdr->AccessMode,
- sizeof(ArMemHdr->AccessMode)).rtrim(" "));
+ OS.write_escaped(StringRef(ArMemHdr->AccessMode,
+ sizeof(ArMemHdr->AccessMode)).rtrim(" "));
OS.flush();
uint64_t Offset = reinterpret_cast<const char *>(ArMemHdr) -
Parent->getData().data();
@@ -247,8 +264,8 @@ ArchiveMemberHeader::getLastModified() const {
.getAsInteger(10, Seconds)) {
std::string Buf;
raw_string_ostream OS(Buf);
- OS.write_escaped(llvm::StringRef(ArMemHdr->LastModified,
- sizeof(ArMemHdr->LastModified)).rtrim(" "));
+ OS.write_escaped(StringRef(ArMemHdr->LastModified,
+ sizeof(ArMemHdr->LastModified)).rtrim(" "));
OS.flush();
uint64_t Offset = reinterpret_cast<const char *>(ArMemHdr) -
Parent->getData().data();
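// Sketch of the parsing pattern the Archive.cpp hunks keep using: ar member
// headers store numbers as fixed-width, space-padded ASCII decimal, so each
// accessor trims the padding and converts, reporting the raw bytes on
// failure. Standalone C++17 version with std:: facilities; the exact field
// layout is assumed for illustration.
#include <charconv>
#include <cstdint>
#include <optional>
#include <string_view>

std::optional<uint32_t> parseArDecimalField(std::string_view Field) {
  // Trim the trailing ' ' padding, mirroring StringRef::rtrim(" ").
  while (!Field.empty() && Field.back() == ' ')
    Field.remove_suffix(1);
  uint32_t Value = 0;
  auto [Ptr, Ec] = std::from_chars(Field.data(), Field.data() + Field.size(),
                                   Value, /*base=*/10);
  if (Ec != std::errc() || Ptr != Field.data() + Field.size())
    return std::nullopt; // caller escapes and reports the bad bytes
  return Value;
}

// Usage: parseArDecimalField({Hdr.Size, sizeof(Hdr.Size)}) for the 10-byte
// decimal "Size" field of a classic ar header.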
diff --git a/contrib/llvm/lib/Object/Binary.cpp b/contrib/llvm/lib/Object/Binary.cpp
index 8467d349cd95..2b44c4a82d2c 100644
--- a/contrib/llvm/lib/Object/Binary.cpp
+++ b/contrib/llvm/lib/Object/Binary.cpp
@@ -1,4 +1,4 @@
-//===- Binary.cpp - A generic binary file -----------------------*- C++ -*-===//
+//===- Binary.cpp - A generic binary file ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,21 +11,25 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Object/Binary.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
-
-// Include headers for createBinary.
#include "llvm/Object/Archive.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/Error.h"
#include "llvm/Object/MachOUniversal.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <memory>
+#include <system_error>
using namespace llvm;
using namespace object;
-Binary::~Binary() {}
+Binary::~Binary() = default;
Binary::Binary(unsigned int Type, MemoryBufferRef Source)
: TypeID(Type), Data(Source) {}
diff --git a/contrib/llvm/lib/Object/COFFObjectFile.cpp b/contrib/llvm/lib/Object/COFFObjectFile.cpp
index a2d8f12449e6..1866aba9b21a 100644
--- a/contrib/llvm/lib/Object/COFFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/COFFObjectFile.cpp
@@ -1,4 +1,4 @@
-//===- COFFObjectFile.cpp - COFF object file implementation -----*- C++ -*-===//
+//===- COFFObjectFile.cpp - COFF object file implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,16 +11,28 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Object/COFF.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/COFF.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cctype>
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
#include <limits>
+#include <memory>
+#include <system_error>
using namespace llvm;
using namespace object;
@@ -116,7 +128,7 @@ const coff_symbol_type *COFFObjectFile::toSymb(DataRefImpl Ref) const {
const coff_section *COFFObjectFile::toSec(DataRefImpl Ref) const {
const coff_section *Addr = reinterpret_cast<const coff_section*>(Ref.p);
-# ifndef NDEBUG
+#ifndef NDEBUG
// Verify that the section points to a valid entry in the section table.
if (Addr < SectionTable || Addr >= (SectionTable + getNumberOfSections()))
report_fatal_error("Section was outside of section table.");
@@ -124,7 +136,7 @@ const coff_section *COFFObjectFile::toSec(DataRefImpl Ref) const {
uintptr_t Offset = uintptr_t(Addr) - uintptr_t(SectionTable);
assert(Offset % sizeof(coff_section) == 0 &&
"Section did not point to the beginning of a section");
-# endif
+#endif
return Addr;
}
@@ -985,7 +997,7 @@ COFFObjectFile::getSymbolAuxData(COFFSymbolRef Symbol) const {
if (Symbol.getNumberOfAuxSymbols() > 0) {
// AUX data comes immediately after the symbol in COFF
Aux = reinterpret_cast<const uint8_t *>(Symbol.getRawPtr()) + SymbolSize;
-# ifndef NDEBUG
+#ifndef NDEBUG
// Verify that the Aux symbol points to a valid entry in the symbol table.
uintptr_t Offset = uintptr_t(Aux) - uintptr_t(base());
if (Offset < getPointerToSymbolTable() ||
@@ -995,7 +1007,7 @@ COFFObjectFile::getSymbolAuxData(COFFSymbolRef Symbol) const {
assert((Offset - getPointerToSymbolTable()) % SymbolSize == 0 &&
"Aux Symbol data did not point to the beginning of a symbol");
-# endif
+#endif
}
return makeArrayRef(Aux, Symbol.getNumberOfAuxSymbols() * SymbolSize);
}
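// Sketch of the debug-only validation pattern in COFFObjectFile::toSec and
// getSymbolAuxData above: a DataRefImpl is just a raw pointer, so asserts
// builds check that it lands inside the table and on an entry boundary.
// Illustrative types, not the real COFF structures.
#include <cassert>
#include <cstdint>

struct SectionEntry { uint8_t Bytes[40]; }; // assumed entry size

const SectionEntry *toSection(const SectionEntry *Table, unsigned Count,
                              const void *Raw) {
  const auto *Addr = static_cast<const SectionEntry *>(Raw);
#ifndef NDEBUG
  assert(Addr >= Table && Addr < Table + Count &&
         "Section was outside of section table.");
  uintptr_t Offset = uintptr_t(Addr) - uintptr_t(Table);
  assert(Offset % sizeof(SectionEntry) == 0 &&
         "Section did not point to the beginning of a section");
#endif
  return Addr;
}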
diff --git a/contrib/llvm/lib/Object/IRSymtab.cpp b/contrib/llvm/lib/Object/IRSymtab.cpp
index da1ef9946b50..bb3d1b2cf695 100644
--- a/contrib/llvm/lib/Object/IRSymtab.cpp
+++ b/contrib/llvm/lib/Object/IRSymtab.cpp
@@ -28,14 +28,12 @@ struct Builder {
Builder(SmallVector<char, 0> &Symtab, SmallVector<char, 0> &Strtab)
: Symtab(Symtab), Strtab(Strtab) {}
- StringTableBuilder StrtabBuilder{StringTableBuilder::ELF};
+ StringTableBuilder StrtabBuilder{StringTableBuilder::RAW};
BumpPtrAllocator Alloc;
StringSaver Saver{Alloc};
DenseMap<const Comdat *, unsigned> ComdatMap;
- ModuleSymbolTable Msymtab;
- SmallPtrSet<GlobalValue *, 8> Used;
Mangler Mang;
Triple TT;
@@ -49,6 +47,7 @@ struct Builder {
void setStr(storage::Str &S, StringRef Value) {
S.Offset = StrtabBuilder.add(Value);
+ S.Size = Value.size();
}
template <typename T>
void writeRange(storage::Range<T> &R, const std::vector<T> &Objs) {
@@ -59,18 +58,24 @@ struct Builder {
}
Error addModule(Module *M);
- Error addSymbol(ModuleSymbolTable::Symbol Sym);
+ Error addSymbol(const ModuleSymbolTable &Msymtab,
+ const SmallPtrSet<GlobalValue *, 8> &Used,
+ ModuleSymbolTable::Symbol Sym);
Error build(ArrayRef<Module *> Mods);
};
Error Builder::addModule(Module *M) {
+ SmallPtrSet<GlobalValue *, 8> Used;
collectUsedGlobalVariables(*M, Used, /*CompilerUsed*/ false);
- storage::Module Mod;
- Mod.Begin = Msymtab.symbols().size();
+ ModuleSymbolTable Msymtab;
Msymtab.addModule(M);
- Mod.End = Msymtab.symbols().size();
+
+ storage::Module Mod;
+ Mod.Begin = Syms.size();
+ Mod.End = Syms.size() + Msymtab.symbols().size();
+ Mod.UncBegin = Uncommons.size();
Mods.push_back(Mod);
if (TT.isOSBinFormatCOFF()) {
@@ -84,20 +89,25 @@ Error Builder::addModule(Module *M) {
}
}
+ for (ModuleSymbolTable::Symbol Msym : Msymtab.symbols())
+ if (Error Err = addSymbol(Msymtab, Used, Msym))
+ return Err;
+
return Error::success();
}
-Error Builder::addSymbol(ModuleSymbolTable::Symbol Msym) {
+Error Builder::addSymbol(const ModuleSymbolTable &Msymtab,
+ const SmallPtrSet<GlobalValue *, 8> &Used,
+ ModuleSymbolTable::Symbol Msym) {
Syms.emplace_back();
storage::Symbol &Sym = Syms.back();
Sym = {};
- Sym.UncommonIndex = -1;
storage::Uncommon *Unc = nullptr;
auto Uncommon = [&]() -> storage::Uncommon & {
if (Unc)
return *Unc;
- Sym.UncommonIndex = Uncommons.size();
+ Sym.Flags |= 1 << storage::Symbol::FB_has_uncommon;
Uncommons.emplace_back();
Unc = &Uncommons.back();
*Unc = {};
@@ -194,15 +204,10 @@ Error Builder::build(ArrayRef<Module *> IRMods) {
setStr(Hdr.SourceFileName, IRMods[0]->getSourceFileName());
TT = Triple(IRMods[0]->getTargetTriple());
- // This adds the symbols for each module to Msymtab.
for (auto *M : IRMods)
if (Error Err = addModule(M))
return Err;
- for (ModuleSymbolTable::Symbol Msym : Msymtab.symbols())
- if (Error Err = addSymbol(Msym))
- return Err;
-
COFFLinkerOptsOS.flush();
setStr(Hdr.COFFLinkerOpts, COFFLinkerOpts);
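// Sketch of the encoding change in the IRSymtab hunks: instead of storing
// an UncommonIndex of -1 for "no extra data", each symbol now carries an
// FB_has_uncommon flag bit, and flagged symbols consume the next record
// from a parallel Uncommons array in order. Field names are illustrative.
#include <cstdint>
#include <vector>

struct Symbol {
  enum FlagBits { FB_has_uncommon = 0 };
  uint32_t Flags = 0;
};
struct Uncommon { uint32_t CommonSize = 0; };

// Reader side: walk symbols and uncommon records in lockstep.
void forEachUncommon(const std::vector<Symbol> &Syms,
                     const std::vector<Uncommon> &Uncs) {
  size_t NextUnc = 0;
  for (const Symbol &S : Syms)
    if (S.Flags & (1u << Symbol::FB_has_uncommon)) {
      const Uncommon &U = Uncs[NextUnc++];
      (void)U; // consume the matching record
    }
}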
diff --git a/contrib/llvm/lib/Object/ObjectFile.cpp b/contrib/llvm/lib/Object/ObjectFile.cpp
index f36388b677f3..1f60e7157bd9 100644
--- a/contrib/llvm/lib/Object/ObjectFile.cpp
+++ b/contrib/llvm/lib/Object/ObjectFile.cpp
@@ -1,4 +1,4 @@
-//===- ObjectFile.cpp - File format independent object file -----*- C++ -*-===//
+//===- ObjectFile.cpp - File format independent object file ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,20 +11,28 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Object/ObjectFile.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/Binary.h"
#include "llvm/Object/COFF.h"
+#include "llvm/Object/Error.h"
#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/Wasm.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
#include <system_error>
using namespace llvm;
using namespace object;
-void ObjectFile::anchor() { }
+void ObjectFile::anchor() {}
ObjectFile::ObjectFile(unsigned int Type, MemoryBufferRef Source)
: SymbolicFile(Type, Source) {}
diff --git a/contrib/llvm/lib/Object/SymbolicFile.cpp b/contrib/llvm/lib/Object/SymbolicFile.cpp
index 4b51a49cf342..16cff5c228bd 100644
--- a/contrib/llvm/lib/Object/SymbolicFile.cpp
+++ b/contrib/llvm/lib/Object/SymbolicFile.cpp
@@ -1,4 +1,4 @@
-//===- SymbolicFile.cpp - Interface that only provides symbols --*- C++ -*-===//
+//===- SymbolicFile.cpp - Interface that only provides symbols ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,12 +11,20 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Object/COFF.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Object/COFFImportFile.h"
+#include "llvm/Object/Error.h"
#include "llvm/Object/IRObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <memory>
using namespace llvm;
using namespace object;
@@ -24,7 +32,7 @@ using namespace object;
SymbolicFile::SymbolicFile(unsigned int Type, MemoryBufferRef Source)
: Binary(Type, Source) {}
-SymbolicFile::~SymbolicFile() {}
+SymbolicFile::~SymbolicFile() = default;
Expected<std::unique_ptr<SymbolicFile>> SymbolicFile::createSymbolicFile(
MemoryBufferRef Object, sys::fs::file_magic Type, LLVMContext *Context) {
diff --git a/contrib/llvm/lib/Support/APFloat.cpp b/contrib/llvm/lib/Support/APFloat.cpp
index 9778628911cd..c4c892f0352a 100644
--- a/contrib/llvm/lib/Support/APFloat.cpp
+++ b/contrib/llvm/lib/Support/APFloat.cpp
@@ -3442,7 +3442,7 @@ void IEEEFloat::toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
// Ignore trailing binary zeros.
int trailingZeros = significand.countTrailingZeros();
exp += trailingZeros;
- significand = significand.lshr(trailingZeros);
+ significand.lshrInPlace(trailingZeros);
// Change the exponent from 2^e to 10^e.
if (exp == 0) {
diff --git a/contrib/llvm/lib/Support/APInt.cpp b/contrib/llvm/lib/Support/APInt.cpp
index 0c7da1dad0d2..2d049a1cff85 100644
--- a/contrib/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm/lib/Support/APInt.cpp
@@ -125,16 +125,16 @@ APInt::APInt(unsigned numbits, StringRef Str, uint8_t radix)
fromString(numbits, Str, radix);
}
-APInt& APInt::AssignSlowCase(const APInt& RHS) {
+void APInt::AssignSlowCase(const APInt& RHS) {
// Don't do anything for X = X
if (this == &RHS)
- return *this;
+ return;
if (BitWidth == RHS.getBitWidth()) {
// assume same bit-width single-word case is already handled
assert(!isSingleWord());
memcpy(pVal, RHS.pVal, getNumWords() * APINT_WORD_SIZE);
- return *this;
+ return;
}
if (isSingleWord()) {
@@ -154,7 +154,7 @@ APInt& APInt::AssignSlowCase(const APInt& RHS) {
memcpy(pVal, RHS.pVal, RHS.getNumWords() * APINT_WORD_SIZE);
}
BitWidth = RHS.BitWidth;
- return clearUnusedBits();
+ clearUnusedBits();
}
/// This method 'profiles' an APInt for use with FoldingSet.
@@ -339,19 +339,16 @@ APInt& APInt::operator*=(const APInt& RHS) {
return *this;
}
-APInt& APInt::AndAssignSlowCase(const APInt& RHS) {
+void APInt::AndAssignSlowCase(const APInt& RHS) {
tcAnd(pVal, RHS.pVal, getNumWords());
- return *this;
}
-APInt& APInt::OrAssignSlowCase(const APInt& RHS) {
+void APInt::OrAssignSlowCase(const APInt& RHS) {
tcOr(pVal, RHS.pVal, getNumWords());
- return *this;
}
-APInt& APInt::XorAssignSlowCase(const APInt& RHS) {
+void APInt::XorAssignSlowCase(const APInt& RHS) {
tcXor(pVal, RHS.pVal, getNumWords());
- return *this;
}
APInt APInt::operator*(const APInt& RHS) const {
@@ -367,14 +364,6 @@ bool APInt::EqualSlowCase(const APInt& RHS) const {
return std::equal(pVal, pVal + getNumWords(), RHS.pVal);
}
-bool APInt::EqualSlowCase(uint64_t Val) const {
- unsigned n = getActiveBits();
- if (n <= APINT_BITS_PER_WORD)
- return pVal[0] == Val;
- else
- return false;
-}
-
bool APInt::ult(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Bit widths must be same for comparison");
if (isSingleWord())
@@ -733,6 +722,22 @@ unsigned APInt::countPopulationSlowCase() const {
return Count;
}
+bool APInt::intersectsSlowCase(const APInt &RHS) const {
+ for (unsigned i = 0, e = getNumWords(); i != e; ++i)
+ if ((pVal[i] & RHS.pVal[i]) != 0)
+ return true;
+
+ return false;
+}
+
+bool APInt::isSubsetOfSlowCase(const APInt &RHS) const {
+ for (unsigned i = 0, e = getNumWords(); i != e; ++i)
+ if ((pVal[i] & ~RHS.pVal[i]) != 0)
+ return false;
+
+ return true;
+}
+
APInt APInt::byteSwap() const {
assert(BitWidth >= 16 && BitWidth % 16 == 0 && "Cannot byteswap!");
if (BitWidth == 16)
@@ -774,14 +779,12 @@ APInt APInt::reverseBits() const {
}
APInt Val(*this);
- APInt Reversed(*this);
- int S = BitWidth - 1;
-
- const APInt One(BitWidth, 1);
+ APInt Reversed(BitWidth, 0);
+ unsigned S = BitWidth;
- for ((Val = Val.lshr(1)); Val != 0; (Val = Val.lshr(1))) {
+ for (; Val != 0; Val.lshrInPlace(1)) {
Reversed <<= 1;
- Reversed |= (Val & One);
+ Reversed |= Val[0];
--S;
}
@@ -1136,63 +1139,14 @@ APInt APInt::ashr(unsigned shiftAmt) const {
/// Logical right-shift this APInt by shiftAmt.
/// @brief Logical right-shift function.
-APInt APInt::lshr(const APInt &shiftAmt) const {
- return lshr((unsigned)shiftAmt.getLimitedValue(BitWidth));
-}
-
-/// Perform a logical right-shift from Src to Dst of Words words, by Shift,
-/// which must be less than 64. If the source and destination ranges overlap,
-/// we require that Src >= Dst (put another way, we require that the overall
-/// operation is a right shift on the combined range).
-static void lshrWords(APInt::WordType *Dst, APInt::WordType *Src,
- unsigned Words, unsigned Shift) {
- assert(Shift < APInt::APINT_BITS_PER_WORD);
-
- if (!Words)
- return;
-
- if (Shift == 0) {
- std::memmove(Dst, Src, Words * APInt::APINT_WORD_SIZE);
- return;
- }
-
- uint64_t Low = Src[0];
- for (unsigned I = 1; I != Words; ++I) {
- uint64_t High = Src[I];
- Dst[I - 1] =
- (Low >> Shift) | (High << (APInt::APINT_BITS_PER_WORD - Shift));
- Low = High;
- }
- Dst[Words - 1] = Low >> Shift;
+void APInt::lshrInPlace(const APInt &shiftAmt) {
+ lshrInPlace((unsigned)shiftAmt.getLimitedValue(BitWidth));
}
/// Logical right-shift this APInt by shiftAmt.
/// @brief Logical right-shift function.
-void APInt::lshrInPlace(unsigned shiftAmt) {
- if (isSingleWord()) {
- if (shiftAmt >= BitWidth)
- VAL = 0;
- else
- VAL >>= shiftAmt;
- return;
- }
-
- // Don't bother performing a no-op shift.
- if (!shiftAmt)
- return;
-
- // Find number of complete words being shifted out and zeroed.
- const unsigned Words = getNumWords();
- const unsigned ShiftFullWords =
- std::min(shiftAmt / APINT_BITS_PER_WORD, Words);
-
- // Fill in first Words - ShiftFullWords by shifting.
- lshrWords(pVal, pVal + ShiftFullWords, Words - ShiftFullWords,
- shiftAmt % APINT_BITS_PER_WORD);
-
- // The remaining high words are all zero.
- for (unsigned I = Words - ShiftFullWords; I != Words; ++I)
- pVal[I] = 0;
+void APInt::lshrSlowCase(unsigned ShiftAmt) {
+ tcShiftRight(pVal, getNumWords(), ShiftAmt);
}
/// Left-shift this APInt by shiftAmt.
@@ -1202,60 +1156,9 @@ APInt APInt::shl(const APInt &shiftAmt) const {
return shl((unsigned)shiftAmt.getLimitedValue(BitWidth));
}
-APInt APInt::shlSlowCase(unsigned shiftAmt) const {
- // If all the bits were shifted out, the result is 0. This avoids issues
- // with shifting by the size of the integer type, which produces undefined
- // results. We define these "undefined results" to always be 0.
- if (shiftAmt == BitWidth)
- return APInt(BitWidth, 0);
-
- // If none of the bits are shifted out, the result is *this. This avoids a
- // lshr by the words size in the loop below which can produce incorrect
- // results. It also avoids the expensive computation below for a common case.
- if (shiftAmt == 0)
- return *this;
-
- // Create some space for the result.
- uint64_t * val = new uint64_t[getNumWords()];
-
- // If we are shifting less than a word, do it the easy way
- if (shiftAmt < APINT_BITS_PER_WORD) {
- uint64_t carry = 0;
- for (unsigned i = 0; i < getNumWords(); i++) {
- val[i] = pVal[i] << shiftAmt | carry;
- carry = pVal[i] >> (APINT_BITS_PER_WORD - shiftAmt);
- }
- APInt Result(val, BitWidth);
- Result.clearUnusedBits();
- return Result;
- }
-
- // Compute some values needed by the remaining shift algorithms
- unsigned wordShift = shiftAmt % APINT_BITS_PER_WORD;
- unsigned offset = shiftAmt / APINT_BITS_PER_WORD;
-
- // If we are shifting whole words, just move whole words
- if (wordShift == 0) {
- for (unsigned i = 0; i < offset; i++)
- val[i] = 0;
- for (unsigned i = offset; i < getNumWords(); i++)
- val[i] = pVal[i-offset];
- APInt Result(val, BitWidth);
- Result.clearUnusedBits();
- return Result;
- }
-
- // Copy whole words from this to Result.
- unsigned i = getNumWords() - 1;
- for (; i > offset; --i)
- val[i] = pVal[i-offset] << wordShift |
- pVal[i-offset-1] >> (APINT_BITS_PER_WORD - wordShift);
- val[offset] = pVal[0] << wordShift;
- for (i = 0; i < offset; ++i)
- val[i] = 0;
- APInt Result(val, BitWidth);
- Result.clearUnusedBits();
- return Result;
+void APInt::shlSlowCase(unsigned ShiftAmt) {
+ tcShiftLeft(pVal, getNumWords(), ShiftAmt);
+ clearUnusedBits();
}
// Calculate the rotate amount modulo the bit width.
@@ -2239,7 +2142,7 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix,
while (Tmp != 0) {
unsigned Digit = unsigned(Tmp.getRawData()[0]) & MaskAmt;
Str.push_back(Digits[Digit]);
- Tmp = Tmp.lshr(ShiftAmt);
+ Tmp.lshrInPlace(ShiftAmt);
}
} else {
APInt divisor(Radix == 10? 4 : 8, Radix);
@@ -2698,63 +2601,58 @@ int APInt::tcDivide(WordType *lhs, const WordType *rhs,
return false;
}
-/* Shift a bignum left COUNT bits in-place. Shifted in bits are zero.
- There are no restrictions on COUNT. */
-void APInt::tcShiftLeft(WordType *dst, unsigned parts, unsigned count) {
- if (count) {
- /* Jump is the inter-part jump; shift is is intra-part shift. */
- unsigned jump = count / APINT_BITS_PER_WORD;
- unsigned shift = count % APINT_BITS_PER_WORD;
-
- while (parts > jump) {
- WordType part;
+/// Shift a bignum left Count bits in-place. Shifted in bits are zero. There are
+/// no restrictions on Count.
+void APInt::tcShiftLeft(WordType *Dst, unsigned Words, unsigned Count) {
+ // Don't bother performing a no-op shift.
+ if (!Count)
+ return;
- parts--;
+ /* WordShift is the inter-part shift; BitShift is the intra-part shift. */
+ unsigned WordShift = std::min(Count / APINT_BITS_PER_WORD, Words);
+ unsigned BitShift = Count % APINT_BITS_PER_WORD;
- /* dst[i] comes from the two parts src[i - jump] and, if we have
- an intra-part shift, src[i - jump - 1]. */
- part = dst[parts - jump];
- if (shift) {
- part <<= shift;
- if (parts >= jump + 1)
- part |= dst[parts - jump - 1] >> (APINT_BITS_PER_WORD - shift);
- }
-
- dst[parts] = part;
+ // Fastpath for moving by whole words.
+ if (BitShift == 0) {
+ std::memmove(Dst + WordShift, Dst, (Words - WordShift) * APINT_WORD_SIZE);
+ } else {
+ while (Words-- > WordShift) {
+ Dst[Words] = Dst[Words - WordShift] << BitShift;
+ if (Words > WordShift)
+ Dst[Words] |=
+ Dst[Words - WordShift - 1] >> (APINT_BITS_PER_WORD - BitShift);
}
-
- while (parts > 0)
- dst[--parts] = 0;
}
+
+ // Fill in the remainder with 0s.
+ std::memset(Dst, 0, WordShift * APINT_WORD_SIZE);
}
-/* Shift a bignum right COUNT bits in-place. Shifted in bits are
- zero. There are no restrictions on COUNT. */
-void APInt::tcShiftRight(WordType *dst, unsigned parts, unsigned count) {
- if (count) {
- /* Jump is the inter-part jump; shift is is intra-part shift. */
- unsigned jump = count / APINT_BITS_PER_WORD;
- unsigned shift = count % APINT_BITS_PER_WORD;
+/// Shift a bignum right Count bits in-place. Shifted in bits are zero. There
+/// are no restrictions on Count.
+void APInt::tcShiftRight(WordType *Dst, unsigned Words, unsigned Count) {
+ // Don't bother performing a no-op shift.
+ if (!Count)
+ return;
- /* Perform the shift. This leaves the most significant COUNT bits
- of the result at zero. */
- for (unsigned i = 0; i < parts; i++) {
- WordType part;
+ // WordShift is the inter-part shift; BitShift is the intra-part shift.
+ unsigned WordShift = std::min(Count / APINT_BITS_PER_WORD, Words);
+ unsigned BitShift = Count % APINT_BITS_PER_WORD;
- if (i + jump >= parts) {
- part = 0;
- } else {
- part = dst[i + jump];
- if (shift) {
- part >>= shift;
- if (i + jump + 1 < parts)
- part |= dst[i + jump + 1] << (APINT_BITS_PER_WORD - shift);
- }
- }
-
- dst[i] = part;
+ unsigned WordsToMove = Words - WordShift;
+ // Fastpath for moving by whole words.
+ if (BitShift == 0) {
+ std::memmove(Dst, Dst + WordShift, WordsToMove * APINT_WORD_SIZE);
+ } else {
+ for (unsigned i = 0; i != WordsToMove; ++i) {
+ Dst[i] = Dst[i + WordShift] >> BitShift;
+ if (i + 1 != WordsToMove)
+ Dst[i] |= Dst[i + WordShift + 1] << (APINT_BITS_PER_WORD - BitShift);
}
}
+
+ // Fill in the remainder with 0s.
+ std::memset(Dst + WordsToMove, 0, WordShift * APINT_WORD_SIZE);
}
/* Bitwise and of two bignums. */
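// Self-contained restatement of the algorithm the rewritten tcShiftRight
// above uses: decompose Count into a whole-word move (WordShift) plus an
// intra-word bit shift (BitShift), then zero the vacated high words. This
// mirrors the diff's logic but is not the LLVM entry point itself.
#include <algorithm>
#include <cstdint>
#include <cstring>

void shiftRightInPlace(uint64_t *Dst, unsigned Words, unsigned Count) {
  if (!Count)
    return; // don't bother performing a no-op shift
  unsigned WordShift = std::min(Count / 64, Words);
  unsigned BitShift = Count % 64;
  unsigned WordsToMove = Words - WordShift;
  if (BitShift == 0) {
    // Fast path: moving by whole words.
    std::memmove(Dst, Dst + WordShift, WordsToMove * sizeof(uint64_t));
  } else {
    for (unsigned i = 0; i != WordsToMove; ++i) {
      Dst[i] = Dst[i + WordShift] >> BitShift;
      if (i + 1 != WordsToMove)
        Dst[i] |= Dst[i + WordShift + 1] << (64 - BitShift);
    }
  }
  // The WordShift highest words were shifted out; fill them with zeros.
  std::memset(Dst + WordsToMove, 0, WordShift * sizeof(uint64_t));
}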
diff --git a/contrib/llvm/lib/Support/CommandLine.cpp b/contrib/llvm/lib/Support/CommandLine.cpp
index f4a9108b8544..34345901eab1 100644
--- a/contrib/llvm/lib/Support/CommandLine.cpp
+++ b/contrib/llvm/lib/Support/CommandLine.cpp
@@ -2069,12 +2069,15 @@ public:
#ifndef NDEBUG
OS << " with assertions";
#endif
+#if LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO
std::string CPU = sys::getHostCPUName();
if (CPU == "generic")
CPU = "(unknown)";
OS << ".\n"
<< " Default target: " << sys::getDefaultTargetTriple() << '\n'
- << " Host CPU: " << CPU << '\n';
+ << " Host CPU: " << CPU;
+#endif
+ OS << '\n';
}
void operator=(bool OptionWasSpecified) {
if (!OptionWasSpecified)
diff --git a/contrib/llvm/lib/Support/Dwarf.cpp b/contrib/llvm/lib/Support/Dwarf.cpp
index f13da62e4a87..200546857de7 100644
--- a/contrib/llvm/lib/Support/Dwarf.cpp
+++ b/contrib/llvm/lib/Support/Dwarf.cpp
@@ -22,7 +22,7 @@ StringRef llvm::dwarf::TagString(unsigned Tag) {
switch (Tag) {
default:
return StringRef();
-#define HANDLE_DW_TAG(ID, NAME) \
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \
case DW_TAG_##NAME: \
return "DW_TAG_" #NAME;
#include "llvm/Support/Dwarf.def"
@@ -31,11 +31,34 @@ StringRef llvm::dwarf::TagString(unsigned Tag) {
unsigned llvm::dwarf::getTag(StringRef TagString) {
return StringSwitch<unsigned>(TagString)
-#define HANDLE_DW_TAG(ID, NAME) .Case("DW_TAG_" #NAME, DW_TAG_##NAME)
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \
+ .Case("DW_TAG_" #NAME, DW_TAG_##NAME)
#include "llvm/Support/Dwarf.def"
.Default(DW_TAG_invalid);
}
+unsigned llvm::dwarf::TagVersion(dwarf::Tag Tag) {
+ switch (Tag) {
+ default:
+ return 0;
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \
+ case DW_TAG_##NAME: \
+ return VERSION;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
+unsigned llvm::dwarf::TagVendor(dwarf::Tag Tag) {
+ switch (Tag) {
+ default:
+ return 0;
+#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR) \
+ case DW_TAG_##NAME: \
+ return DWARF_VENDOR_##VENDOR;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
StringRef llvm::dwarf::ChildrenString(unsigned Children) {
switch (Children) {
case DW_CHILDREN_no: return "DW_CHILDREN_no";
@@ -48,29 +71,73 @@ StringRef llvm::dwarf::AttributeString(unsigned Attribute) {
switch (Attribute) {
default:
return StringRef();
-#define HANDLE_DW_AT(ID, NAME) \
- case DW_AT_##NAME: \
+#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) \
+ case DW_AT_##NAME: \
return "DW_AT_" #NAME;
#include "llvm/Support/Dwarf.def"
}
}
+unsigned llvm::dwarf::AttributeVersion(dwarf::Attribute Attribute) {
+ switch (Attribute) {
+ default:
+ return 0;
+#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) \
+ case DW_AT_##NAME: \
+ return VERSION;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
+unsigned llvm::dwarf::AttributeVendor(dwarf::Attribute Attribute) {
+ switch (Attribute) {
+ default:
+ return 0;
+#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) \
+ case DW_AT_##NAME: \
+ return DWARF_VENDOR_##VENDOR;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
StringRef llvm::dwarf::FormEncodingString(unsigned Encoding) {
switch (Encoding) {
default:
return StringRef();
-#define HANDLE_DW_FORM(ID, NAME) \
- case DW_FORM_##NAME: \
+#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) \
+ case DW_FORM_##NAME: \
return "DW_FORM_" #NAME;
#include "llvm/Support/Dwarf.def"
}
}
+unsigned llvm::dwarf::FormVersion(dwarf::Form Form) {
+ switch (Form) {
+ default:
+ return 0;
+#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) \
+ case DW_FORM_##NAME: \
+ return VERSION;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
+unsigned llvm::dwarf::FormVendor(dwarf::Form Form) {
+ switch (Form) {
+ default:
+ return 0;
+#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) \
+ case DW_FORM_##NAME: \
+ return DWARF_VENDOR_##VENDOR;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
StringRef llvm::dwarf::OperationEncodingString(unsigned Encoding) {
switch (Encoding) {
default:
return StringRef();
-#define HANDLE_DW_OP(ID, NAME) \
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \
case DW_OP_##NAME: \
return "DW_OP_" #NAME;
#include "llvm/Support/Dwarf.def"
@@ -81,17 +148,40 @@ StringRef llvm::dwarf::OperationEncodingString(unsigned Encoding) {
unsigned llvm::dwarf::getOperationEncoding(StringRef OperationEncodingString) {
return StringSwitch<unsigned>(OperationEncodingString)
-#define HANDLE_DW_OP(ID, NAME) .Case("DW_OP_" #NAME, DW_OP_##NAME)
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \
+ .Case("DW_OP_" #NAME, DW_OP_##NAME)
#include "llvm/Support/Dwarf.def"
.Case("DW_OP_LLVM_fragment", DW_OP_LLVM_fragment)
.Default(0);
}
+unsigned llvm::dwarf::OperationVersion(dwarf::LocationAtom Op) {
+ switch (Op) {
+ default:
+ return 0;
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \
+ case DW_OP_##NAME: \
+ return VERSION;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
+unsigned llvm::dwarf::OperationVendor(dwarf::LocationAtom Op) {
+ switch (Op) {
+ default:
+ return 0;
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \
+ case DW_OP_##NAME: \
+ return DWARF_VENDOR_##VENDOR;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
StringRef llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
switch (Encoding) {
default:
return StringRef();
-#define HANDLE_DW_ATE(ID, NAME) \
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \
case DW_ATE_##NAME: \
return "DW_ATE_" #NAME;
#include "llvm/Support/Dwarf.def"
@@ -100,11 +190,34 @@ StringRef llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
unsigned llvm::dwarf::getAttributeEncoding(StringRef EncodingString) {
return StringSwitch<unsigned>(EncodingString)
-#define HANDLE_DW_ATE(ID, NAME) .Case("DW_ATE_" #NAME, DW_ATE_##NAME)
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \
+ .Case("DW_ATE_" #NAME, DW_ATE_##NAME)
#include "llvm/Support/Dwarf.def"
.Default(0);
}
+unsigned llvm::dwarf::AttributeEncodingVersion(dwarf::TypeKind ATE) {
+ switch (ATE) {
+ default:
+ return 0;
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \
+ case DW_ATE_##NAME: \
+ return VERSION;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
+unsigned llvm::dwarf::AttributeEncodingVendor(dwarf::TypeKind ATE) {
+ switch (ATE) {
+ default:
+ return 0;
+#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) \
+ case DW_ATE_##NAME: \
+ return DWARF_VENDOR_##VENDOR;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
StringRef llvm::dwarf::DecimalSignString(unsigned Sign) {
switch (Sign) {
case DW_DS_unsigned: return "DW_DS_unsigned";
@@ -169,7 +282,7 @@ StringRef llvm::dwarf::LanguageString(unsigned Language) {
switch (Language) {
default:
return StringRef();
-#define HANDLE_DW_LANG(ID, NAME) \
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \
case DW_LANG_##NAME: \
return "DW_LANG_" #NAME;
#include "llvm/Support/Dwarf.def"
@@ -178,11 +291,34 @@ StringRef llvm::dwarf::LanguageString(unsigned Language) {
unsigned llvm::dwarf::getLanguage(StringRef LanguageString) {
return StringSwitch<unsigned>(LanguageString)
-#define HANDLE_DW_LANG(ID, NAME) .Case("DW_LANG_" #NAME, DW_LANG_##NAME)
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \
+ .Case("DW_LANG_" #NAME, DW_LANG_##NAME)
#include "llvm/Support/Dwarf.def"
.Default(0);
}
+unsigned llvm::dwarf::LanguageVersion(dwarf::SourceLanguage Lang) {
+ switch (Lang) {
+ default:
+ return 0;
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \
+ case DW_LANG_##NAME: \
+ return VERSION;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
+unsigned llvm::dwarf::LanguageVendor(dwarf::SourceLanguage Lang) {
+ switch (Lang) {
+ default:
+ return 0;
+#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \
+ case DW_LANG_##NAME: \
+ return DWARF_VENDOR_##VENDOR;
+#include "llvm/Support/Dwarf.def"
+ }
+}
+
StringRef llvm::dwarf::CaseString(unsigned Case) {
switch (Case) {
case DW_ID_case_sensitive: return "DW_ID_case_sensitive";
@@ -394,3 +530,12 @@ StringRef llvm::dwarf::AttributeValueString(uint16_t Attr, unsigned Val) {
return StringRef();
}
+
+bool llvm::dwarf::isValidFormForVersion(Form F, unsigned Version,
+ bool ExtensionsOk) {
+ if (FormVendor(F) == DWARF_VENDOR_DWARF) {
+ unsigned FV = FormVersion(F);
+ return FV > 0 && FV <= Version;
+ }
+ return ExtensionsOk;
+}
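// Sketch of the .def "X-macro" pattern the Dwarf.cpp hunks extend: each
// HANDLE_DW_* entry now carries VERSION and VENDOR columns, so one list
// generates the name, version, and vendor lookups consistently. The entry
// list is inlined here instead of including llvm/Support/Dwarf.def; the
// two sample tags use their real DWARF values.
#define FOR_EACH_TAG(HANDLE_TAG)                                             \
  HANDLE_TAG(0x0011, compile_unit, 2, DWARF)                                 \
  HANDLE_TAG(0x4109, GNU_call_site, 0, GNU)

const char *tagString(unsigned Tag) {
  switch (Tag) {
#define HANDLE_TAG(ID, NAME, VERSION, VENDOR)                                \
  case ID:                                                                   \
    return "DW_TAG_" #NAME;
    FOR_EACH_TAG(HANDLE_TAG)
#undef HANDLE_TAG
  default:
    return nullptr;
  }
}

unsigned tagVersion(unsigned Tag) {
  switch (Tag) {
#define HANDLE_TAG(ID, NAME, VERSION, VENDOR)                                \
  case ID:                                                                   \
    return VERSION;
    FOR_EACH_TAG(HANDLE_TAG)
#undef HANDLE_TAG
  default:
    return 0; // 0 means "unknown / vendor extension", as in the diff
  }
}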
diff --git a/contrib/llvm/lib/Support/LowLevelType.cpp b/contrib/llvm/lib/Support/LowLevelType.cpp
index 4290d69cd197..0ee3f1d0119e 100644
--- a/contrib/llvm/lib/Support/LowLevelType.cpp
+++ b/contrib/llvm/lib/Support/LowLevelType.cpp
@@ -18,25 +18,25 @@ using namespace llvm;
LLT::LLT(MVT VT) {
if (VT.isVector()) {
- SizeInBits = VT.getVectorElementType().getSizeInBits();
- ElementsOrAddrSpace = VT.getVectorNumElements();
- Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
+ init(/*isPointer=*/false, VT.getVectorNumElements() > 1,
+ VT.getVectorNumElements(), VT.getVectorElementType().getSizeInBits(),
+ /*AddressSpace=*/0);
} else if (VT.isValid()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
- Kind = Scalar;
- SizeInBits = VT.getSizeInBits();
- ElementsOrAddrSpace = 1;
- assert(SizeInBits != 0 && "invalid zero-sized type");
+ assert(VT.getSizeInBits() != 0 && "invalid zero-sized type");
+ init(/*isPointer=*/false, /*isVector=*/false, /*NumElements=*/0,
+ VT.getSizeInBits(), /*AddressSpace=*/0);
} else {
- Kind = Invalid;
- SizeInBits = ElementsOrAddrSpace = 0;
+ IsPointer = false;
+ IsVector = false;
+ RawData = 0;
}
}
void LLT::print(raw_ostream &OS) const {
if (isVector())
- OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
+ OS << "<" << getNumElements() << " x " << getElementType() << ">";
else if (isPointer())
OS << "p" << getAddressSpace();
else if (isValid()) {
@@ -45,3 +45,12 @@ void LLT::print(raw_ostream &OS) const {
} else
llvm_unreachable("trying to print an invalid type");
}
+
+const constexpr LLT::BitFieldInfo LLT::ScalarSizeFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::PointerSizeFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::PointerAddressSpaceFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::VectorElementsFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::VectorSizeFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::PointerVectorElementsFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::PointerVectorSizeFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::PointerVectorAddressSpaceFieldInfo;
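// Why LowLevelType.cpp adds those `const constexpr LLT::BitFieldInfo ...`
// lines above: before C++17, a static constexpr data member that is
// odr-used (address taken, bound to a reference) still needs exactly one
// namespace-scope definition, or the program can fail to link. Minimal
// illustration with a made-up type; LLT's actual field layout is assumed.
#include <cstdint>

struct Widget {
  using BitFieldInfo = uint64_t[2];
  static constexpr BitFieldInfo SizeFieldInfo = {32, 0};
};

// Out-of-line definition; required in C++14 if SizeFieldInfo is odr-used.
// Since C++17, static constexpr members are implicitly inline and this
// line is redundant (but still legal).
constexpr Widget::BitFieldInfo Widget::SizeFieldInfo;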
diff --git a/contrib/llvm/lib/Support/Regex.cpp b/contrib/llvm/lib/Support/Regex.cpp
index 68ba79e11766..b1087fd8853c 100644
--- a/contrib/llvm/lib/Support/Regex.cpp
+++ b/contrib/llvm/lib/Support/Regex.cpp
@@ -48,7 +48,7 @@ Regex::~Regex() {
}
}
-bool Regex::isValid(std::string &Error) {
+bool Regex::isValid(std::string &Error) const {
if (!error)
return true;
diff --git a/contrib/llvm/lib/Support/TargetParser.cpp b/contrib/llvm/lib/Support/TargetParser.cpp
index 639d2ece263a..bba7c6d0d604 100644
--- a/contrib/llvm/lib/Support/TargetParser.cpp
+++ b/contrib/llvm/lib/Support/TargetParser.cpp
@@ -210,7 +210,7 @@ bool llvm::ARM::getHWDivFeatures(unsigned HWDivKind,
else
Features.push_back("-hwdiv-arm");
- if (HWDivKind & ARM::AEK_HWDIV)
+ if (HWDivKind & ARM::AEK_HWDIVTHUMB)
Features.push_back("+hwdiv");
else
Features.push_back("-hwdiv");
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index ae01ea477bb9..7141e77fcd25 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1865,7 +1865,7 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
getUsefulBits(Op, OpUsefulBits, Depth + 1);
// The interesting part was at zero in the argument
- OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
+ OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
}
UsefulBits &= OpUsefulBits;
@@ -1894,13 +1894,13 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
Mask = Mask.shl(ShiftAmt);
getUsefulBits(Op, Mask, Depth + 1);
- Mask = Mask.lshr(ShiftAmt);
+ Mask.lshrInPlace(ShiftAmt);
} else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
// Shift Right
// We do not handle AArch64_AM::ASR, because the sign will change the
// number of useful bits
uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
- Mask = Mask.lshr(ShiftAmt);
+ Mask.lshrInPlace(ShiftAmt);
getUsefulBits(Op, Mask, Depth + 1);
Mask = Mask.shl(ShiftAmt);
} else
@@ -1954,7 +1954,7 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
if (Op.getOperand(1) == Orig) {
// Copy the bits from the result to the zero bits.
Mask = ResultUsefulBits & OpUsefulBits;
- Mask = Mask.lshr(LSB);
+ Mask.lshrInPlace(LSB);
}
if (Op.getOperand(0) == Orig)
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0d3289ac84c3..4ddc95199d4c 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3239,30 +3239,26 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
- if (getTargetMachine().getCodeModel() == CodeModel::Large &&
- Subtarget->isTargetMachO()) {
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ auto GV = G->getGlobal();
+ if (Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine()) ==
+ AArch64II::MO_GOT) {
+ Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT);
+ Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
+ } else {
const GlobalValue *GV = G->getGlobal();
- bool InternalLinkage = GV->hasInternalLinkage();
- if (InternalLinkage)
- Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
- else {
- Callee =
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT);
- Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
- }
- } else if (ExternalSymbolSDNode *S =
- dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
+ }
+ } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ if (getTargetMachine().getCodeModel() == CodeModel::Large &&
+ Subtarget->isTargetMachO()) {
const char *Sym = S->getSymbol();
Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
+ } else {
+ const char *Sym = S->getSymbol();
+ Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
}
- } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
- Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
- } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const char *Sym = S->getSymbol();
- Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
}
// We don't usually want to end the call-sequence here because we would tidy
@@ -7130,7 +7126,7 @@ bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
if (I->getOpcode() != Instruction::FMul)
return true;
- if (I->getNumUses() != 1)
+ if (!I->hasOneUse())
return true;
Instruction *User = I->user_back();
@@ -10395,7 +10391,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
// call. This will cause the optimizers to attempt to move, or duplicate,
// return instructions to help enable tail call optimizations for this
// instruction.
-bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
return CI->isTailCall();
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 2ad6c8b23df8..a023b4373835 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -593,7 +593,7 @@ private:
}
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
- bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
ISD::MemIndexedMode &AM, bool &IsInc,
SelectionDAG &DAG) const;
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 4449412532f3..82e9c5a88e3b 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2586,6 +2586,11 @@ def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
Sched<[WriteF]>;
}
+// Similarly, add aliases that match "fmov $Rd, #0.0" to a move from the
+// zero register.
+def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
+ Requires<[HasFullFP16]>;
+def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
+def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 878dac6bff1e..5e01b6cd2b46 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -20,6 +20,7 @@
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index 20a5979f9b4b..6f9021c4a030 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -482,7 +482,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
auto &MO = MI.getOperand(Idx);
- if (!MO.isReg())
+ if (!MO.isReg() || !MO.getReg())
continue;
LLT Ty = MRI.getType(MO.getReg());
@@ -537,7 +537,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands};
SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
- if (MI.getOperand(Idx).isReg()) {
+ if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
if (!Mapping->isValid())
return InstructionMapping();
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td b/contrib/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td
index 6bce4ef6b652..4bd77d344488 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td
@@ -265,6 +265,12 @@ def : InstRW<[FalkorWr_2LD_2VXVY_2LD_1XYZ_2VXVY_4cyc, WriteAdr],(instregex "^LD4
// Arithmetic and Logical Instructions
// -----------------------------------------------------------------------------
def : InstRW<[FalkorWr_ADD], (instregex "^ADD(S)?(W|X)r(s|x)$")>;
+def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^AND(S)?(W|X)r(i|r|s)$")>;
+def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^BIC(S)?(W|X)r(r|s)$")>;
+def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^EON(W|X)r(r|s)$")>;
+def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^EOR(W|X)r(i|r|s)$")>;
+def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^ORN(W|X)r(r|s)$")>;
+def : InstRW<[FalkorWr_1XYZ_1cyc], (instregex "^ORR(W|X)r(i|r|s)$")>;
def : InstRW<[FalkorWr_2XYZ_2cyc], (instregex "^SUB(S)?(W|X)r(s|x)$")>;
// SIMD Miscellaneous Instructions
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index b3aba4781db8..042755bd36d0 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -35,6 +35,11 @@ static cl::opt<bool>
UseAddressTopByteIgnored("aarch64-use-tbi", cl::desc("Assume that top byte of "
"an address is ignored"), cl::init(false), cl::Hidden);
+static cl::opt<bool>
+ UseNonLazyBind("aarch64-enable-nonlazybind",
+ cl::desc("Call nonlazybind functions via direct GOT load"),
+ cl::init(false), cl::Hidden);
+
AArch64Subtarget &
AArch64Subtarget::initializeSubtargetDependencies(StringRef FS,
StringRef CPUString) {
@@ -155,6 +160,23 @@ AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
return AArch64II::MO_NO_FLAG;
}
+unsigned char AArch64Subtarget::classifyGlobalFunctionReference(
+ const GlobalValue *GV, const TargetMachine &TM) const {
+ // MachO large model always goes via a GOT, because we don't have the
+ // relocations available to do anything else..
+ if (TM.getCodeModel() == CodeModel::Large && isTargetMachO() &&
+ !GV->hasInternalLinkage())
+ return AArch64II::MO_GOT;
+
+ // NonLazyBind goes via GOT unless we know it's available locally.
+ auto *F = dyn_cast<Function>(GV);
+ if (UseNonLazyBind && F && F->hasFnAttribute(Attribute::NonLazyBind) &&
+ !TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
+ return AArch64II::MO_GOT;
+
+ return AArch64II::MO_NO_FLAG;
+}
+
/// This function returns the name of a function which has an interface
/// like the non-standard bzero function, if such a function exists on
/// the current subtarget and it is considered preferable over
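// Condensed model of the classification the LowerCall and Subtarget hunks
// agree on: decide once whether a callee must be reached through the GOT,
// then emit either a direct TargetGlobalAddress or a LOADgot. The booleans
// below stand in for the real TargetMachine/GlobalValue queries.
enum class CalleeRef { Direct, ViaGOT };

CalleeRef classifyFunctionReference(bool LargeCodeModelMachO,
                                    bool HasInternalLinkage,
                                    bool NonLazyBind, bool KnownDSOLocal) {
  // MachO's large code model lacks the relocations for anything but a GOT
  // load, unless the symbol is internal and therefore always reachable.
  if (LargeCodeModelMachO && !HasInternalLinkage)
    return CalleeRef::ViaGOT;
  // nonlazybind functions are called via a direct GOT load when they may
  // resolve outside the current DSO.
  if (NonLazyBind && !KnownDSOLocal)
    return CalleeRef::ViaGOT;
  return CalleeRef::Direct;
}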
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h
index 40ad9185012c..3d66a9ea8ce6 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -271,6 +271,9 @@ public:
unsigned char ClassifyGlobalReference(const GlobalValue *GV,
const TargetMachine &TM) const;
+ unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
+ const TargetMachine &TM) const;
+
/// This function returns the name of a function which has an interface
/// like the non-standard bzero function, if such a function exists on
/// the current subtarget and it is considered preferable over
diff --git a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index cbab68979c56..d7bbc2bcd22c 100644
--- a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2100,27 +2100,9 @@ AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
bool isNegative = parseOptionalToken(AsmToken::Minus);
const AsmToken &Tok = Parser.getTok();
- if (Tok.is(AsmToken::Real)) {
- APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
- if (isNegative)
- RealVal.changeSign();
-
- uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
- int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
- Parser.Lex(); // Eat the token.
- // Check for out of range values. As an exception, we let Zero through,
- // as we handle that special case in post-processing before matching in
- // order to use the zero register for it.
- if (Val == -1 && !RealVal.isPosZero()) {
- TokError("expected compatible register or floating-point constant");
- return MatchOperand_ParseFail;
- }
- Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
- return MatchOperand_Success;
- }
- if (Tok.is(AsmToken::Integer)) {
+ if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
int64_t Val;
- if (!isNegative && Tok.getString().startswith("0x")) {
+ if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
Val = Tok.getIntVal();
if (Val > 255 || Val < 0) {
TokError("encoded floating point value out of range");
@@ -2128,10 +2110,24 @@ AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
}
} else {
APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
+ if (isNegative)
+ RealVal.changeSign();
+
uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
- // If we had a '-' in front, toggle the sign bit.
- IntVal ^= (uint64_t)isNegative << 63;
Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
+
+ // Check for out-of-range values. As an exception, we let Zero through,
+ // but as tokens instead of an FPImm so that it can be matched by the
+ // appropriate alias if one exists.
+ if (RealVal.isPosZero()) {
+ Parser.Lex(); // Eat the token.
+ Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext()));
+ Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext()));
+ return MatchOperand_Success;
+ } else if (Val == -1) {
+ TokError("expected compatible register or floating-point constant");
+ return MatchOperand_ParseFail;
+ }
}
Parser.Lex(); // Eat the token.
Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
@@ -3655,21 +3651,6 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
}
}
- // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
- if (NumOperands == 3 && Tok == "fmov") {
- AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
- AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
- if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
- unsigned zreg =
- !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
- RegOp.getReg())
- ? AArch64::WZR
- : AArch64::XZR;
- Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
- Op.getEndLoc(), getContext());
- }
- }
-
MCInst Inst;
// First try to match against the secondary set of tables containing the
// short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index 8fc822329595..94112849f84e 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -39,7 +39,7 @@ AArch64MCAsmInfoDarwin::AArch64MCAsmInfoDarwin() {
PrivateLabelPrefix = "L";
SeparatorString = "%%";
CommentString = ";";
- PointerSize = CalleeSaveStackSlotSize = 8;
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
AlignmentIsInBytes = false;
UsesELFSectionDirectiveForBSS = true;
@@ -71,7 +71,7 @@ AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(const Triple &T) {
// We prefer NEON instructions to be printed in the short form.
AssemblerDialect = AsmWriterVariant == Default ? 0 : AsmWriterVariant;
- PointerSize = 8;
+ CodePointerSize = 8;
// ".comm align is in bytes but .align is pow-2."
AlignmentIsInBytes = false;
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 0446655830d1..a81bcb56dfdc 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -144,6 +144,10 @@ bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough(
}
void AMDGPUAsmPrinter::EmitFunctionBodyStart() {
+ const AMDGPUMachineFunction *MFI = MF->getInfo<AMDGPUMachineFunction>();
+ if (!MFI->isEntryFunction())
+ return;
+
const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
SIProgramInfo KernelInfo;
amd_kernel_code_t KernelCode;
@@ -184,9 +188,11 @@ void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
}
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
// The starting address of all shader programs must be 256 bytes aligned.
- MF.setAlignment(8);
+ // Regular functions just need the basic required instruction alignment.
+ MF.setAlignment(MFI->isEntryFunction() ? 8 : 2);
SetupMachineFunction(MF);
@@ -220,13 +226,19 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
OutStreamer->SwitchSection(CommentSection);
if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
- OutStreamer->emitRawComment(" Kernel info:", false);
- OutStreamer->emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen),
- false);
+ if (MFI->isEntryFunction()) {
+ OutStreamer->emitRawComment(" Kernel info:", false);
+ } else {
+ OutStreamer->emitRawComment(" Function info:", false);
+ }
+
+ OutStreamer->emitRawComment(" codeLenInByte = " +
+ Twine(getFunctionCodeSize(MF)), false);
OutStreamer->emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR),
false);
OutStreamer->emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR),
false);
+
OutStreamer->emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode),
false);
OutStreamer->emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode),
@@ -236,6 +248,9 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
OutStreamer->emitRawComment(" LDSByteSize: " + Twine(KernelInfo.LDSSize) +
" bytes/workgroup (compile time only)", false);
+ if (!MFI->isEntryFunction())
+ return false;
+
OutStreamer->emitRawComment(" SGPRBlocks: " +
Twine(KernelInfo.SGPRBlocks), false);
OutStreamer->emitRawComment(" VGPRBlocks: " +
@@ -317,7 +332,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
const MachineOperand &MO = MI.getOperand(op_idx);
if (!MO.isReg())
continue;
- unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff;
+ unsigned HWReg = RI->getHWRegIndex(MO.getReg());
// Registers with value > 127 aren't GPRs
if (HWReg > 127)
@@ -360,18 +375,12 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
}
}
-void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
- const MachineFunction &MF) const {
+uint64_t AMDGPUAsmPrinter::getFunctionCodeSize(const MachineFunction &MF) const {
const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
- const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
- uint64_t CodeSize = 0;
- unsigned MaxSGPR = 0;
- unsigned MaxVGPR = 0;
- bool VCCUsed = false;
- bool FlatUsed = false;
- const SIRegisterInfo *RI = STM.getRegisterInfo();
const SIInstrInfo *TII = STM.getInstrInfo();
+ uint64_t CodeSize = 0;
+
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
// TODO: CodeSize should account for multiple functions.
@@ -380,122 +389,86 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
if (MI.isDebugValue())
continue;
- if (isVerbose())
- CodeSize += TII->getInstSizeInBytes(MI);
+ CodeSize += TII->getInstSizeInBytes(MI);
+ }
+ }
- unsigned numOperands = MI.getNumOperands();
- for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
- const MachineOperand &MO = MI.getOperand(op_idx);
- unsigned width = 0;
- bool isSGPR = false;
+ return CodeSize;
+}
- if (!MO.isReg())
- continue;
+static bool hasAnyNonFlatUseOfReg(const MachineRegisterInfo &MRI,
+ const SIInstrInfo &TII,
+ unsigned Reg) {
+ for (const MachineOperand &UseOp : MRI.reg_operands(Reg)) {
+ if (!UseOp.isImplicit() || !TII.isFLAT(*UseOp.getParent()))
+ return true;
+ }
- unsigned reg = MO.getReg();
- switch (reg) {
- case AMDGPU::EXEC:
- case AMDGPU::EXEC_LO:
- case AMDGPU::EXEC_HI:
- case AMDGPU::SCC:
- case AMDGPU::M0:
- case AMDGPU::SRC_SHARED_BASE:
- case AMDGPU::SRC_SHARED_LIMIT:
- case AMDGPU::SRC_PRIVATE_BASE:
- case AMDGPU::SRC_PRIVATE_LIMIT:
- continue;
+ return false;
+}
- case AMDGPU::VCC:
- case AMDGPU::VCC_LO:
- case AMDGPU::VCC_HI:
- VCCUsed = true;
- continue;
+void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
+ const MachineFunction &MF) const {
+ const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIInstrInfo *TII = STM.getInstrInfo();
+ const SIRegisterInfo *RI = &TII->getRegisterInfo();
- case AMDGPU::FLAT_SCR:
- case AMDGPU::FLAT_SCR_LO:
- case AMDGPU::FLAT_SCR_HI:
- // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat
- // instructions aren't used to access the scratch buffer.
- if (MFI->hasFlatScratchInit())
- FlatUsed = true;
- continue;
- case AMDGPU::TBA:
- case AMDGPU::TBA_LO:
- case AMDGPU::TBA_HI:
- case AMDGPU::TMA:
- case AMDGPU::TMA_LO:
- case AMDGPU::TMA_HI:
- llvm_unreachable("trap handler registers should not be used");
-
- default:
- break;
- }
-
- if (AMDGPU::SReg_32RegClass.contains(reg)) {
- assert(!AMDGPU::TTMP_32RegClass.contains(reg) &&
- "trap handler registers should not be used");
- isSGPR = true;
- width = 1;
- } else if (AMDGPU::VGPR_32RegClass.contains(reg)) {
- isSGPR = false;
- width = 1;
- } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
- assert(!AMDGPU::TTMP_64RegClass.contains(reg) &&
- "trap handler registers should not be used");
- isSGPR = true;
- width = 2;
- } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
- isSGPR = false;
- width = 2;
- } else if (AMDGPU::VReg_96RegClass.contains(reg)) {
- isSGPR = false;
- width = 3;
- } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
- isSGPR = true;
- width = 4;
- } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
- isSGPR = false;
- width = 4;
- } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
- isSGPR = true;
- width = 8;
- } else if (AMDGPU::VReg_256RegClass.contains(reg)) {
- isSGPR = false;
- width = 8;
- } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
- isSGPR = true;
- width = 16;
- } else if (AMDGPU::VReg_512RegClass.contains(reg)) {
- isSGPR = false;
- width = 16;
- } else {
- llvm_unreachable("Unknown register class");
- }
- unsigned hwReg = RI->getEncodingValue(reg) & 0xff;
- unsigned maxUsed = hwReg + width - 1;
- if (isSGPR) {
- MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR;
- } else {
- MaxVGPR = maxUsed > MaxVGPR ? maxUsed : MaxVGPR;
- }
- }
+ MCPhysReg NumVGPRReg = AMDGPU::NoRegister;
+ for (MCPhysReg Reg : reverse(AMDGPU::VGPR_32RegClass.getRegisters())) {
+ if (MRI.isPhysRegUsed(Reg)) {
+ NumVGPRReg = Reg;
+ break;
+ }
+ }
+
+ MCPhysReg NumSGPRReg = AMDGPU::NoRegister;
+ for (MCPhysReg Reg : reverse(AMDGPU::SGPR_32RegClass.getRegisters())) {
+ if (MRI.isPhysRegUsed(Reg)) {
+ NumSGPRReg = Reg;
+ break;
}
}
+ // We found the maximum register index. Register indices start at 0, so add
+ // one to get the number of registers.
+ ProgInfo.NumVGPR = NumVGPRReg == AMDGPU::NoRegister ? 0 :
+ RI->getHWRegIndex(NumVGPRReg) + 1;
+ ProgInfo.NumSGPR = NumSGPRReg == AMDGPU::NoRegister ? 0 :
+ RI->getHWRegIndex(NumSGPRReg) + 1;
unsigned ExtraSGPRs = 0;
- if (VCCUsed)
+ ProgInfo.VCCUsed = MRI.isPhysRegUsed(AMDGPU::VCC_LO) ||
+ MRI.isPhysRegUsed(AMDGPU::VCC_HI);
+ if (ProgInfo.VCCUsed)
ExtraSGPRs = 2;
+ ProgInfo.FlatUsed = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) ||
+ MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI);
+
+ // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat
+ // instructions aren't used to access the scratch buffer. Inline assembly
+ // may need it though.
+ //
+ // If we only have implicit uses of flat_scr on flat instructions, it is not
+ // really needed.
+ if (ProgInfo.FlatUsed && !MFI->hasFlatScratchInit() &&
+ (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) &&
+ !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) &&
+ !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) {
+ ProgInfo.FlatUsed = false;
+ }
+
if (STM.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) {
- if (FlatUsed)
+ if (ProgInfo.FlatUsed)
ExtraSGPRs = 4;
} else {
if (STM.isXNACKEnabled())
ExtraSGPRs = 4;
- if (FlatUsed)
+ if (ProgInfo.FlatUsed)
ExtraSGPRs = 6;
}
@@ -505,34 +478,29 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
if (STM.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
!STM.hasSGPRInitBug()) {
unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
- if (MaxSGPR + 1 > MaxAddressableNumSGPRs) {
+ if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
// This can happen due to a compiler bug or when using inline asm.
LLVMContext &Ctx = MF.getFunction()->getContext();
DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
"addressable scalar registers",
- MaxSGPR + 1, DS_Error,
+ ProgInfo.NumSGPR, DS_Error,
DK_ResourceLimit,
MaxAddressableNumSGPRs);
Ctx.diagnose(Diag);
- MaxSGPR = MaxAddressableNumSGPRs - 1;
+ ProgInfo.NumSGPR = MaxAddressableNumSGPRs - 1;
}
}
// Account for extra SGPRs and VGPRs reserved for debugger use.
- MaxSGPR += ExtraSGPRs;
- MaxVGPR += ExtraVGPRs;
-
- // We found the maximum register index. They start at 0, so add one to get the
- // number of registers.
- ProgInfo.NumSGPR = MaxSGPR + 1;
- ProgInfo.NumVGPR = MaxVGPR + 1;
+ ProgInfo.NumSGPR += ExtraSGPRs;
+ ProgInfo.NumVGPR += ExtraVGPRs;
// Adjust number of registers used to meet default/requested minimum/maximum
// number of waves per execution unit request.
ProgInfo.NumSGPRsForWavesPerEU = std::max(
- ProgInfo.NumSGPR, STM.getMinNumSGPRs(MFI->getMaxWavesPerEU()));
+ std::max(ProgInfo.NumSGPR, 1u), STM.getMinNumSGPRs(MFI->getMaxWavesPerEU()));
ProgInfo.NumVGPRsForWavesPerEU = std::max(
- ProgInfo.NumVGPR, STM.getMinNumVGPRs(MFI->getMaxWavesPerEU()));
+ std::max(ProgInfo.NumVGPR, 1u), STM.getMinNumVGPRs(MFI->getMaxWavesPerEU()));
if (STM.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ||
STM.hasSGPRInitBug()) {
@@ -559,10 +527,10 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
}
- if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) {
+ if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
LLVMContext &Ctx = MF.getFunction()->getContext();
DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs",
- MFI->NumUserSGPRs, DS_Error);
+ MFI->getNumUserSGPRs(), DS_Error);
Ctx.diagnose(Diag);
}
@@ -584,7 +552,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.VGPRBlocks = ProgInfo.VGPRBlocks / STM.getVGPREncodingGranule() - 1;
// Record first reserved VGPR and number of reserved VGPRs.
- ProgInfo.ReservedVGPRFirst = STM.debuggerReserveRegs() ? MaxVGPR + 1 : 0;
+ ProgInfo.ReservedVGPRFirst = STM.debuggerReserveRegs() ? ProgInfo.NumVGPR : 0;
ProgInfo.ReservedVGPRCount = STM.getReservedNumVGPRs(MF);
// Update DebuggerWavefrontPrivateSegmentOffsetSGPR and
@@ -609,10 +577,6 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
ProgInfo.ScratchSize = FrameInfo.getStackSize();
- ProgInfo.FlatUsed = FlatUsed;
- ProgInfo.VCCUsed = VCCUsed;
- ProgInfo.CodeLen = CodeSize;
-
unsigned LDSAlignShift;
if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) {
// LDS is allocated in 64 dword blocks.
@@ -623,7 +587,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
}
unsigned LDSSpillSize =
- MFI->LDSWaveSpillSize * MFI->getMaxFlatWorkGroupSize();
+ MFI->getLDSWaveSpillSize() * MFI->getMaxFlatWorkGroupSize();
ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize;
ProgInfo.LDSBlocks =
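// A standalone sketch (hypothetical register file, not the LLVM API) of the
// counting scheme introduced above: rather than tracking a running maximum
// while walking every instruction operand, scan the physical registers from
// the top and take the first one reported as used. Indices start at 0, so
// the register count is that index plus one, or zero when nothing is used.
#include <array>
#include <cstdio>

constexpr int NumVGPRs = 256;

static unsigned countUsedVGPRs(const std::array<bool, NumVGPRs> &Used) {
  for (int Reg = NumVGPRs - 1; Reg >= 0; --Reg)
    if (Used[Reg])
      return (unsigned)Reg + 1; // highest used index + 1
  return 0;                     // no VGPRs used at all
}

int main() {
  std::array<bool, NumVGPRs> Used{};
  Used[0] = Used[7] = true;     // v0 and v7 used
  std::printf("NumVGPR = %u\n", countUsedVGPRs(Used)); // prints 8
}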
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
index 13425c8b2a0f..8c86dea4b885 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -55,7 +55,7 @@ private:
uint32_t NumVGPR = 0;
uint32_t NumSGPR = 0;
- uint32_t LDSSize;
+ uint32_t LDSSize = 0;
bool FlatUsed = false;
// Number of SGPRs that meets number of waves per execution unit request.
@@ -85,11 +85,11 @@ private:
// Bonus information for debugging.
bool VCCUsed = false;
- uint64_t CodeLen = 0;
SIProgramInfo() = default;
};
+ uint64_t getFunctionCodeSize(const MachineFunction &MF) const;
void getSIProgramInfo(SIProgramInfo &Out, const MachineFunction &MF) const;
void getAmdKernelCode(amd_kernel_code_t &Out, const SIProgramInfo &KernelInfo,
const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 36bc2498781f..a5cda817ac11 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -415,9 +415,11 @@ public:
return 0;
}
+ // Scratch is allocated in 256-dword-per-wave blocks for the entire
+ // wavefront. When viewed from the perspective of an arbitrary workitem, this
+ // is 4-byte aligned.
unsigned getStackAlignment() const {
- // Scratch is allocated in 256 dword per wave blocks.
- return 4 * 256 / getWavefrontSize();
+ return 4;
}
bool enableMachineScheduler() const override {
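// Quick arithmetic behind the getStackAlignment() change above, assuming a
// 64-lane wavefront: the old value spread the 256-dword per-wave block across
// all lanes (4 * 256 / 64 = 16 bytes), but scratch is interleaved per lane,
// so a single workitem only ever sees 4-byte alignment.
#include <cstdio>

int main() {
  unsigned WavefrontSize = 64;                  // assumed wave64
  unsigned OldAlign = 4 * 256 / WavefrontSize;  // 16: whole-wave view
  unsigned NewAlign = 4;                        // per-workitem view
  std::printf("old=%u new=%u\n", OldAlign, NewAlign);
}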
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/contrib/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 01ac9968181a..6edd3e923ba1 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -426,16 +426,23 @@ static bool isArgPassedInSGPR(const Argument *A) {
const Function *F = A->getParent();
// Arguments to compute shaders are never a source of divergence.
- if (!AMDGPU::isShader(F->getCallingConv()))
+ CallingConv::ID CC = F->getCallingConv();
+ switch (CC) {
+ case CallingConv::AMDGPU_KERNEL:
+ case CallingConv::SPIR_KERNEL:
return true;
-
- // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
- if (F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
- F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal))
- return true;
-
- // Everything else is in VGPRs.
- return false;
+ case CallingConv::AMDGPU_VS:
+ case CallingConv::AMDGPU_GS:
+ case CallingConv::AMDGPU_PS:
+ case CallingConv::AMDGPU_CS:
+ // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
+ // Everything else is in VGPRs.
+ return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
+ F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
+ default:
+ // TODO: Should calls support inreg for SGPR inputs?
+ return false;
+ }
}
///
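// A standalone sketch (hypothetical enums, not LLVM's CallingConv) of the
// dispatch above: kernel conventions always pass arguments in SGPRs, the
// graphics shader conventions only for inreg/byval arguments, and any other
// convention -- e.g. a future function-call convention -- defaults to VGPRs.
#include <cstdio>

enum CC { AMDGPU_KERNEL, SPIR_KERNEL, AMDGPU_PS, C_CALL };

static bool argPassedInSGPRSketch(CC Conv, bool InRegOrByVal) {
  switch (Conv) {
  case AMDGPU_KERNEL:
  case SPIR_KERNEL:
    return true;          // compute kernels: always SGPR
  case AMDGPU_PS:
    return InRegOrByVal;  // shaders: attribute-driven
  default:
    return false;         // everything else: VGPR
  }
}

int main() {
  std::printf("%d %d %d\n",
              argPassedInSGPRSketch(AMDGPU_KERNEL, false), // 1
              argPassedInSGPRSketch(AMDGPU_PS, true),      // 1
              argPassedInSGPRSketch(C_CALL, true));        // 0
}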
diff --git a/contrib/llvm/lib/Target/AMDGPU/DSInstructions.td b/contrib/llvm/lib/Target/AMDGPU/DSInstructions.td
index a9f64589fa5e..357e18108e7e 100644
--- a/contrib/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/contrib/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -255,8 +255,6 @@ class DS_1A1D_PERMUTE <string opName, SDPatternOperator node = null_frag>
[(set i32:$vdst,
(node (DS1Addr1Offset i32:$addr, i16:$offset), i32:$data0))] > {
- let LGKM_CNT = 0;
-
let mayLoad = 0;
let mayStore = 0;
let isConvergent = 1;
diff --git a/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 1655591abf39..6c61fb1f2d6b 100644
--- a/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -14,6 +14,7 @@
using namespace llvm;
AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Triple &TT) : MCAsmInfoELF() {
+ CodePointerSize = (TT.getArch() == Triple::amdgcn) ? 8 : 4;
HasSingleParameterDotFile = false;
//===------------------------------------------------------------------===//
MinInstAlignment = 4;
diff --git a/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7268131396dc..dd867b15b4c7 100644
--- a/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -461,6 +461,13 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
+ } else {
+ setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
+ setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
+ }
+
+ for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
+ setOperationAction(ISD::SELECT, VT, Custom);
}
setTargetDAGCombine(ISD::FADD);
@@ -2191,6 +2198,28 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N,
break;
}
}
+ case ISD::SELECT: {
+ SDLoc SL(N);
+ EVT VT = N->getValueType(0);
+ EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
+ SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
+ SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
+
+ EVT SelectVT = NewVT;
+ if (NewVT.bitsLT(MVT::i32)) {
+ LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
+ RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
+ SelectVT = MVT::i32;
+ }
+
+ SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
+ N->getOperand(0), LHS, RHS);
+
+ if (NewVT != SelectVT)
+ NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
+ Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
+ return;
+ }
default:
break;
}
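// A standalone sketch (plain C++, not SelectionDAG) of the select lowering
// added above: a select between two v2f16 values has no per-element
// semantics, so it can run on the raw 32-bit pattern instead -- bitcast both
// operands to i32, perform one scalar select, and bitcast the result back.
#include <cstdint>
#include <cstring>
#include <cstdio>

struct V2F16 { uint16_t Lo, Hi; }; // hypothetical 2 x half payload

static V2F16 selectV2F16(bool Cond, V2F16 A, V2F16 B) {
  uint32_t IA, IB;
  std::memcpy(&IA, &A, 4);     // BITCAST v2f16 -> i32
  std::memcpy(&IB, &B, 4);
  uint32_t R = Cond ? IA : IB; // single scalar SELECT on i32
  V2F16 Res;
  std::memcpy(&Res, &R, 4);    // BITCAST i32 -> v2f16
  return Res;
}

int main() {
  V2F16 A{0x3C00, 0x4000}, B{0, 0};        // {1.0h, 2.0h} vs {0.0h, 0.0h}
  V2F16 R = selectV2F16(true, A, B);
  std::printf("%04x %04x\n", R.Lo, R.Hi);  // 3c00 4000
}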
diff --git a/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index a84f3e274f82..810fb05984c4 100644
--- a/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -133,14 +133,12 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
AMDGPUBufferPseudoSourceValue BufferPSV;
AMDGPUImagePseudoSourceValue ImagePSV;
-public:
- // FIXME: Make private
+private:
unsigned LDSWaveSpillSize;
unsigned ScratchOffsetReg;
unsigned NumUserSGPRs;
unsigned NumSystemSGPRs;
-private:
bool HasSpilledSGPRs;
bool HasSpilledVGPRs;
bool HasNonSpillStackObjects;
@@ -535,6 +533,10 @@ public:
llvm_unreachable("unexpected dimension");
}
+ unsigned getLDSWaveSpillSize() const {
+ return LDSWaveSpillSize;
+ }
+
const AMDGPUBufferPseudoSourceValue *getBufferPSV() const {
return &BufferPSV;
}
diff --git a/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 36d4df52ff0e..098c67252dd8 100644
--- a/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -124,7 +124,7 @@ unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
unsigned RegCount = ST.getMaxNumSGPRs(MF);
unsigned Reg;
- // Try to place it in a hole after PrivateSegmentbufferReg.
+ // Try to place it in a hole after PrivateSegmentBufferReg.
if (RegCount & 3) {
// We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
// alignment constraints, so we have a hole where we can put the wave offset.
diff --git a/contrib/llvm/lib/Target/ARM/ARM.td b/contrib/llvm/lib/Target/ARM/ARM.td
index 57f9d1c6b610..005b74a68af3 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm/lib/Target/ARM/ARM.td
@@ -67,8 +67,9 @@ def FeatureFullFP16 : SubtargetFeature<"fullfp16", "HasFullFP16", "true",
[FeatureFPARMv8]>;
def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true",
"Restrict FP to 16 double registers">;
-def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true",
- "Enable divide instructions">;
+def FeatureHWDivThumb : SubtargetFeature<"hwdiv", "HasHardwareDivideInThumb",
+ "true",
+ "Enable divide instructions in Thumb">;
def FeatureHWDivARM : SubtargetFeature<"hwdiv-arm",
"HasHardwareDivideInARM", "true",
"Enable divide instructions in ARM mode">;
@@ -225,7 +226,7 @@ def FeatureMP : SubtargetFeature<"mp", "HasMPExtension", "true",
def FeatureVirtualization : SubtargetFeature<"virtualization",
"HasVirtualization", "true",
"Supports Virtualization extension",
- [FeatureHWDiv, FeatureHWDivARM]>;
+ [FeatureHWDivThumb, FeatureHWDivARM]>;
// M-series ISA
def FeatureMClass : SubtargetFeature<"mclass", "ARMProcClass", "MClass",
@@ -433,21 +434,21 @@ def ARMv7ve : Architecture<"armv7ve", "ARMv7ve", [HasV7Ops,
def ARMv7r : Architecture<"armv7-r", "ARMv7r", [HasV7Ops,
FeatureDB,
FeatureDSP,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureRClass]>;
def ARMv7m : Architecture<"armv7-m", "ARMv7m", [HasV7Ops,
FeatureThumb2,
FeatureNoARM,
FeatureDB,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureMClass]>;
def ARMv7em : Architecture<"armv7e-m", "ARMv7em", [HasV7Ops,
FeatureThumb2,
FeatureNoARM,
FeatureDB,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureMClass,
FeatureDSP]>;
@@ -502,7 +503,7 @@ def ARMv8mBaseline : Architecture<"armv8-m.base", "ARMv8mBaseline",
[HasV8MBaselineOps,
FeatureNoARM,
FeatureDB,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureV7Clrex,
Feature8MSecExt,
FeatureAcquireRelease,
@@ -512,7 +513,7 @@ def ARMv8mMainline : Architecture<"armv8-m.main", "ARMv8mMainline",
[HasV8MMainlineOps,
FeatureNoARM,
FeatureDB,
- FeatureHWDiv,
+ FeatureHWDivThumb,
Feature8MSecExt,
FeatureAcquireRelease,
FeatureMClass]>;
@@ -678,7 +679,7 @@ def : ProcessorModel<"krait", CortexA9Model, [ARMv7a, ProcKrait,
FeatureFP16,
FeatureAvoidPartialCPSR,
FeatureVFP4,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM]>;
def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift,
@@ -686,7 +687,7 @@ def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift,
FeatureNEONForFP,
FeatureVFP4,
FeatureMP,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp,
@@ -768,39 +769,39 @@ def : ProcNoItin<"cortex-m33", [ARMv8mMainline,
FeatureVFPOnlySP]>;
def : ProcNoItin<"cortex-a32", [ARMv8a,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"cortex-a35", [ARMv8a, ProcA35,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"cortex-a53", [ARMv8a, ProcA53,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC,
FeatureFPAO]>;
def : ProcNoItin<"cortex-a57", [ARMv8a, ProcA57,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC,
FeatureFPAO]>;
def : ProcNoItin<"cortex-a72", [ARMv8a, ProcA72,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"cortex-a73", [ARMv8a, ProcA73,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
@@ -811,7 +812,7 @@ def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift,
FeatureNEONForFP,
FeatureVFP4,
FeatureMP,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp,
@@ -820,25 +821,25 @@ def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift,
FeatureZCZeroing]>;
def : ProcNoItin<"exynos-m1", [ARMv8a, ProcExynosM1,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"exynos-m2", [ARMv8a, ProcExynosM1,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"exynos-m3", [ARMv8a, ProcExynosM1,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
def : ProcNoItin<"kryo", [ARMv8a, ProcKryo,
- FeatureHWDiv,
+ FeatureHWDivThumb,
FeatureHWDivARM,
FeatureCrypto,
FeatureCRC]>;
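// Note on the rename above: only the TableGen record (FeatureHWDiv ->
// FeatureHWDivThumb) and the C++ predicate (HasHardwareDivide ->
// HasHardwareDivideInThumb) change; the user-visible feature string stays
// "hwdiv", so command lines such as "llc -mattr=+hwdiv,+hwdiv-arm" should be
// unaffected.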
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index eb0d410b596b..14e197f477f1 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -589,12 +589,6 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
ATS.finishAttributeSection();
}
-static bool isV8M(const ARMSubtarget *Subtarget) {
- // Note that v8M Baseline is a subset of v6T2!
- return (Subtarget->hasV8MBaselineOps() && !Subtarget->hasV6T2Ops()) ||
- Subtarget->hasV8MMainlineOps();
-}
-
//===----------------------------------------------------------------------===//
// Helper routines for EmitStartOfAsmFile() and EmitEndOfAsmFile()
// FIXME:
@@ -602,39 +596,6 @@ static bool isV8M(const ARMSubtarget *Subtarget) {
// to appear in the .ARM.attributes section in ELF.
// Instead of subclassing the MCELFStreamer, we do the work here.
-static ARMBuildAttrs::CPUArch getArchForCPU(StringRef CPU,
- const ARMSubtarget *Subtarget) {
- if (CPU == "xscale")
- return ARMBuildAttrs::v5TEJ;
-
- if (Subtarget->hasV8Ops()) {
- if (Subtarget->isRClass())
- return ARMBuildAttrs::v8_R;
- return ARMBuildAttrs::v8_A;
- } else if (Subtarget->hasV8MMainlineOps())
- return ARMBuildAttrs::v8_M_Main;
- else if (Subtarget->hasV7Ops()) {
- if (Subtarget->isMClass() && Subtarget->hasDSP())
- return ARMBuildAttrs::v7E_M;
- return ARMBuildAttrs::v7;
- } else if (Subtarget->hasV6T2Ops())
- return ARMBuildAttrs::v6T2;
- else if (Subtarget->hasV8MBaselineOps())
- return ARMBuildAttrs::v8_M_Base;
- else if (Subtarget->hasV6MOps())
- return ARMBuildAttrs::v6S_M;
- else if (Subtarget->hasV6Ops())
- return ARMBuildAttrs::v6;
- else if (Subtarget->hasV5TEOps())
- return ARMBuildAttrs::v5TE;
- else if (Subtarget->hasV5TOps())
- return ARMBuildAttrs::v5T;
- else if (Subtarget->hasV4TOps())
- return ARMBuildAttrs::v4T;
- else
- return ARMBuildAttrs::v4;
-}
-
// Returns true if all functions have the same function attribute value.
// It also returns true when the module has no functions.
static bool checkFunctionsAttributeConsistency(const Module &M, StringRef Attr,
@@ -671,89 +632,8 @@ void ARMAsmPrinter::emitAttributes() {
static_cast<const ARMBaseTargetMachine &>(TM);
const ARMSubtarget STI(TT, CPU, ArchFS, ATM, ATM.isLittleEndian());
- const std::string &CPUString = STI.getCPUString();
-
- if (!StringRef(CPUString).startswith("generic")) {
- // FIXME: remove krait check when GNU tools support krait cpu
- if (STI.isKrait()) {
- ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
- // We consider krait as a "cortex-a9" + hwdiv CPU
- // Enable hwdiv through ".arch_extension idiv"
- if (STI.hasDivide() || STI.hasDivideInARMMode())
- ATS.emitArchExtension(ARM::AEK_HWDIV | ARM::AEK_HWDIVARM);
- } else
- ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
- }
-
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(CPUString, &STI));
-
- // Tag_CPU_arch_profile must have the default value of 0 when "Architecture
- // profile is not applicable (e.g. pre v7, or cross-profile code)".
- if (STI.hasV7Ops() || isV8M(&STI)) {
- if (STI.isAClass()) {
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::ApplicationProfile);
- } else if (STI.isRClass()) {
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::RealTimeProfile);
- } else if (STI.isMClass()) {
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::MicroControllerProfile);
- }
- }
-
- ATS.emitAttribute(ARMBuildAttrs::ARM_ISA_use,
- STI.hasARMOps() ? ARMBuildAttrs::Allowed
- : ARMBuildAttrs::Not_Allowed);
- if (isV8M(&STI)) {
- ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::AllowThumbDerived);
- } else if (STI.isThumb1Only()) {
- ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
- } else if (STI.hasThumb2()) {
- ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
- ARMBuildAttrs::AllowThumb32);
- }
-
- if (STI.hasNEON()) {
- /* NEON is not exactly a VFP architecture, but GAS emit one of
- * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
- if (STI.hasFPARMv8()) {
- if (STI.hasCrypto())
- ATS.emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8);
- else
- ATS.emitFPU(ARM::FK_NEON_FP_ARMV8);
- } else if (STI.hasVFP4())
- ATS.emitFPU(ARM::FK_NEON_VFPV4);
- else
- ATS.emitFPU(STI.hasFP16() ? ARM::FK_NEON_FP16 : ARM::FK_NEON);
- // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
- if (STI.hasV8Ops())
- ATS.emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
- STI.hasV8_1aOps() ? ARMBuildAttrs::AllowNeonARMv8_1a:
- ARMBuildAttrs::AllowNeonARMv8);
- } else {
- if (STI.hasFPARMv8())
- // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
- // FPU, but there are two different names for it depending on the CPU.
- ATS.emitFPU(STI.hasD16()
- ? (STI.isFPOnlySP() ? ARM::FK_FPV5_SP_D16 : ARM::FK_FPV5_D16)
- : ARM::FK_FP_ARMV8);
- else if (STI.hasVFP4())
- ATS.emitFPU(STI.hasD16()
- ? (STI.isFPOnlySP() ? ARM::FK_FPV4_SP_D16 : ARM::FK_VFPV4_D16)
- : ARM::FK_VFPV4);
- else if (STI.hasVFP3())
- ATS.emitFPU(STI.hasD16()
- // +d16
- ? (STI.isFPOnlySP()
- ? (STI.hasFP16() ? ARM::FK_VFPV3XD_FP16 : ARM::FK_VFPV3XD)
- : (STI.hasFP16() ? ARM::FK_VFPV3_D16_FP16 : ARM::FK_VFPV3_D16))
- // -d16
- : (STI.hasFP16() ? ARM::FK_VFPV3_FP16 : ARM::FK_VFPV3));
- else if (STI.hasVFP2())
- ATS.emitFPU(ARM::FK_VFPV2);
- }
+ // Emit build attributes for the available hardware.
+ ATS.emitTargetAttributes(STI);
// RW data addressing.
if (isPositionIndependent()) {
@@ -846,32 +726,15 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_number_model,
ARMBuildAttrs::AllowIEEE754);
- if (STI.allowsUnalignedMem())
- ATS.emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
- ARMBuildAttrs::Allowed);
- else
- ATS.emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
- ARMBuildAttrs::Not_Allowed);
-
// FIXME: add more flags to ARMBuildAttributes.h
// 8-bytes alignment stuff.
ATS.emitAttribute(ARMBuildAttrs::ABI_align_needed, 1);
ATS.emitAttribute(ARMBuildAttrs::ABI_align_preserved, 1);
- // ABI_HardFP_use attribute to indicate single precision FP.
- if (STI.isFPOnlySP())
- ATS.emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
- ARMBuildAttrs::HardFPSinglePrecision);
-
// Hard float. Use both S and D registers and conform to AAPCS-VFP.
if (STI.isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard)
ATS.emitAttribute(ARMBuildAttrs::ABI_VFP_args, ARMBuildAttrs::HardFPAAPCS);
- // FIXME: Should we signal R9 usage?
-
- if (STI.hasFP16())
- ATS.emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);
-
// FIXME: To support emitting this build attribute as GCC does, the
// -mfp16-format option and associated plumbing must be
// supported. For now the __fp16 type is exposed by default, so this
@@ -879,21 +742,6 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_16bit_format,
ARMBuildAttrs::FP16FormatIEEE);
- if (STI.hasMPExtension())
- ATS.emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);
-
- // Hardware divide in ARM mode is part of base arch, starting from ARMv8.
- // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
- // It is not possible to produce DisallowDIV: if hwdiv is present in the base
- // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
- // AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
- // otherwise, the default value (AllowDIVIfExists) applies.
- if (STI.hasDivideInARMMode() && !STI.hasV8Ops())
- ATS.emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
-
- if (STI.hasDSP() && isV8M(&STI))
- ATS.emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);
-
if (MMI) {
if (const Module *SourceModule = MMI->getModule()) {
// ABI_PCS_wchar_t to indicate wchar_t width
@@ -930,16 +778,6 @@ void ARMAsmPrinter::emitAttributes() {
else
ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_R9_use,
ARMBuildAttrs::R9IsGPR);
-
- if (STI.hasTrustZone() && STI.hasVirtualization())
- ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
- ARMBuildAttrs::AllowTZVirtualization);
- else if (STI.hasTrustZone())
- ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
- ARMBuildAttrs::AllowTZ);
- else if (STI.hasVirtualization())
- ATS.emitAttribute(ARMBuildAttrs::Virtualization_use,
- ARMBuildAttrs::AllowVirtualization);
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 23777b821f9f..faf1c631a3a7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -404,6 +404,29 @@ public:
/// Returns true if the instruction has a shift by immediate that can be
/// executed in one cycle less.
bool isSwiftFastImmShift(const MachineInstr *MI) const;
+
+ /// Returns predicate register associated with the given frame instruction.
+ unsigned getFramePred(const MachineInstr &MI) const {
+ assert(isFrameInstr(MI));
+ if (isFrameSetup(MI))
+ // Operands of ADJCALLSTACKDOWN:
+ // - argument declared in ADJCALLSTACKDOWN pattern:
+ // 0 - frame size
+ // 1 - predicate code (like ARMCC::AL)
+ // - added by predOps:
+ // 2 - predicate reg
+ return MI.getOperand(2).getReg();
+ assert(MI.getOpcode() == ARM::ADJCALLSTACKUP ||
+ MI.getOpcode() == ARM::tADJCALLSTACKUP);
+ // Operands of ADJCALLSTACKUP:
+ // - argument declared in ADJCALLSTACKUP pattern:
+ // 0 - frame size
+ // 1 - arg of CALLSEQ_END
+ // 2 - predicate code
+ // - added by predOps:
+ // 3 - predicate reg
+ return MI.getOperand(3).getReg();
+ }
};
/// Get the operands corresponding to the given \p Pred value. By default, the
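// A minimal sketch (hypothetical instruction container, not MachineInstr) of
// the operand layout getFramePred relies on: the predicate register is
// operand 2 of ADJCALLSTACKDOWN (frame size, pred code, pred reg) and
// operand 3 of ADJCALLSTACKUP (frame size, CALLSEQ_END arg, pred code,
// pred reg), so the accessor just picks the index by opcode.
#include <vector>
#include <cstdio>

enum Opcode { ADJCALLSTACKDOWN, ADJCALLSTACKUP };
struct Inst { Opcode Op; std::vector<int> Operands; };

static int framePredSketch(const Inst &MI) {
  return MI.Operands[MI.Op == ADJCALLSTACKDOWN ? 2 : 3];
}

int main() {
  Inst Down{ADJCALLSTACKDOWN, {16, /*ARMCC::AL*/ 14, /*pred reg*/ 0}};
  Inst Up{ADJCALLSTACKUP, {16, 0, 14, /*pred reg*/ 0}};
  std::printf("%d %d\n", framePredSketch(Down), framePredSketch(Up)); // 0 0
}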
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
index 7a7b7fede7c8..bc7afdb7f1c9 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -273,9 +273,9 @@ def CSR_iOS_SwiftError : CalleeSavedRegs<(sub CSR_iOS, R8)>;
def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4,
(sub CSR_AAPCS_ThisReturn, R9))>;
-def CSR_iOS_TLSCall : CalleeSavedRegs<(add LR, SP,
- (sequence "R%u", 12, 1),
- (sequence "D%u", 31, 0))>;
+def CSR_iOS_TLSCall
+ : CalleeSavedRegs<(add LR, SP, (sub(sequence "R%u", 12, 1), R9, R12),
+ (sequence "D%u", 31, 0))>;
// C++ TLS access function saves all registers except SP. Try to match
// the order of CSRs in CSR_iOS.
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 23722f1b7f3f..6434df317aa8 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -1741,10 +1741,9 @@ bool ARMConstantIslands::undoLRSpillRestore() {
.add(MI->getOperand(1));
MI->eraseFromParent();
MadeChange = true;
- }
- if (MI->getOpcode() == ARM::tPUSH &&
- MI->getOperand(2).getReg() == ARM::LR &&
- MI->getNumExplicitOperands() == 3) {
+ } else if (MI->getOpcode() == ARM::tPUSH &&
+ MI->getOperand(2).getReg() == ARM::LR &&
+ MI->getNumExplicitOperands() == 3) {
// Just remove the push.
MI->eraseFromParent();
MadeChange = true;
@@ -2158,6 +2157,15 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
// If we're in PIC mode, there should be another ADD following.
auto *TRI = STI->getRegisterInfo();
+
+ // %base cannot be redefined after the load, as that would produce a
+ // sequence before the TBB/TBH like:
+ // %base =
+ // %base =
+ // tBB %base, %idx
+ if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI))
+ continue;
+
if (isPositionIndependentOrROPI) {
MachineInstr *Add = Load->getNextNode();
if (Add->getOpcode() != ARM::tADDrr ||
diff --git a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
index 01e062bd185c..e9bc7db66fa4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -1702,7 +1702,8 @@ bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
// If we have integer div support we should have selected this automagically.
// In case we have a real miss go ahead and return false and we'll pick
// it up later.
- if (Subtarget->hasDivide()) return false;
+ if (Subtarget->hasDivideInThumbMode())
+ return false;
// Otherwise emit a libcall.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 37be22bed540..70dbe1bc5b95 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -322,6 +322,18 @@ static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI,
}
}
+/// We need the offset of the frame pointer relative to other MachineFrameInfo
+/// offsets which are encoded relative to SP at function begin.
+/// See also emitPrologue() for how the FP is set up.
+/// Unfortunately we cannot determine this value in determineCalleeSaves() yet
+/// as assignCalleeSavedSpillSlots() hasn't run at this point. Instead we use
+/// this to produce a conservative estimate that we check in an assert() later.
+static int getMaxFPOffset(const Function &F, const ARMFunctionInfo &AFI) {
+ // This is a conservative estimation: Assume the frame pointer being r7 and
+ // pc("r15") up to r8 getting spilled before (= 8 registers).
+ return -AFI.getArgRegsSaveSize() - (8 * 4);
+}
+
void ARMFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -432,8 +444,10 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
unsigned DPRCSOffset = GPRCS2Offset - DPRGapSize - DPRCSSize;
int FramePtrOffsetInPush = 0;
if (HasFP) {
- FramePtrOffsetInPush =
- MFI.getObjectOffset(FramePtrSpillFI) + ArgRegsSaveSize;
+ int FPOffset = MFI.getObjectOffset(FramePtrSpillFI);
+ assert(getMaxFPOffset(*MF.getFunction(), *AFI) <= FPOffset &&
+ "Max FP estimation is wrong");
+ FramePtrOffsetInPush = FPOffset + ArgRegsSaveSize;
AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) +
NumBytes);
}
@@ -1700,6 +1714,14 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
// worth the effort and added fragility?
unsigned EstimatedStackSize =
MFI.estimateStackSize(MF) + 4 * (NumGPRSpills + NumFPRSpills);
+
+ // Determine biggest (positive) SP offset in MachineFrameInfo.
+ int MaxFixedOffset = 0;
+ for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
+ int MaxObjectOffset = MFI.getObjectOffset(I) + MFI.getObjectSize(I);
+ MaxFixedOffset = std::max(MaxFixedOffset, MaxObjectOffset);
+ }
+
bool HasFP = hasFP(MF);
if (HasFP) {
if (AFI->hasStackFrame())
@@ -1707,15 +1729,20 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
} else {
// If FP is not used, SP will be used to access arguments, so count the
// size of arguments into the estimation.
- EstimatedStackSize += AFI->getArgumentStackSize();
+ EstimatedStackSize += MaxFixedOffset;
}
EstimatedStackSize += 16; // For possible paddings.
- bool BigStack = EstimatedStackSize >= estimateRSStackSizeLimit(MF, this) ||
- MFI.hasVarSizedObjects() ||
- (MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF));
+ unsigned EstimatedRSStackSizeLimit = estimateRSStackSizeLimit(MF, this);
+ int MaxFPOffset = getMaxFPOffset(*MF.getFunction(), *AFI);
+ bool BigFrameOffsets = EstimatedStackSize >= EstimatedRSStackSizeLimit ||
+ MFI.hasVarSizedObjects() ||
+ (MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF)) ||
+ // For large argument stacks, FP-relative addressing may overflow.
+ (HasFP && (MaxFixedOffset - MaxFPOffset) >= (int)EstimatedRSStackSizeLimit);
bool ExtraCSSpill = false;
- if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) {
+ if (BigFrameOffsets ||
+ !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) {
AFI->setHasStackFrame(true);
if (HasFP) {
@@ -1899,7 +1926,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
// callee-saved register or reserve a special spill slot to facilitate
// register scavenging. Thumb1 needs a spill slot for stack pointer
// adjustments also, even when the frame itself is small.
- if (BigStack && !ExtraCSSpill) {
+ if (BigFrameOffsets && !ExtraCSSpill) {
// If any non-reserved CS register isn't spilled, just spill one or two
// extra. That should take care of it!
unsigned NumExtras = TargetAlign / 4;
@@ -1958,7 +1985,7 @@ MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr(
// ADJCALLSTACKUP -> add, sp, sp, amount
MachineInstr &Old = *I;
DebugLoc dl = Old.getDebugLoc();
- unsigned Amount = Old.getOperand(0).getImm();
+ unsigned Amount = TII.getFrameSize(Old);
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
@@ -1976,14 +2003,11 @@ MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr(
ARMCC::CondCodes Pred =
(PIdx == -1) ? ARMCC::AL
: (ARMCC::CondCodes)Old.getOperand(PIdx).getImm();
+ unsigned PredReg = TII.getFramePred(Old);
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
- // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
- unsigned PredReg = Old.getOperand(2).getReg();
emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags,
Pred, PredReg);
} else {
- // Note: PredReg is operand 3 for ADJCALLSTACKUP.
- unsigned PredReg = Old.getOperand(3).getReg();
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags,
Pred, PredReg);
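// A worked example (assumed values) of the conservative bound computed by
// getMaxFPOffset above: with 8 bytes of argument registers saved and up to
// 8 registers pushed before the frame pointer is established, the FP offset
// relative to the incoming SP can be no lower than -8 - 8*4 = -40, which is
// what the assert in emitPrologue checks against the real object offset.
#include <cstdio>

static int getMaxFPOffsetSketch(int ArgRegsSaveSize) {
  // Frame pointer assumed to be r7; 8 registers spilled ahead of it.
  return -ArgRegsSaveSize - (8 * 4);
}

int main() {
  std::printf("%d\n", getMaxFPOffsetSketch(8)); // -40
}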
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index b07b4e1f5cfb..e9df9449103c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -228,11 +228,6 @@ private:
const uint16_t *DOpcodes,
const uint16_t *QOpcodes = nullptr);
- /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
- /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
- /// generated to force the table registers to be consecutive.
- void SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
-
/// Try to select SBFX/UBFX instructions for ARM.
bool tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
@@ -544,11 +539,11 @@ bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
SDValue NewMulConst;
if (canExtractShiftFromMul(N, 31, PowerOfTwo, NewMulConst)) {
HandleSDNode Handle(N);
+ SDLoc Loc(N);
replaceDAGValue(N.getOperand(1), NewMulConst);
BaseReg = Handle.getValue();
- Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ARM_AM::lsl,
- PowerOfTwo),
- SDLoc(N), MVT::i32);
+ Opc = CurDAG->getTargetConstant(
+ ARM_AM::getSORegOpc(ARM_AM::lsl, PowerOfTwo), Loc, MVT::i32);
return true;
}
}
@@ -1859,6 +1854,14 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
return Opc; // If not one we handle, return it unchanged.
}
+/// Returns true if the given increment is a Constant known to be equal to the
+/// access size performed by a NEON load/store. This means the "[rN]!" form can
+/// be used.
+static bool isPerfectIncrement(SDValue Inc, EVT VecTy, unsigned NumVecs) {
+ auto C = dyn_cast<ConstantSDNode>(Inc);
+ return C && C->getZExtValue() == VecTy.getSizeInBits() / 8 * NumVecs;
+}
+
void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
const uint16_t *DOpcodes,
const uint16_t *QOpcodes0,
@@ -1926,13 +1929,13 @@ void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
SDValue Inc = N->getOperand(AddrOpIdx + 1);
// FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
// case entirely when the rest are updated to that form, too.
- if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode()))
+ bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
+ if ((NumVecs <= 2) && !IsImmUpdate)
Opc = getVLDSTRegisterUpdateOpcode(Opc);
// FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
// check for that explicitly too. Horribly hacky, but temporary.
- if ((NumVecs > 2 && !isVLDfixed(Opc)) ||
- !isa<ConstantSDNode>(Inc.getNode()))
- Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
+ if ((NumVecs > 2 && !isVLDfixed(Opc)) || !IsImmUpdate)
+ Ops.push_back(IsImmUpdate ? Reg0 : Inc);
}
Ops.push_back(Pred);
Ops.push_back(Reg0);
@@ -2080,11 +2083,12 @@ void ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
SDValue Inc = N->getOperand(AddrOpIdx + 1);
// FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
// case entirely when the rest are updated to that form, too.
- if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
+ bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
+ if (NumVecs <= 2 && !IsImmUpdate)
Opc = getVLDSTRegisterUpdateOpcode(Opc);
// FIXME: We use a VST1 for v1i64 even if the pseudo says vld2/3/4, so
// check for that explicitly too. Horribly hacky, but temporary.
- if (!isa<ConstantSDNode>(Inc.getNode()))
+ if (!IsImmUpdate)
Ops.push_back(Inc);
else if (NumVecs > 2 && !isVSTfixed(Opc))
Ops.push_back(Reg0);
@@ -2214,7 +2218,9 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
Ops.push_back(Align);
if (isUpdating) {
SDValue Inc = N->getOperand(AddrOpIdx + 1);
- Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
+ bool IsImmUpdate =
+ isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
+ Ops.push_back(IsImmUpdate ? Reg0 : Inc);
}
SDValue SuperReg;
@@ -2318,9 +2324,11 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
// fixed-stride update instructions don't have an explicit writeback
// operand. It's implicit in the opcode itself.
SDValue Inc = N->getOperand(2);
- if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
+ bool IsImmUpdate =
+ isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
+ if (NumVecs <= 2 && !IsImmUpdate)
Opc = getVLDSTRegisterUpdateOpcode(Opc);
- if (!isa<ConstantSDNode>(Inc.getNode()))
+ if (!IsImmUpdate)
Ops.push_back(Inc);
// FIXME: VLD3 and VLD4 haven't been updated to that form yet.
else if (NumVecs > 2)
@@ -2356,39 +2364,6 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
CurDAG->RemoveDeadNode(N);
}
-void ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
- unsigned Opc) {
- assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
- SDLoc dl(N);
- EVT VT = N->getValueType(0);
- unsigned FirstTblReg = IsExt ? 2 : 1;
-
- // Form a REG_SEQUENCE to force register allocation.
- SDValue RegSeq;
- SDValue V0 = N->getOperand(FirstTblReg + 0);
- SDValue V1 = N->getOperand(FirstTblReg + 1);
- if (NumVecs == 2)
- RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
- else {
- SDValue V2 = N->getOperand(FirstTblReg + 2);
- // If it's a vtbl3, form a quad D-register and leave the last part as
- // an undef.
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
- : N->getOperand(FirstTblReg + 3);
- RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
- }
-
- SmallVector<SDValue, 6> Ops;
- if (IsExt)
- Ops.push_back(N->getOperand(1));
- Ops.push_back(RegSeq);
- Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
- Ops.push_back(getAL(CurDAG, dl)); // predicate
- Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
- ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
-}
-
bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) {
if (!Subtarget->hasV6T2Ops())
return false;
@@ -3730,59 +3705,6 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
break;
}
- case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
- switch (IntNo) {
- default:
- break;
-
- case Intrinsic::arm_neon_vtbl2:
- SelectVTBL(N, false, 2, ARM::VTBL2);
- return;
- case Intrinsic::arm_neon_vtbl3:
- SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
- return;
- case Intrinsic::arm_neon_vtbl4:
- SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
- return;
-
- case Intrinsic::arm_neon_vtbx2:
- SelectVTBL(N, true, 2, ARM::VTBX2);
- return;
- case Intrinsic::arm_neon_vtbx3:
- SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
- return;
- case Intrinsic::arm_neon_vtbx4:
- SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
- return;
- }
- break;
- }
-
- case ARMISD::VTBL1: {
- SDLoc dl(N);
- EVT VT = N->getValueType(0);
- SDValue Ops[] = {N->getOperand(0), N->getOperand(1),
- getAL(CurDAG, dl), // Predicate
- CurDAG->getRegister(0, MVT::i32)}; // Predicate Register
- ReplaceNode(N, CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops));
- return;
- }
- case ARMISD::VTBL2: {
- SDLoc dl(N);
- EVT VT = N->getValueType(0);
-
- // Form a REG_SEQUENCE to force register allocation.
- SDValue V0 = N->getOperand(0);
- SDValue V1 = N->getOperand(1);
- SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
-
- SDValue Ops[] = {RegSeq, N->getOperand(2), getAL(CurDAG, dl), // Predicate
- CurDAG->getRegister(0, MVT::i32)}; // Predicate Register
- ReplaceNode(N, CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops));
- return;
- }
-
case ISD::ATOMIC_CMP_SWAP:
SelectCMP_SWAP(N);
return;
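// A standalone sketch of the isPerfectIncrement helper introduced above: the
// post-increment "[rN]!" form is only usable when the constant increment
// equals the bytes actually accessed, i.e. the vector size in bytes times
// the number of vectors.
#include <cstdio>

static bool isPerfectIncrementSketch(unsigned IncBytes, unsigned VecBits,
                                     unsigned NumVecs) {
  return IncBytes == VecBits / 8 * NumVecs;
}

int main() {
  // A VLD2 of two 64-bit D registers accesses 16 bytes.
  std::printf("%d %d\n",
              isPerfectIncrementSketch(16, 64, 2),  // 1: "[rN]!" usable
              isPerfectIncrementSketch(8, 64, 2));  // 0: keep register update
}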
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e697c8ca5339..165e9b7378c7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -852,7 +852,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
if (!Subtarget->hasV6Ops())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide()
+ bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
: Subtarget->hasDivideInARMMode();
if (!hasDivide) {
// These are expanded into libcalls if the cpu doesn't have HW divider.
@@ -860,7 +860,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::UDIV, MVT::i32, LibCall);
}
- if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) {
+ if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
setOperationAction(ISD::SDIV, MVT::i32, Custom);
setOperationAction(ISD::UDIV, MVT::i32, Custom);
@@ -2633,7 +2633,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
return true;
}
-bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
if (!Subtarget->supportsTailCall())
return false;
@@ -3347,6 +3347,12 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
}
+ case Intrinsic::arm_neon_vtbl1:
+ return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::arm_neon_vtbl2:
+ return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
}
}
@@ -10867,11 +10873,8 @@ static SDValue CombineBaseUpdate(SDNode *N,
// If the increment is a constant, it must match the memory ref size.
SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
- if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
- uint64_t IncVal = CInc->getZExtValue();
- if (IncVal != NumBytes)
- continue;
- } else if (NumBytes >= 3 * 16) {
+ ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
+ if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
// VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
// separate instructions that make it harder to use a non-constant update.
continue;
@@ -11688,34 +11691,6 @@ static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero,
- APInt &KnownOne) {
- if (Op.getOpcode() == ARMISD::BFI) {
- // Conservatively, we can recurse down the first operand
- // and just mask out all affected bits.
- computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne);
-
- // The operand to BFI is already a mask suitable for removing the bits it
- // sets.
- ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
- const APInt &Mask = CI->getAPIntValue();
- KnownZero &= Mask;
- KnownOne &= Mask;
- return;
- }
- if (Op.getOpcode() == ARMISD::CMOV) {
- APInt KZ2(KnownZero.getBitWidth(), 0);
- APInt KO2(KnownOne.getBitWidth(), 0);
- computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne);
- computeKnownBits(DAG, Op.getOperand(1), KZ2, KO2);
-
- KnownZero &= KZ2;
- KnownOne &= KO2;
- return;
- }
- return DAG.computeKnownBits(Op, KnownZero, KnownOne);
-}
-
SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
// If we have a CMOV, OR and AND combination such as:
// if (x & CN)
@@ -11777,7 +11752,7 @@ SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &D
// Lastly, can we determine that the bits defined by OrCI
// are zero in Y?
APInt KnownZero, KnownOne;
- computeKnownBits(DAG, Y, KnownZero, KnownOne);
+ DAG.computeKnownBits(Y, KnownZero, KnownOne);
if ((OrCI & KnownZero) != OrCI)
return SDValue();
@@ -12657,6 +12632,19 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
}
}
}
+ case ARMISD::BFI: {
+ // Conservatively, we can recurse down the first operand
+ // and just mask out all affected bits.
+ DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
+
+ // The operand to BFI is already a mask suitable for removing the bits it
+ // sets.
+ ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
+ const APInt &Mask = CI->getAPIntValue();
+ KnownZero &= Mask;
+ KnownOne &= Mask;
+ return;
+ }
}
}
@@ -13052,7 +13040,9 @@ SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
// rem = a - b * div
// return {div, rem}
// This should be lowered into UDIV/SDIV + MLS later on.
- if (Subtarget->hasDivide() && Op->getValueType(0).isSimple() &&
+ bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
+ : Subtarget->hasDivideInARMMode();
+ if (hasDivide && Op->getValueType(0).isSimple() &&
Op->getSimpleValueType(0) == MVT::i32) {
unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
const SDValue Dividend = Op->getOperand(0);
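// A standalone sketch (uint32_t in place of APInt) of the BFI known-bits
// rule moved into computeKnownBitsForTargetNode above: whatever is known
// about operand 0 survives only in the bit positions the BFI mask leaves
// untouched, so both known-zero and known-one sets are ANDed with the mask.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t KnownZero = 0xFFFF0000u; // known about operand 0
  uint32_t KnownOne  = 0x000000FFu;
  uint32_t Mask      = 0xFF00FFFFu; // BFI inserts into the ~Mask bits
  KnownZero &= Mask;
  KnownOne  &= Mask;
  std::printf("KZ=%08x KO=%08x\n", (unsigned)KnownZero, (unsigned)KnownOne);
  // KZ=ff000000 KO=000000ff
}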
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index 70a0b1380ec9..8b54ce430ed2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -717,7 +717,7 @@ class InstrItineraryData;
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
- bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
SDValue ARMcc, SDValue CCR, SDValue Cmp,
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index cc0e7d4d9c35..703e8071b177 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -259,8 +259,8 @@ def HasFP16 : Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<"FeatureFP16","half-float conversions">;
def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">,
AssemblerPredicate<"FeatureFullFP16","full half-float">;
-def HasDivide : Predicate<"Subtarget->hasDivide()">,
- AssemblerPredicate<"FeatureHWDiv", "divide in THUMB">;
+def HasDivideInThumb : Predicate<"Subtarget->hasDivideInThumbMode()">,
+ AssemblerPredicate<"FeatureHWDivThumb", "divide in THUMB">;
def HasDivideInARM : Predicate<"Subtarget->hasDivideInARMMode()">,
AssemblerPredicate<"FeatureHWDivARM", "divide in ARM">;
def HasDSP : Predicate<"Subtarget->hasDSP()">,
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index 681e235d78f0..9b08c612e16b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -587,6 +587,14 @@ def SDTARMVMULL : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
def NEONvmulls : SDNode<"ARMISD::VMULLs", SDTARMVMULL>;
def NEONvmullu : SDNode<"ARMISD::VMULLu", SDTARMVMULL>;
+def SDTARMVTBL1 : SDTypeProfile<1, 2, [SDTCisVT<0, v8i8>, SDTCisVT<1, v8i8>,
+ SDTCisVT<2, v8i8>]>;
+def SDTARMVTBL2 : SDTypeProfile<1, 3, [SDTCisVT<0, v8i8>, SDTCisVT<1, v8i8>,
+ SDTCisVT<2, v8i8>, SDTCisVT<3, v8i8>]>;
+def NEONvtbl1 : SDNode<"ARMISD::VTBL1", SDTARMVTBL1>;
+def NEONvtbl2 : SDNode<"ARMISD::VTBL2", SDTARMVTBL2>;
+
+
def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{
ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
unsigned EltBits = 0;
@@ -6443,7 +6451,8 @@ def VTBL1
: N3V<1,1,0b11,0b1000,0,0, (outs DPR:$Vd),
(ins VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1,
"vtbl", "8", "$Vd, $Vn, $Vm", "",
- [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 VecListOneD:$Vn, DPR:$Vm)))]>;
+ [(set DPR:$Vd, (v8i8 (NEONvtbl1 VecListOneD:$Vn, DPR:$Vm)))]>;
+
let hasExtraSrcRegAllocReq = 1 in {
def VTBL2
: N3V<1,1,0b11,0b1001,0,0, (outs DPR:$Vd),
@@ -6498,6 +6507,49 @@ def VTBX4Pseudo
IIC_VTBX4, "$orig = $dst", []>;
} // DecoderMethod = "DecodeTBLInstruction"
+def : Pat<(v8i8 (NEONvtbl2 v8i8:$Vn0, v8i8:$Vn1, v8i8:$Vm)),
+ (v8i8 (VTBL2 (REG_SEQUENCE DPair, v8i8:$Vn0, dsub_0,
+ v8i8:$Vn1, dsub_1),
+ v8i8:$Vm))>;
+def : Pat<(v8i8 (int_arm_neon_vtbx2 v8i8:$orig, v8i8:$Vn0, v8i8:$Vn1,
+ v8i8:$Vm)),
+ (v8i8 (VTBX2 v8i8:$orig,
+ (REG_SEQUENCE DPair, v8i8:$Vn0, dsub_0,
+ v8i8:$Vn1, dsub_1),
+ v8i8:$Vm))>;
+
+def : Pat<(v8i8 (int_arm_neon_vtbl3 v8i8:$Vn0, v8i8:$Vn1,
+ v8i8:$Vn2, v8i8:$Vm)),
+ (v8i8 (VTBL3Pseudo (REG_SEQUENCE QQPR, v8i8:$Vn0, dsub_0,
+ v8i8:$Vn1, dsub_1,
+ v8i8:$Vn2, dsub_2,
+ (v8i8 (IMPLICIT_DEF)), dsub_3),
+ v8i8:$Vm))>;
+def : Pat<(v8i8 (int_arm_neon_vtbx3 v8i8:$orig, v8i8:$Vn0, v8i8:$Vn1,
+ v8i8:$Vn2, v8i8:$Vm)),
+ (v8i8 (VTBX3Pseudo v8i8:$orig,
+ (REG_SEQUENCE QQPR, v8i8:$Vn0, dsub_0,
+ v8i8:$Vn1, dsub_1,
+ v8i8:$Vn2, dsub_2,
+ (v8i8 (IMPLICIT_DEF)), dsub_3),
+ v8i8:$Vm))>;
+
+def : Pat<(v8i8 (int_arm_neon_vtbl4 v8i8:$Vn0, v8i8:$Vn1,
+ v8i8:$Vn2, v8i8:$Vn3, v8i8:$Vm)),
+ (v8i8 (VTBL4Pseudo (REG_SEQUENCE QQPR, v8i8:$Vn0, dsub_0,
+ v8i8:$Vn1, dsub_1,
+ v8i8:$Vn2, dsub_2,
+ v8i8:$Vn3, dsub_3),
+ v8i8:$Vm))>;
+def : Pat<(v8i8 (int_arm_neon_vtbx4 v8i8:$orig, v8i8:$Vn0, v8i8:$Vn1,
+ v8i8:$Vn2, v8i8:$Vn3, v8i8:$Vm)),
+ (v8i8 (VTBX4Pseudo v8i8:$orig,
+ (REG_SEQUENCE QQPR, v8i8:$Vn0, dsub_0,
+ v8i8:$Vn1, dsub_1,
+ v8i8:$Vn2, dsub_2,
+ v8i8:$Vn3, dsub_3),
+ v8i8:$Vm))>;
+
// VRINT : Vector Rounding
multiclass VRINT_FPI<string op, bits<3> op9_7, SDPatternOperator Int> {
let PostEncoderMethod = "NEONThumb2V8PostEncoder", DecoderNamespace = "v8NEON" in {
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index f5b673b78ad7..f710ee6a7e77 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -2797,7 +2797,7 @@ def t2SMLSLDX : T2DualHalfMulAddLong<0b101, 0b1101, "smlsldx">;
def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
"sdiv", "\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>,
- Requires<[HasDivide, IsThumb, HasV8MBaseline]>,
+ Requires<[HasDivideInThumb, IsThumb, HasV8MBaseline]>,
Sched<[WriteDIV]> {
let Inst{31-27} = 0b11111;
let Inst{26-21} = 0b011100;
@@ -2809,7 +2809,7 @@ def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
"udiv", "\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>,
- Requires<[HasDivide, IsThumb, HasV8MBaseline]>,
+ Requires<[HasDivideInThumb, IsThumb, HasV8MBaseline]>,
Sched<[WriteDIV]> {
let Inst{31-27} = 0b11111;
let Inst{26-21} = 0b011101;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index 8d224d6a70fa..816596b85721 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -299,6 +299,20 @@ bool ARMInstructionSelector::select(MachineInstr &I) const {
I.setDesc(TII.get(ARM::ADDrr));
MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
break;
+ case G_SUB:
+ I.setDesc(TII.get(ARM::SUBrr));
+ MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
+ break;
+ case G_MUL:
+ if (TII.getSubtarget().hasV6Ops()) {
+ I.setDesc(TII.get(ARM::MUL));
+ } else {
+ assert(TII.getSubtarget().useMulOps() && "Unsupported target");
+ I.setDesc(TII.get(ARM::MULv5));
+ MIB->getOperand(0).setIsEarlyClobber(true);
+ }
+ MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
+ break;
case G_FADD:
if (!selectFAdd(MIB, TII, MRI))
return false;
diff --git a/contrib/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
index 994bbd673dd8..fe9681439e6b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
@@ -43,8 +43,9 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
setAction({Op, 1, p0}, Legal);
}
- for (auto Ty : {s1, s8, s16, s32})
- setAction({G_ADD, Ty}, Legal);
+ for (unsigned Op : {G_ADD, G_SUB, G_MUL})
+ for (auto Ty : {s1, s8, s16, s32})
+ setAction({Op, Ty}, Legal);
for (unsigned Op : {G_SEXT, G_ZEXT}) {
setAction({Op, s32}, Legal);
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
index 08f3da738868..e47bd3a8963e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
@@ -219,6 +219,8 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
switch (Opc) {
case G_ADD:
+ case G_SUB:
+ case G_MUL:
case G_SEXT:
case G_ZEXT:
case G_GEP:
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
index 40993fc0aa8a..d2630685d91b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -208,8 +208,8 @@ protected:
/// FP registers for VFPv3.
bool HasD16 = false;
- /// HasHardwareDivide - True if subtarget supports [su]div
- bool HasHardwareDivide = false;
+ /// HasHardwareDivide - True if subtarget supports [su]div in Thumb mode
+ bool HasHardwareDivideInThumb = false;
/// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode
bool HasHardwareDivideInARM = false;
@@ -507,7 +507,7 @@ public:
return hasNEON() && UseNEONForSinglePrecisionFP;
}
- bool hasDivide() const { return HasHardwareDivide; }
+ bool hasDivideInThumbMode() const { return HasHardwareDivideInThumb; }
bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
bool hasDataBarrier() const { return HasDataBarrier; }
bool hasV7Clrex() const { return HasV7Clrex; }
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index f421d3ac1693..ada816c16389 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -67,6 +67,9 @@ static cl::opt<ImplicitItModeTy> ImplicitItMode(
clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
"Warn in ARM, emit implicit ITs in Thumb")));
+static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
+ cl::init(false));
+
class ARMOperand;
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
@@ -540,6 +543,10 @@ public:
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ // Add build attributes based on the selected target.
+ if (AddBuildAttributes)
+ getTargetStreamer().emitTargetAttributes(STI);
+
// Not in an ITBlock to start with.
ITState.CurPosition = ~0U;
@@ -10189,8 +10196,8 @@ static const struct {
{ ARM::AEK_CRYPTO, Feature_HasV8,
{ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
{ ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} },
- { (ARM::AEK_HWDIV | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
- {ARM::FeatureHWDiv, ARM::FeatureHWDivARM} },
+ { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
+ {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
{ ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} },
{ ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
{ ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} },
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 6fa890ba1cd5..4d6c52f3cd49 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -464,7 +464,7 @@ public:
void emitUnwindRaw(int64_t Offset, const SmallVectorImpl<uint8_t> &Opcodes);
void ChangeSection(MCSection *Section, const MCExpr *Subsection) override {
- LastMappingSymbols[getPreviousSection().first] = std::move(LastEMSInfo);
+ LastMappingSymbols[getCurrentSection().first] = std::move(LastEMSInfo);
MCELFStreamer::ChangeSection(Section, Subsection);
auto LastMappingSymbol = LastMappingSymbols.find(Section);
if (LastMappingSymbol != LastMappingSymbols.end()) {
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
index 73e563890dd9..2b0cd461df7a 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
@@ -11,9 +11,13 @@
//
//===----------------------------------------------------------------------===//
+#include "ARMTargetMachine.h"
#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ARMBuildAttributes.h"
+#include "llvm/Support/TargetParser.h"
using namespace llvm;
@@ -75,3 +79,179 @@ void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {}
void
ARMTargetStreamer::AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE) {}
void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}
+
+static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) {
+ if (STI.getCPU() == "xscale")
+ return ARMBuildAttrs::v5TEJ;
+
+ if (STI.hasFeature(ARM::HasV8Ops)) {
+ if (STI.hasFeature(ARM::FeatureRClass))
+ return ARMBuildAttrs::v8_R;
+ return ARMBuildAttrs::v8_A;
+ } else if (STI.hasFeature(ARM::HasV8MMainlineOps))
+ return ARMBuildAttrs::v8_M_Main;
+ else if (STI.hasFeature(ARM::HasV7Ops)) {
+ if (STI.hasFeature(ARM::FeatureMClass) && STI.hasFeature(ARM::FeatureDSP))
+ return ARMBuildAttrs::v7E_M;
+ return ARMBuildAttrs::v7;
+ } else if (STI.hasFeature(ARM::HasV6T2Ops))
+ return ARMBuildAttrs::v6T2;
+ else if (STI.hasFeature(ARM::HasV8MBaselineOps))
+ return ARMBuildAttrs::v8_M_Base;
+ else if (STI.hasFeature(ARM::HasV6MOps))
+ return ARMBuildAttrs::v6S_M;
+ else if (STI.hasFeature(ARM::HasV6Ops))
+ return ARMBuildAttrs::v6;
+ else if (STI.hasFeature(ARM::HasV5TEOps))
+ return ARMBuildAttrs::v5TE;
+ else if (STI.hasFeature(ARM::HasV5TOps))
+ return ARMBuildAttrs::v5T;
+ else if (STI.hasFeature(ARM::HasV4TOps))
+ return ARMBuildAttrs::v4T;
+ else
+ return ARMBuildAttrs::v4;
+}
+
+static bool isV8M(const MCSubtargetInfo &STI) {
+ // Note that v8M Baseline is a subset of v6T2!
+ return (STI.hasFeature(ARM::HasV8MBaselineOps) &&
+ !STI.hasFeature(ARM::HasV6T2Ops)) ||
+ STI.hasFeature(ARM::HasV8MMainlineOps);
+}
+
+/// Emit the build attributes that only depend on the hardware that we expect
+/// to be available, and not on the ABI, or any source-language choices.
+void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
+ switchVendor("aeabi");
+
+ const StringRef CPUString = STI.getCPU();
+ if (!CPUString.empty() && !CPUString.startswith("generic")) {
+ // FIXME: remove krait check when GNU tools support krait cpu
+ if (STI.hasFeature(ARM::ProcKrait)) {
+ emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
+ // We consider krait as a "cortex-a9" + hwdiv CPU
+ // Enable hwdiv through ".arch_extension idiv"
+ if (STI.hasFeature(ARM::FeatureHWDivThumb) ||
+ STI.hasFeature(ARM::FeatureHWDivARM))
+ emitArchExtension(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM);
+ } else {
+ emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
+ }
+ }
+
+ emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(STI));
+
+ if (STI.hasFeature(ARM::FeatureAClass)) {
+ emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::ApplicationProfile);
+ } else if (STI.hasFeature(ARM::FeatureRClass)) {
+ emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::RealTimeProfile);
+ } else if (STI.hasFeature(ARM::FeatureMClass)) {
+ emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::MicroControllerProfile);
+ }
+
+ emitAttribute(ARMBuildAttrs::ARM_ISA_use, STI.hasFeature(ARM::FeatureNoARM)
+ ? ARMBuildAttrs::Not_Allowed
+ : ARMBuildAttrs::Allowed);
+
+ if (isV8M(STI)) {
+ emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::AllowThumbDerived);
+ } else if (STI.hasFeature(ARM::FeatureThumb2)) {
+ emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::AllowThumb32);
+ } else if (STI.hasFeature(ARM::HasV4TOps)) {
+ emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
+ }
+
+ if (STI.hasFeature(ARM::FeatureNEON)) {
+    /* NEON is not exactly a VFP architecture, but GAS emits one of
+ * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
+ if (STI.hasFeature(ARM::FeatureFPARMv8)) {
+ if (STI.hasFeature(ARM::FeatureCrypto))
+ emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8);
+ else
+ emitFPU(ARM::FK_NEON_FP_ARMV8);
+ } else if (STI.hasFeature(ARM::FeatureVFP4))
+ emitFPU(ARM::FK_NEON_VFPV4);
+ else
+ emitFPU(STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_NEON_FP16
+ : ARM::FK_NEON);
+ // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
+ if (STI.hasFeature(ARM::HasV8Ops))
+ emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
+ STI.hasFeature(ARM::HasV8_1aOps)
+ ? ARMBuildAttrs::AllowNeonARMv8_1a
+ : ARMBuildAttrs::AllowNeonARMv8);
+ } else {
+ if (STI.hasFeature(ARM::FeatureFPARMv8))
+ // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
+ // FPU, but there are two different names for it depending on the CPU.
+ emitFPU(STI.hasFeature(ARM::FeatureD16)
+ ? (STI.hasFeature(ARM::FeatureVFPOnlySP) ? ARM::FK_FPV5_SP_D16
+ : ARM::FK_FPV5_D16)
+ : ARM::FK_FP_ARMV8);
+ else if (STI.hasFeature(ARM::FeatureVFP4))
+ emitFPU(STI.hasFeature(ARM::FeatureD16)
+ ? (STI.hasFeature(ARM::FeatureVFPOnlySP) ? ARM::FK_FPV4_SP_D16
+ : ARM::FK_VFPV4_D16)
+ : ARM::FK_VFPV4);
+ else if (STI.hasFeature(ARM::FeatureVFP3))
+ emitFPU(
+ STI.hasFeature(ARM::FeatureD16)
+ // +d16
+ ? (STI.hasFeature(ARM::FeatureVFPOnlySP)
+ ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16
+ : ARM::FK_VFPV3XD)
+ : (STI.hasFeature(ARM::FeatureFP16)
+ ? ARM::FK_VFPV3_D16_FP16
+ : ARM::FK_VFPV3_D16))
+ // -d16
+ : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16
+ : ARM::FK_VFPV3));
+ else if (STI.hasFeature(ARM::FeatureVFP2))
+ emitFPU(ARM::FK_VFPV2);
+ }
+
+ // ABI_HardFP_use attribute to indicate single precision FP.
+ if (STI.hasFeature(ARM::FeatureVFPOnlySP))
+ emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
+ ARMBuildAttrs::HardFPSinglePrecision);
+
+ if (STI.hasFeature(ARM::FeatureFP16))
+ emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);
+
+ if (STI.hasFeature(ARM::FeatureMP))
+ emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);
+
+ // Hardware divide in ARM mode is part of base arch, starting from ARMv8.
+ // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
+ // It is not possible to produce DisallowDIV: if hwdiv is present in the base
+ // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
+ // AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
+ // otherwise, the default value (AllowDIVIfExists) applies.
+ if (STI.hasFeature(ARM::FeatureHWDivARM) && !STI.hasFeature(ARM::HasV8Ops))
+ emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
+
+ if (STI.hasFeature(ARM::FeatureDSP) && isV8M(STI))
+ emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);
+
+ if (STI.hasFeature(ARM::FeatureStrictAlign))
+ emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
+ ARMBuildAttrs::Not_Allowed);
+ else
+ emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
+ ARMBuildAttrs::Allowed);
+
+ if (STI.hasFeature(ARM::FeatureTrustZone) &&
+ STI.hasFeature(ARM::FeatureVirtualization))
+ emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowTZVirtualization);
+ else if (STI.hasFeature(ARM::FeatureTrustZone))
+ emitAttribute(ARMBuildAttrs::Virtualization_use, ARMBuildAttrs::AllowTZ);
+ else if (STI.hasFeature(ARM::FeatureVirtualization))
+ emitAttribute(ARMBuildAttrs::Virtualization_use,
+ ARMBuildAttrs::AllowVirtualization);
+}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index fc083b98395b..d0fd366ab9ed 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -83,13 +83,12 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// ADJCALLSTACKUP -> add, sp, sp, amount
MachineInstr &Old = *I;
DebugLoc dl = Old.getDebugLoc();
- unsigned Amount = Old.getOperand(0).getImm();
+ unsigned Amount = TII.getFrameSize(Old);
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned Align = getStackAlignment();
- Amount = (Amount+Align-1)/Align*Align;
+ Amount = alignTo(Amount, getStackAlignment());
// Replace the pseudo instruction with a new instruction...
unsigned Opc = Old.getOpcode();
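
A quick numeric check (sketch, not patch content) that the llvm::alignTo call introduced above computes the same rounding as the open-coded expression it replaces:

    #include <cassert>
    #include <cstdint>

    // Round Value up to the next multiple of Align (Align > 0); this is the
    // (Amount + Align - 1) / Align * Align expression removed by the hunk.
    static uint64_t alignUp(uint64_t Value, uint64_t Align) {
      return (Value + Align - 1) / Align * Align;
    }

    int main() {
      assert(alignUp(13, 8) == 16);  // outgoing-argument area rounded up
      assert(alignUp(16, 8) == 16);  // already-aligned values are unchanged
      assert(alignUp(1, 4) == 4);
    }
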
diff --git a/contrib/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp b/contrib/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp
index 9f2ee8cf8035..535bb012eb07 100644
--- a/contrib/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp
@@ -18,7 +18,7 @@
namespace llvm {
AVRMCAsmInfo::AVRMCAsmInfo(const Triple &TT) {
- PointerSize = 2;
+ CodePointerSize = 2;
CalleeSaveStackSlotSize = 2;
CommentString = ";";
PrivateGlobalPrefix = ".L";
diff --git a/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h b/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
index 559ac291a79e..fd7c97bf1f0a 100644
--- a/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
@@ -42,7 +42,7 @@ public:
// messed up in random places by 4 bytes. .debug_line
// section will be parsable, but with odd offsets and
// line numbers, etc.
- PointerSize = 8;
+ CodePointerSize = 8;
}
};
}
diff --git a/contrib/llvm/lib/Target/Hexagon/BitTracker.cpp b/contrib/llvm/lib/Target/Hexagon/BitTracker.cpp
index 61d3630ac095..cb3049bf1500 100644
--- a/contrib/llvm/lib/Target/Hexagon/BitTracker.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/BitTracker.cpp
@@ -1011,12 +1011,7 @@ void BT::subst(RegisterRef OldRR, RegisterRef NewRR) {
bool BT::reached(const MachineBasicBlock *B) const {
int BN = B->getNumber();
assert(BN >= 0);
- for (EdgeSetType::iterator I = EdgeExec.begin(), E = EdgeExec.end();
- I != E; ++I) {
- if (I->second == BN)
- return true;
- }
- return false;
+ return ReachedBB.count(BN);
}
// Visit an individual instruction. This could be a newly added instruction,
@@ -1036,6 +1031,8 @@ void BT::reset() {
EdgeExec.clear();
InstrExec.clear();
Map.clear();
+ ReachedBB.clear();
+ ReachedBB.reserve(MF.size());
}
void BT::run() {
@@ -1068,6 +1065,7 @@ void BT::run() {
if (EdgeExec.count(Edge))
continue;
EdgeExec.insert(Edge);
+ ReachedBB.insert(Edge.second);
const MachineBasicBlock &B = *MF.getBlockNumbered(Edge.second);
MachineBasicBlock::const_iterator It = B.begin(), End = B.end();
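
The caching pattern this hunk introduces, as a self-contained sketch with illustrative names (not the LLVM types): a reached block is recorded once, when its incoming edge first becomes executable, so reached() turns from a scan over all edges into a set lookup.

    #include <set>
    #include <unordered_set>
    #include <utility>

    struct ReachSketch {
      std::set<std::pair<int, int>> EdgeExec;  // executable CFG edges (from, to)
      std::unordered_set<int> ReachedBB;       // cache of reached block numbers

      void visitEdge(int From, int To) {
        if (EdgeExec.insert({From, To}).second)
          ReachedBB.insert(To);                // kept in sync with EdgeExec
      }
      bool reached(int BN) const {             // O(1) average, was O(|EdgeExec|)
        return ReachedBB.count(BN) != 0;
      }
    };

    int main() {
      ReachSketch R;
      R.visitEdge(0, 1);
      return R.reached(1) && !R.reached(2) ? 0 : 1;
    }
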
diff --git a/contrib/llvm/lib/Target/Hexagon/BitTracker.h b/contrib/llvm/lib/Target/Hexagon/BitTracker.h
index a547b34e852f..7f49f430382d 100644
--- a/contrib/llvm/lib/Target/Hexagon/BitTracker.h
+++ b/contrib/llvm/lib/Target/Hexagon/BitTracker.h
@@ -10,6 +10,7 @@
#ifndef LLVM_LIB_TARGET_HEXAGON_BITTRACKER_H
#define LLVM_LIB_TARGET_HEXAGON_BITTRACKER_H
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -68,10 +69,11 @@ private:
typedef std::set<const MachineInstr *> InstrSetType;
typedef std::queue<CFGEdge> EdgeQueueType;
- EdgeSetType EdgeExec; // Executable flow graph edges.
- InstrSetType InstrExec; // Executable instructions.
- EdgeQueueType FlowQ; // Work queue of CFG edges.
- bool Trace; // Enable tracing for debugging.
+ EdgeSetType EdgeExec; // Executable flow graph edges.
+ InstrSetType InstrExec; // Executable instructions.
+ EdgeQueueType FlowQ; // Work queue of CFG edges.
+ DenseSet<unsigned> ReachedBB; // Cache of reached blocks.
+ bool Trace; // Enable tracing for debugging.
const MachineEvaluator &ME;
MachineFunction &MF;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 418dd71aeb4b..e5eb059b566f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -635,7 +635,7 @@ HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
}
-bool HexagonTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
// If either no tail call or told not to tail call at all, don't.
auto Attr =
CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index fb8f0ba6b057..1415156487c0 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -195,7 +195,7 @@ namespace HexagonISD {
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const override;
- bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
/// If a physical register, this returns the register that receives the
/// exception address on entry to an EH pad.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
index b243de317dc5..27b40f134b1f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
@@ -35,7 +35,6 @@
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
-#include <map>
static cl::opt<int> CodeGrowthLimit("hexagon-amode-growth-limit",
cl::Hidden, cl::init(0), cl::desc("Code growth limit for address mode "
@@ -45,10 +44,8 @@ using namespace llvm;
using namespace rdf;
namespace llvm {
-
FunctionPass *createHexagonOptAddrMode();
- void initializeHexagonOptAddrModePass(PassRegistry &);
-
+ void initializeHexagonOptAddrModePass(PassRegistry&);
} // end namespace llvm
namespace {
@@ -59,10 +56,7 @@ public:
HexagonOptAddrMode()
: MachineFunctionPass(ID), HII(nullptr), MDT(nullptr), DFG(nullptr),
- LV(nullptr) {
- PassRegistry &R = *PassRegistry::getPassRegistry();
- initializeHexagonOptAddrModePass(R);
- }
+ LV(nullptr) {}
StringRef getPassName() const override {
return "Optimize addressing mode of load/store";
@@ -84,7 +78,6 @@ private:
MachineDominatorTree *MDT;
DataFlowGraph *DFG;
DataFlowGraph::DefStackMap DefM;
- std::map<RegisterRef, std::map<NodeId, NodeId>> RDefMap;
Liveness *LV;
MISetType Deleted;
@@ -99,8 +92,6 @@ private:
void getAllRealUses(NodeAddr<StmtNode *> SN, NodeList &UNodeList);
bool allValidCandidates(NodeAddr<StmtNode *> SA, NodeList &UNodeList);
short getBaseWithLongOffset(const MachineInstr &MI) const;
- void updateMap(NodeAddr<InstrNode *> IA);
- bool constructDefMap(MachineBasicBlock *B);
bool changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
unsigned ImmOpNum);
bool changeLoad(MachineInstr *OldMI, MachineOperand ImmOp, unsigned ImmOpNum);
@@ -112,11 +103,11 @@ private:
char HexagonOptAddrMode::ID = 0;
-INITIALIZE_PASS_BEGIN(HexagonOptAddrMode, "opt-amode",
+INITIALIZE_PASS_BEGIN(HexagonOptAddrMode, "amode-opt",
"Optimize addressing mode", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier)
-INITIALIZE_PASS_END(HexagonOptAddrMode, "opt-amode", "Optimize addressing mode",
+INITIALIZE_PASS_END(HexagonOptAddrMode, "amode-opt", "Optimize addressing mode",
false, false)
bool HexagonOptAddrMode::hasRepForm(MachineInstr &MI, unsigned TfrDefR) {
@@ -173,8 +164,11 @@ bool HexagonOptAddrMode::canRemoveAddasl(NodeAddr<StmtNode *> AddAslSN,
for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
NodeAddr<UseNode *> UA = *I;
NodeAddr<InstrNode *> IA = UA.Addr->getOwner(*DFG);
- if ((UA.Addr->getFlags() & NodeAttrs::PhiRef) ||
- RDefMap[OffsetRR][IA.Id] != OffsetRegRD)
+ if (UA.Addr->getFlags() & NodeAttrs::PhiRef)
+ return false;
+ NodeAddr<RefNode*> AA = LV->getNearestAliasedRef(OffsetRR, IA);
+ if ((DFG->IsDef(AA) && AA.Id != OffsetRegRD) ||
+ AA.Addr->getReachingDef() != OffsetRegRD)
return false;
MachineInstr &UseMI = *NodeAddr<StmtNode *>(IA).Addr->getCode();
@@ -486,14 +480,14 @@ bool HexagonOptAddrMode::changeAddAsl(NodeAddr<UseNode *> AddAslUN,
MIB.add(AddAslMI->getOperand(2));
MIB.add(AddAslMI->getOperand(3));
const GlobalValue *GV = ImmOp.getGlobal();
- MIB.addGlobalAddress(GV, UseMI->getOperand(2).getImm(),
+ MIB.addGlobalAddress(GV, UseMI->getOperand(2).getImm()+ImmOp.getOffset(),
ImmOp.getTargetFlags());
OpStart = 3;
} else if (UseMID.mayStore()) {
MIB.add(AddAslMI->getOperand(2));
MIB.add(AddAslMI->getOperand(3));
const GlobalValue *GV = ImmOp.getGlobal();
- MIB.addGlobalAddress(GV, UseMI->getOperand(1).getImm(),
+ MIB.addGlobalAddress(GV, UseMI->getOperand(1).getImm()+ImmOp.getOffset(),
ImmOp.getTargetFlags());
MIB.add(UseMI->getOperand(2));
OpStart = 3;
@@ -597,46 +591,6 @@ bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) {
return Changed;
}
-void HexagonOptAddrMode::updateMap(NodeAddr<InstrNode *> IA) {
- RegisterSet RRs;
- for (NodeAddr<RefNode *> RA : IA.Addr->members(*DFG))
- RRs.insert(RA.Addr->getRegRef(*DFG));
- bool Common = false;
- for (auto &R : RDefMap) {
- if (!RRs.count(R.first))
- continue;
- Common = true;
- break;
- }
- if (!Common)
- return;
-
- for (auto &R : RDefMap) {
- auto F = DefM.find(R.first.Reg);
- if (F == DefM.end() || F->second.empty())
- continue;
- R.second[IA.Id] = F->second.top()->Id;
- }
-}
-
-bool HexagonOptAddrMode::constructDefMap(MachineBasicBlock *B) {
- bool Changed = false;
- auto BA = DFG->getFunc().Addr->findBlock(B, *DFG);
- DFG->markBlock(BA.Id, DefM);
-
- for (NodeAddr<InstrNode *> IA : BA.Addr->members(*DFG)) {
- updateMap(IA);
- DFG->pushAllDefs(IA, DefM);
- }
-
- MachineDomTreeNode *N = MDT->getNode(B);
- for (auto I : *N)
- Changed |= constructDefMap(I->getBlock());
-
- DFG->releaseBlock(BA.Id, DefM);
- return Changed;
-}
-
bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(*MF.getFunction()))
return false;
@@ -658,8 +612,6 @@ bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) {
L.computePhiInfo();
LV = &L;
- constructDefMap(&DFG->getMF().front());
-
Deleted.clear();
NodeAddr<FuncNode *> FA = DFG->getFunc();
DEBUG(dbgs() << "==== [RefMap#]=====:\n "
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 06fc9195fa67..6913d50bbcaa 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -111,6 +111,7 @@ namespace llvm {
extern char &HexagonExpandCondsetsID;
void initializeHexagonExpandCondsetsPass(PassRegistry&);
void initializeHexagonLoopIdiomRecognizePass(PassRegistry&);
+ void initializeHexagonOptAddrModePass(PassRegistry&);
Pass *createHexagonLoopIdiomPass();
FunctionPass *createHexagonBitSimplify();
@@ -152,6 +153,7 @@ extern "C" void LLVMInitializeHexagonTarget() {
// Register the target.
RegisterTargetMachine<HexagonTargetMachine> X(getTheHexagonTarget());
initializeHexagonLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
+ initializeHexagonOptAddrModePass(*PassRegistry::getPassRegistry());
}
HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
diff --git a/contrib/llvm/lib/Target/Hexagon/RDFCopy.cpp b/contrib/llvm/lib/Target/Hexagon/RDFCopy.cpp
index 57ce9fabc5e3..ea86ffba58f6 100644
--- a/contrib/llvm/lib/Target/Hexagon/RDFCopy.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/RDFCopy.cpp
@@ -59,7 +59,7 @@ void CopyPropagation::recordCopy(NodeAddr<StmtNode*> SA, EqualityMap &EM) {
bool CopyPropagation::scanBlock(MachineBasicBlock *B) {
bool Changed = false;
- auto BA = DFG.getFunc().Addr->findBlock(B, DFG);
+ NodeAddr<BlockNode*> BA = DFG.findBlock(B);
for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
if (DFG.IsCode<NodeAttrs::Stmt>(IA)) {
diff --git a/contrib/llvm/lib/Target/Hexagon/RDFGraph.h b/contrib/llvm/lib/Target/Hexagon/RDFGraph.h
index d5faca4cd6f4..52f390356b26 100644
--- a/contrib/llvm/lib/Target/Hexagon/RDFGraph.h
+++ b/contrib/llvm/lib/Target/Hexagon/RDFGraph.h
@@ -508,7 +508,8 @@ namespace rdf {
static_assert(sizeof(NodeBase) <= NodeAllocator::NodeMemSize,
"NodeBase must be at most NodeAllocator::NodeMemSize bytes");
- typedef std::vector<NodeAddr<NodeBase*>> NodeList;
+// typedef std::vector<NodeAddr<NodeBase*>> NodeList;
+ typedef SmallVector<NodeAddr<NodeBase*>,4> NodeList;
typedef std::set<NodeId> NodeSet;
struct RefNode : public NodeBase {
diff --git a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp
index 5c5496a548af..4224ded3418b 100644
--- a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp
@@ -69,6 +69,19 @@ PhysicalRegisterInfo::PhysicalRegisterInfo(const TargetRegisterInfo &tri,
for (const MachineOperand &Op : In.operands())
if (Op.isRegMask())
RegMasks.insert(Op.getRegMask());
+
+ MaskInfos.resize(RegMasks.size()+1);
+ for (uint32_t M = 1, NM = RegMasks.size(); M <= NM; ++M) {
+ BitVector PU(TRI.getNumRegUnits());
+ const uint32_t *MB = RegMasks.get(M);
+ for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i) {
+ if (!(MB[i/32] & (1u << (i%32))))
+ continue;
+ for (MCRegUnitIterator U(i, &TRI); U.isValid(); ++U)
+ PU.set(*U);
+ }
+ MaskInfos[M].Units = PU.flip();
+ }
}
RegisterRef PhysicalRegisterInfo::normalize(RegisterRef RR) const {
@@ -201,17 +214,8 @@ bool PhysicalRegisterInfo::aliasMM(RegisterRef RM, RegisterRef RN) const {
bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
- if (PhysicalRegisterInfo::isRegMaskId(RR.Reg)) {
- // XXX SLOW
- const uint32_t *MB = PRI.getRegMaskBits(RR.Reg);
- for (unsigned i = 1, e = PRI.getTRI().getNumRegs(); i != e; ++i) {
- if (MB[i/32] & (1u << (i%32)))
- continue;
- if (hasAliasOf(RegisterRef(i, LaneBitmask::getAll())))
- return true;
- }
- return false;
- }
+ if (PhysicalRegisterInfo::isRegMaskId(RR.Reg))
+ return Units.anyCommon(PRI.getMaskUnits(RR.Reg));
for (MCRegUnitMaskIterator U(RR.Reg, &PRI.getTRI()); U.isValid(); ++U) {
std::pair<uint32_t,LaneBitmask> P = *U;
@@ -224,15 +228,8 @@ bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
bool RegisterAggr::hasCoverOf(RegisterRef RR) const {
if (PhysicalRegisterInfo::isRegMaskId(RR.Reg)) {
- // XXX SLOW
- const uint32_t *MB = PRI.getRegMaskBits(RR.Reg);
- for (unsigned i = 1, e = PRI.getTRI().getNumRegs(); i != e; ++i) {
- if (MB[i/32] & (1u << (i%32)))
- continue;
- if (!hasCoverOf(RegisterRef(i, LaneBitmask::getAll())))
- return false;
- }
- return true;
+ BitVector T(PRI.getMaskUnits(RR.Reg));
+ return T.reset(Units).none();
}
for (MCRegUnitMaskIterator U(RR.Reg, &PRI.getTRI()); U.isValid(); ++U) {
@@ -246,15 +243,7 @@ bool RegisterAggr::hasCoverOf(RegisterRef RR) const {
RegisterAggr &RegisterAggr::insert(RegisterRef RR) {
if (PhysicalRegisterInfo::isRegMaskId(RR.Reg)) {
- BitVector PU(PRI.getTRI().getNumRegUnits()); // Preserved units.
- const uint32_t *MB = PRI.getRegMaskBits(RR.Reg);
- for (unsigned i = 1, e = PRI.getTRI().getNumRegs(); i != e; ++i) {
- if (!(MB[i/32] & (1u << (i%32))))
- continue;
- for (MCRegUnitIterator U(i, &PRI.getTRI()); U.isValid(); ++U)
- PU.set(*U);
- }
- Units |= PU.flip();
+ Units |= PRI.getMaskUnits(RR.Reg);
return *this;
}
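
A sketch of the precomputation idea above, with toy types standing in for the LLVM API: each register mask's per-register "preserved" bits are expanded into register units once, and the complement gives the clobbered units, so the later alias/cover queries reduce to single bit-vector operations.

    #include <bitset>
    #include <vector>

    constexpr unsigned NumUnits = 64;          // toy size for illustration
    using UnitSet = std::bitset<NumUnits>;

    // Preserved[R] says whether register R survives; UnitsOfReg maps a
    // register to its register units. Result: the set of clobbered units.
    static UnitSet clobberedUnits(const std::vector<bool> &Preserved,
                                  const std::vector<std::vector<unsigned>> &UnitsOfReg) {
      UnitSet PU;                              // units of preserved registers
      for (unsigned R = 1; R < Preserved.size(); ++R)
        if (Preserved[R])
          for (unsigned U : UnitsOfReg[R])
            PU.set(U);
      return ~PU;                              // clobbered = complement, cf. flip()
    }

    int main() {
      std::vector<bool> Preserved = {false, true, false};
      std::vector<std::vector<unsigned>> Units = {{}, {0, 1}, {2}};
      return clobberedUnits(Preserved, Units).test(2) ? 0 : 1;
    }
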
diff --git a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h
index 4b35c85a6b62..314d8b5666d7 100644
--- a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h
+++ b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h
@@ -51,6 +51,8 @@ namespace rdf {
return F - Map.begin() + 1;
}
+ uint32_t size() const { return Map.size(); }
+
typedef typename std::vector<T>::const_iterator const_iterator;
const_iterator begin() const { return Map.begin(); }
const_iterator end() const { return Map.end(); }
@@ -107,6 +109,9 @@ namespace rdf {
RegisterRef getRefForUnit(uint32_t U) const {
return RegisterRef(UnitInfos[U].Reg, UnitInfos[U].Mask);
}
+ const BitVector &getMaskUnits(RegisterId MaskId) const {
+ return MaskInfos[TargetRegisterInfo::stackSlot2Index(MaskId)].Units;
+ }
const TargetRegisterInfo &getTRI() const { return TRI; }
@@ -118,11 +123,15 @@ namespace rdf {
RegisterId Reg = 0;
LaneBitmask Mask;
};
+ struct MaskInfo {
+ BitVector Units;
+ };
const TargetRegisterInfo &TRI;
+ IndexedSet<const uint32_t*> RegMasks;
std::vector<RegInfo> RegInfos;
std::vector<UnitInfo> UnitInfos;
- IndexedSet<const uint32_t*> RegMasks;
+ std::vector<MaskInfo> MaskInfos;
bool aliasRR(RegisterRef RA, RegisterRef RB) const;
bool aliasRM(RegisterRef RR, RegisterRef RM) const;
@@ -135,7 +144,7 @@ namespace rdf {
: Units(pri.getTRI().getNumRegUnits()), PRI(pri) {}
RegisterAggr(const RegisterAggr &RG) = default;
- bool empty() const { return Units.empty(); }
+ bool empty() const { return Units.none(); }
bool hasAliasOf(RegisterRef RR) const;
bool hasCoverOf(RegisterRef RR) const;
static bool isCoverOf(RegisterRef RA, RegisterRef RB,
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
index c26b3081dbc3..82e6731ecd78 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
void MSP430MCAsmInfo::anchor() { }
MSP430MCAsmInfo::MSP430MCAsmInfo(const Triple &TT) {
- PointerSize = CalleeSaveStackSlotSize = 2;
+ CodePointerSize = CalleeSaveStackSlotSize = 2;
CommentString = ";";
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index ebe3c5784888..11411d997bb3 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -23,7 +23,7 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Triple &TheTriple) {
if ((TheTriple.getArch() == Triple::mips64el) ||
(TheTriple.getArch() == Triple::mips64)) {
- PointerSize = CalleeSaveStackSlotSize = 8;
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
}
// FIXME: This condition isn't quite right but it's the best we can do until
diff --git a/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
index 8b04fcb76920..bf79f0f2ff82 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -3781,6 +3781,80 @@ let Predicates = [HasMSA] in {
ISA_MIPS1_NOT_32R6_64R6;
}
+def vsplati64_imm_eq_63 : PatLeaf<(bitconvert (v4i32 (build_vector))), [{
+ APInt Imm;
+ SDNode *BV = N->getOperand(0).getNode();
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ return selectVSplat(BV, Imm, EltTy.getSizeInBits()) &&
+ Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == 63;
+}]>;
+
+def immi32Cst7 : ImmLeaf<i32, [{return isUInt<32>(Imm) && Imm == 7;}]>;
+def immi32Cst15 : ImmLeaf<i32, [{return isUInt<32>(Imm) && Imm == 15;}]>;
+def immi32Cst31 : ImmLeaf<i32, [{return isUInt<32>(Imm) && Imm == 31;}]>;
+
+def vsplati8imm7 : PatFrag<(ops node:$wt),
+ (and node:$wt, (vsplati8 immi32Cst7))>;
+def vsplati16imm15 : PatFrag<(ops node:$wt),
+ (and node:$wt, (vsplati16 immi32Cst15))>;
+def vsplati32imm31 : PatFrag<(ops node:$wt),
+ (and node:$wt, (vsplati32 immi32Cst31))>;
+def vsplati64imm63 : PatFrag<(ops node:$wt),
+ (and node:$wt, vsplati64_imm_eq_63)>;
+
+class MSAShiftPat<SDNode Node, ValueType VT, MSAInst Insn, dag Vec> :
+ MSAPat<(VT (Node VT:$ws, (VT (and VT:$wt, Vec)))),
+ (VT (Insn VT:$ws, VT:$wt))>;
+
+class MSABitPat<SDNode Node, ValueType VT, MSAInst Insn, PatFrag Frag> :
+ MSAPat<(VT (Node VT:$ws, (shl vsplat_imm_eq_1, (Frag VT:$wt)))),
+ (VT (Insn VT:$ws, VT:$wt))>;
+
+multiclass MSAShiftPats<SDNode Node, string Insn> {
+ def : MSAShiftPat<Node, v16i8, !cast<MSAInst>(Insn#_B),
+ (vsplati8 immi32Cst7)>;
+ def : MSAShiftPat<Node, v8i16, !cast<MSAInst>(Insn#_H),
+ (vsplati16 immi32Cst15)>;
+ def : MSAShiftPat<Node, v4i32, !cast<MSAInst>(Insn#_W),
+ (vsplati32 immi32Cst31)>;
+ def : MSAPat<(v2i64 (Node v2i64:$ws, (v2i64 (and v2i64:$wt,
+ vsplati64_imm_eq_63)))),
+ (v2i64 (!cast<MSAInst>(Insn#_D) v2i64:$ws, v2i64:$wt))>;
+}
+
+multiclass MSABitPats<SDNode Node, string Insn> {
+ def : MSABitPat<Node, v16i8, !cast<MSAInst>(Insn#_B), vsplati8imm7>;
+ def : MSABitPat<Node, v8i16, !cast<MSAInst>(Insn#_H), vsplati16imm15>;
+ def : MSABitPat<Node, v4i32, !cast<MSAInst>(Insn#_W), vsplati32imm31>;
+ def : MSAPat<(Node v2i64:$ws, (shl (v2i64 vsplati64_imm_eq_1),
+ (vsplati64imm63 v2i64:$wt))),
+ (v2i64 (!cast<MSAInst>(Insn#_D) v2i64:$ws, v2i64:$wt))>;
+}
+
+defm : MSAShiftPats<shl, "SLL">;
+defm : MSAShiftPats<srl, "SRL">;
+defm : MSAShiftPats<sra, "SRA">;
+defm : MSABitPats<xor, "BNEG">;
+defm : MSABitPats<or, "BSET">;
+
+def : MSAPat<(and v16i8:$ws, (xor (shl vsplat_imm_eq_1,
+ (vsplati8imm7 v16i8:$wt)),
+ immAllOnesV)),
+ (v16i8 (BCLR_B v16i8:$ws, v16i8:$wt))>;
+def : MSAPat<(and v8i16:$ws, (xor (shl vsplat_imm_eq_1,
+ (vsplati16imm15 v8i16:$wt)),
+ immAllOnesV)),
+ (v8i16 (BCLR_H v8i16:$ws, v8i16:$wt))>;
+def : MSAPat<(and v4i32:$ws, (xor (shl vsplat_imm_eq_1,
+ (vsplati32imm31 v4i32:$wt)),
+ immAllOnesV)),
+ (v4i32 (BCLR_W v4i32:$ws, v4i32:$wt))>;
+def : MSAPat<(and v2i64:$ws, (xor (shl (v2i64 vsplati64_imm_eq_1),
+ (vsplati64imm63 v2i64:$wt)),
+ (bitconvert (v4i32 immAllOnesV)))),
+ (v2i64 (BCLR_D v2i64:$ws, v2i64:$wt))>;
+
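
A numeric check (sketch only) of the identity the BCLR patterns above match: clearing bit n of each element is x & ~(1 << (n mod EltBits)), with the modulo supplied by the vsplat*imm* AND in the pattern.

    #include <cassert>
    #include <cstdint>

    // Scalar form of BCLR_W: the pattern's (and $wt, splat(31)) is N & 31 here.
    static uint32_t bclr32(uint32_t X, uint32_t N) {
      return X & ~(1u << (N & 31));
    }

    int main() {
      assert(bclr32(0xFFu, 3) == 0xF7u);
      assert(bclr32(0xFFu, 35) == 0xF7u);  // 35 & 31 == 3: the amount wraps
    }
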
// Vector extraction with fixed index.
//
// Extracting 32-bit values on MSA32 should always use COPY_S_W rather than
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
index e2da8477295b..bf7f079e3105 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -1547,11 +1547,24 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm);
}
+static SDValue truncateVecElts(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ EVT ResTy = Op->getValueType(0);
+ SDValue Vec = Op->getOperand(2);
+ bool BigEndian = !DAG.getSubtarget().getTargetTriple().isLittleEndian();
+ MVT ResEltTy = ResTy == MVT::v2i64 ? MVT::i64 : MVT::i32;
+ SDValue ConstValue = DAG.getConstant(Vec.getScalarValueSizeInBits() - 1,
+ DL, ResEltTy);
+ SDValue SplatVec = getBuildVectorSplat(ResTy, ConstValue, BigEndian, DAG);
+
+ return DAG.getNode(ISD::AND, DL, ResTy, Vec, SplatVec);
+}
+
static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) {
EVT ResTy = Op->getValueType(0);
SDLoc DL(Op);
SDValue One = DAG.getConstant(1, DL, ResTy);
- SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, Op->getOperand(2));
+ SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, truncateVecElts(Op, DAG));
return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1),
DAG.getNOT(DL, Bit, ResTy));
@@ -1687,7 +1700,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1),
DAG.getNode(ISD::SHL, DL, VecTy, One,
- Op->getOperand(2)));
+ truncateVecElts(Op, DAG)));
}
case Intrinsic::mips_bnegi_b:
case Intrinsic::mips_bnegi_h:
@@ -1723,7 +1736,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1),
DAG.getNode(ISD::SHL, DL, VecTy, One,
- Op->getOperand(2)));
+ truncateVecElts(Op, DAG)));
}
case Intrinsic::mips_bseti_b:
case Intrinsic::mips_bseti_h:
@@ -2210,7 +2223,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_sll_w:
case Intrinsic::mips_sll_d:
return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1),
- Op->getOperand(2));
+ truncateVecElts(Op, DAG));
case Intrinsic::mips_slli_b:
case Intrinsic::mips_slli_h:
case Intrinsic::mips_slli_w:
@@ -2240,7 +2253,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_sra_w:
case Intrinsic::mips_sra_d:
return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1),
- Op->getOperand(2));
+ truncateVecElts(Op, DAG));
case Intrinsic::mips_srai_b:
case Intrinsic::mips_srai_h:
case Intrinsic::mips_srai_w:
@@ -2270,7 +2283,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_srl_w:
case Intrinsic::mips_srl_d:
return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1),
- Op->getOperand(2));
+ truncateVecElts(Op, DAG));
case Intrinsic::mips_srli_b:
case Intrinsic::mips_srli_h:
case Intrinsic::mips_srli_w:
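
A scalar sketch (illustration, not patch content) of the masking that truncateVecElts performs above: MSA shift instructions use only the low log2(EltBits) bits of each shift amount, so the lowering ANDs the amount vector with EltBits - 1 before emitting a generic SHL/SRA/SRL node, whose behavior is otherwise undefined for out-of-range amounts.

    #include <cassert>
    #include <cstdint>

    static uint32_t mipsShl32(uint32_t V, uint32_t Amt) {
      return V << (Amt & 31);   // wrapped amount, matching the hardware
    }

    int main() {
      assert(mipsShl32(1, 5) == 32);
      assert(mipsShl32(1, 33) == 2);  // 33 & 31 == 1
    }
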
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
index 78bdf4e698d8..bdd0f156c8af 100644
--- a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
@@ -27,7 +27,7 @@ void NVPTXMCAsmInfo::anchor() {}
NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Triple &TheTriple) {
if (TheTriple.getArch() == Triple::nvptx64) {
- PointerSize = CalleeSaveStackSlotSize = 8;
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
}
CommentString = "//";
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 21e25de80dc7..ba28cd83278b 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -2004,7 +2004,7 @@ void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
for (unsigned I = 0, E = DL.getTypeAllocSize(CPV->getType()); I < E; ++I) {
uint8_t Byte = Val.getLoBits(8).getZExtValue();
aggBuffer->addBytes(&Byte, 1, 1);
- Val = Val.lshr(8);
+ Val.lshrInPlace(8);
}
return;
}
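
A sketch of the byte-emission loop above with a plain integer standing in for llvm::APInt; the point of the lshr-to-lshrInPlace change is that the shift now mutates the value instead of materializing a fresh wide integer on every iteration.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Serialize Val as NumBytes little-endian bytes by shifting in place.
    static std::vector<uint8_t> toBytes(uint64_t Val, unsigned NumBytes) {
      std::vector<uint8_t> Out;
      for (unsigned I = 0; I < NumBytes; ++I) {
        Out.push_back(uint8_t(Val & 0xff)); // low 8 bits, like getLoBits(8)
        Val >>= 8;                          // mutate in place, like lshrInPlace(8)
      }
      return Out;
    }

    int main() {
      auto B = toBytes(0x0A0B0C0Du, 4);
      assert(B[0] == 0x0D && B[3] == 0x0A);
    }
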
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index d8fab5b7c01a..d30bf1a56e8a 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -20,7 +20,7 @@ void PPCMCAsmInfoDarwin::anchor() { }
PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit, const Triple& T) {
if (is64Bit) {
- PointerSize = CalleeSaveStackSlotSize = 8;
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
}
IsLittleEndian = false;
@@ -50,7 +50,7 @@ PPCELFMCAsmInfo::PPCELFMCAsmInfo(bool is64Bit, const Triple& T) {
NeedsLocalForSize = true;
if (is64Bit) {
- PointerSize = CalleeSaveStackSlotSize = 8;
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
}
IsLittleEndian = T.getArch() == Triple::ppc64le;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 9c72638023bb..125c00295f88 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -2977,10 +2977,10 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
SelectAddrIdxOnly(LD->getBasePtr(), Base, Offset)) {
SDValue Chain = LD->getChain();
SDValue Ops[] = { Base, Offset, Chain };
- SDNode *NewN = CurDAG->SelectNodeTo(N, PPC::LXVDSX,
- N->getValueType(0), Ops);
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = LD->getMemOperand();
+ SDNode *NewN = CurDAG->SelectNodeTo(N, PPC::LXVDSX,
+ N->getValueType(0), Ops);
cast<MachineSDNode>(NewN)->setMemRefs(MemOp, MemOp + 1);
return;
}
diff --git a/contrib/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp b/contrib/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
index b164df8b595a..d622911e92c4 100644
--- a/contrib/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
@@ -18,7 +18,7 @@ using namespace llvm;
void RISCVMCAsmInfo::anchor() {}
RISCVMCAsmInfo::RISCVMCAsmInfo(const Triple &TT) {
- PointerSize = CalleeSaveStackSlotSize = TT.isArch64Bit() ? 8 : 4;
+ CodePointerSize = CalleeSaveStackSlotSize = TT.isArch64Bit() ? 8 : 4;
CommentString = "#";
AlignmentIsInBytes = false;
SupportsDebugInformation = true;
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
index 3ed09898fb78..21df60237d96 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -28,7 +28,7 @@ SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Triple &TheTriple) {
IsLittleEndian = (TheTriple.getArch() == Triple::sparcel);
if (isV9) {
- PointerSize = CalleeSaveStackSlotSize = 8;
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
}
Data16bitsDirective = "\t.half\t";
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
index b17977d41be1..6e00981939b6 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -14,7 +14,7 @@
using namespace llvm;
SystemZMCAsmInfo::SystemZMCAsmInfo(const Triple &TT) {
- PointerSize = 8;
+ CodePointerSize = 8;
CalleeSaveStackSlotSize = 8;
IsLittleEndian = false;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 84d3c7bed50a..f2fd581f7847 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -829,7 +829,7 @@ bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
return isTruncateFree(FromType, ToType);
}
-bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
return CI->isTailCall();
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 7d92a7355877..1c34dc43e8bb 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -454,7 +454,7 @@ public:
MachineBasicBlock *BB) const override;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
bool allowTruncateForTailCall(Type *, Type *) const override;
- bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
index 2dcec5263fa1..5f8c78ed1683 100644
--- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
@@ -22,7 +22,7 @@ using namespace llvm;
WebAssemblyMCAsmInfoELF::~WebAssemblyMCAsmInfoELF() {}
WebAssemblyMCAsmInfoELF::WebAssemblyMCAsmInfoELF(const Triple &T) {
- PointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
+ CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
// TODO: What should MaxInstLength be?
@@ -55,7 +55,7 @@ WebAssemblyMCAsmInfoELF::WebAssemblyMCAsmInfoELF(const Triple &T) {
WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() {}
WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) {
- PointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
+ CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
// TODO: What should MaxInstLength be?
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
index a0b008947491..544cd653fd72 100644
--- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -94,6 +94,8 @@ void WebAssemblyMCCodeEmitter::encodeInstruction(
MCFixupKind(WebAssembly::fixup_code_global_index), MI.getLoc()));
++MCNumFixups;
encodeULEB128(uint64_t(MO.getImm()), OS);
+ } else if (Info.OperandType == WebAssembly::OPERAND_SIGNATURE) {
+ encodeSLEB128(int64_t(MO.getImm()), OS);
} else {
encodeULEB128(uint64_t(MO.getImm()), OS);
}
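
For reference, a minimal signed-LEB128 encoder sketch (the standard LEB128 algorithm, not LLVM's encodeSLEB128 itself), showing why signature operands need the signed variant: small negative values terminate with their sign bit intact instead of being zero-extended.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Standard SLEB128: 7 data bits per byte, high bit marks continuation;
    // stop once the remaining value is all sign bits and representable.
    static std::vector<uint8_t> sleb128(int64_t V) {
      std::vector<uint8_t> Out;
      bool More = true;
      while (More) {
        uint8_t Byte = V & 0x7f;
        V >>= 7;                                  // arithmetic shift in practice
        More = !((V == 0 && !(Byte & 0x40)) ||
                 (V == -1 && (Byte & 0x40)));
        if (More)
          Byte |= 0x80;                           // continuation bit
        Out.push_back(Byte);
      }
      return Out;
    }

    int main() {
      assert(sleb128(-1) == std::vector<uint8_t>{0x7f});  // ULEB128 would differ
      assert(sleb128(64).size() == 2);                    // needs explicit sign 0
    }
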
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
index f4c9a4ef6b9c..559165e4c86b 100644
--- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
@@ -54,7 +54,7 @@ FunctionPass *llvm::createWebAssemblyOptimizeReturned() {
void OptimizeReturned::visitCallSite(CallSite CS) {
for (unsigned i = 0, e = CS.getNumArgOperands(); i < e; ++i)
- if (CS.paramHasAttr(0, Attribute::Returned)) {
+ if (CS.paramHasAttr(i, Attribute::Returned)) {
Instruction *Inst = CS.getInstruction();
Value *Arg = CS.getArgOperand(i);
// Ignore constants, globals, undef, etc.
diff --git a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt
index 8dd5e8a03e2e..8e8e5fd1eff1 100644
--- a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt
+++ b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt
@@ -1,5 +1,15 @@
# Tests which are known to fail from the GCC torture test suite.
+# Syntax: Each line has a single test to be marked as a 'known failure' (or
+# 'exclusion'). Known failures are expected to fail, and will cause an error if
+# they pass. (Known failures that do not run at all will not cause an
+# error). The format is
+# <name> <attributes> # comment
+#
+# The attributes in this case represent the different arguments passed to the
+# compiler: 'wasm-s' is for compiling to .s files, and 'wasm-o' for compiling
+# to wasm object files (.o).
+
# Computed gotos are not supported (Cannot select BlockAddress/BRIND)
20040302-1.c
20071210-1.c
@@ -66,3 +76,21 @@ pr41935.c
920728-1.c
pr28865.c
widechar-2.c
+
+# crash: Running pass 'WebAssembly Explicit Locals' on function
+20020107-1.c wasm-o
+20030222-1.c wasm-o
+20071220-1.c wasm-o
+20071220-2.c wasm-o
+990130-1.c wasm-o
+pr38533.c wasm-o
+pr41239.c wasm-o
+pr43385.c wasm-o
+pr43560.c wasm-o
+pr45695.c wasm-o
+pr49279.c wasm-o
+pr49390.c wasm-o
+pr52286.c wasm-o
+
+# fatal error: error in backend: data symbols must have a size set with .size
+921110-1.c wasm-o
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index 48a1d8f1330c..9c35a251e480 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -43,7 +43,7 @@ void X86MCAsmInfoDarwin::anchor() { }
X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
bool is64Bit = T.getArch() == Triple::x86_64;
if (is64Bit)
- PointerSize = CalleeSaveStackSlotSize = 8;
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
AssemblerDialect = AsmWriterFlavor;
@@ -92,7 +92,7 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
// For ELF, x86-64 pointer size depends on the ABI.
// For x86-64 without the x32 ABI, pointer size is 8. For x86 and for x86-64
// with the x32 ABI, pointer size remains the default 4.
- PointerSize = (is64Bit && !isX32) ? 8 : 4;
+ CodePointerSize = (is64Bit && !isX32) ? 8 : 4;
// OTOH, stack slot size is always 8 for x86-64, even with the x32 ABI.
CalleeSaveStackSlotSize = is64Bit ? 8 : 4;
@@ -129,7 +129,7 @@ X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
if (Triple.getArch() == Triple::x86_64) {
PrivateGlobalPrefix = ".L";
PrivateLabelPrefix = ".L";
- PointerSize = 8;
+ CodePointerSize = 8;
WinEHEncodingType = WinEH::EncodingType::Itanium;
} else {
// 32-bit X86 doesn't use CFI, so this isn't a real encoding type. It's just
@@ -156,7 +156,7 @@ X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) {
if (Triple.getArch() == Triple::x86_64) {
PrivateGlobalPrefix = ".L";
PrivateLabelPrefix = ".L";
- PointerSize = 8;
+ CodePointerSize = 8;
WinEHEncodingType = WinEH::EncodingType::Itanium;
ExceptionsType = ExceptionHandling::WinEH;
} else {
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
index 78e0bca4158e..8678a13b95d0 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1698,21 +1698,18 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
}
-// NOTE: this only has a subset of the full frame index logic. In
-// particular, the FI < 0 and AfterFPPop logic is handled in
-// X86RegisterInfo::eliminateFrameIndex, but not here. Possibly
-// (probably?) it should be moved into here.
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
+ bool IsFixed = MFI.isFixedObjectIndex(FI);
// We can't calculate offset from frame pointer if the stack is realigned,
// so enforce usage of stack/base pointer. The base pointer is used when we
// have dynamic allocas in addition to dynamic realignment.
if (TRI->hasBasePointer(MF))
- FrameReg = TRI->getBaseRegister();
+ FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
else if (TRI->needsStackRealignment(MF))
- FrameReg = TRI->getStackRegister();
+ FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
else
FrameReg = TRI->getFrameRegister(MF);
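
An illustrative restatement of the selection above (simplified toy logic, not the LLVM API): fixed-frame objects such as incoming arguments keep a constant offset from the frame pointer, so when the stack is realigned or a base pointer is in use, addressing them via FP stays correct while SP/BP-relative offsets would not be.

    enum class Reg { FP, SP, BP };

    static Reg pickFrameReg(bool HasBasePtr, bool NeedsRealign, bool IsFixed) {
      if (HasBasePtr)
        return IsFixed ? Reg::FP : Reg::BP;
      if (NeedsRealign)
        return IsFixed ? Reg::FP : Reg::SP;
      return Reg::FP;  // default frame register (simplified)
    }

    int main() {
      return pickFrameReg(true, false, true) == Reg::FP ? 0 : 1;
    }
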
diff --git a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index eb5c56ff2ff9..2d788bf0cf99 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1311,8 +1311,9 @@ bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
++Cost;
// If the base is a register with multiple uses, this
// transformation may save a mov.
- if ((AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base_Reg.getNode() &&
+ // FIXME: Don't rely on DELETED_NODEs.
+ if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
+ AM.Base_Reg->getOpcode() != ISD::DELETED_NODE &&
!AM.Base_Reg.getNode()->hasOneUse()) ||
AM.BaseType == X86ISelAddressMode::FrameIndexBase)
--Cost;
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6bf3672c3c08..b5f29fb400ef 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2742,13 +2742,13 @@ static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
}
-bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
auto Attr =
CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
if (!CI->isTailCall() || Attr.getValueAsString() == "true")
return false;
- CallSite CS(CI);
+ ImmutableCallSite CS(CI);
CallingConv::ID CalleeCC = CS.getCallingConv();
if (!mayTailCallThisCC(CalleeCC))
return false;
@@ -8327,13 +8327,13 @@ static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
Zeroable.setBit(i);
else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
APInt Val = Cst->getAPIntValue();
- Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+ Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
Val = Val.getLoBits(ScalarSizeInBits);
if (Val == 0)
Zeroable.setBit(i);
} else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
APInt Val = Cst->getValueAPF().bitcastToAPInt();
- Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+ Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
Val = Val.getLoBits(ScalarSizeInBits);
if (Val == 0)
Zeroable.setBit(i);
@@ -16069,7 +16069,7 @@ static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
unsigned EltBits = EltVT.getSizeInBits();
// For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
APInt MaskElt =
- IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
+ IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignMask(EltBits);
const fltSemantics &Sem =
EltVT == MVT::f64 ? APFloat::IEEEdouble() :
(IsF128 ? APFloat::IEEEquad() : APFloat::IEEEsingle());
@@ -16132,9 +16132,9 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
// The mask constants are automatically splatted for vector types.
unsigned EltSizeInBits = VT.getScalarSizeInBits();
SDValue SignMask = DAG.getConstantFP(
- APFloat(Sem, APInt::getSignBit(EltSizeInBits)), dl, LogicVT);
+ APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
SDValue MagMask = DAG.getConstantFP(
- APFloat(Sem, ~APInt::getSignBit(EltSizeInBits)), dl, LogicVT);
+ APFloat(Sem, ~APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
// First, clear all bits but the sign bit from the second operand (sign).
if (IsFakeVector)
@@ -17344,10 +17344,10 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
// bits of the inputs before performing those operations.
if (FlipSigns) {
MVT EltVT = VT.getVectorElementType();
- SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), dl,
+ SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
VT);
- Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
- Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
+ Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
+ Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
}
SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
@@ -22111,11 +22111,11 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
}
// i64 vector arithmetic shift can be emulated with the transform:
- // M = lshr(SIGN_BIT, Amt)
+ // M = lshr(SIGN_MASK, Amt)
// ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
Op.getOpcode() == ISD::SRA) {
- SDValue S = DAG.getConstant(APInt::getSignBit(64), dl, VT);
+ SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
R = DAG.getNode(ISD::XOR, dl, VT, R, M);
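[Note: an illustrative scalar check of the identity behind this transform, standalone C++ and not part of the patch. With M the logically shifted sign mask, xor-then-subtract sign-extends the bits shifted in by the logical shift.]

    #include <cstdint>
    static uint64_t emulatedAshr(uint64_t R, unsigned Amt) {
      uint64_t M = (1ULL << 63) >> Amt; // M = lshr(SIGN_MASK, Amt)
      uint64_t L = R >> Amt;            // logical shift of the value
      return (L ^ M) - M;               // equals (int64_t)R >> Amt
    }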
@@ -22647,7 +22647,7 @@ bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
// FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
- auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
+ auto PTy = cast<PointerType>(LI->getPointerOperandType());
return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
}
@@ -26722,8 +26722,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
// Low bits are known zero.
KnownZero.setLowBits(ShAmt);
} else {
- KnownZero = KnownZero.lshr(ShAmt);
- KnownOne = KnownOne.lshr(ShAmt);
+ KnownZero.lshrInPlace(ShAmt);
+ KnownOne.lshrInPlace(ShAmt);
// High bits are known zero.
KnownZero.setHighBits(ShAmt);
}
@@ -30152,7 +30152,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
// x s< 0 ? x^C : 0 --> subus x, C
if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
- OpRHSConst->getAPIntValue().isSignBit())
+ OpRHSConst->getAPIntValue().isSignMask())
// Note that we have to rebuild the RHS constant here to ensure we
// don't rely on particular values of undef lanes.
return DAG.getNode(
@@ -30203,7 +30203,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
return SDValue();
assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
- APInt DemandedMask(APInt::getSignBit(BitWidth));
+ APInt DemandedMask(APInt::getSignMask(BitWidth));
APInt KnownZero, KnownOne;
TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
DCI.isBeforeLegalizeOps());
@@ -31269,7 +31269,7 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
else if (X86ISD::VSRAI == Opcode)
Elt = Elt.ashr(ShiftImm);
else
- Elt = Elt.lshr(ShiftImm);
+ Elt.lshrInPlace(ShiftImm);
}
return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
}
@@ -32234,8 +32234,8 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV || !BV->isConstant())
return false;
- for (unsigned i = 0, e = V.getNumOperands(); i < e; i++) {
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(i));
+ for (SDValue Op : V->ops()) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
if (!C)
return false;
uint64_t Val = C->getZExtValue();
@@ -33428,8 +33428,8 @@ static SDValue isFNEG(SDNode *N) {
SDValue Op0 = peekThroughBitcasts(Op.getOperand(0));
unsigned EltBits = Op1.getScalarValueSizeInBits();
- auto isSignBitValue = [&](const ConstantFP *C) {
- return C->getValueAPF().bitcastToAPInt() == APInt::getSignBit(EltBits);
+ auto isSignMask = [&](const ConstantFP *C) {
+ return C->getValueAPF().bitcastToAPInt() == APInt::getSignMask(EltBits);
};
// There is more than one way to represent the same constant on
@@ -33440,21 +33440,21 @@ static SDValue isFNEG(SDNode *N) {
// We check all variants here.
if (Op1.getOpcode() == X86ISD::VBROADCAST) {
if (auto *C = getTargetConstantFromNode(Op1.getOperand(0)))
- if (isSignBitValue(cast<ConstantFP>(C)))
+ if (isSignMask(cast<ConstantFP>(C)))
return Op0;
} else if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1)) {
if (ConstantFPSDNode *CN = BV->getConstantFPSplatNode())
- if (isSignBitValue(CN->getConstantFPValue()))
+ if (isSignMask(CN->getConstantFPValue()))
return Op0;
} else if (auto *C = getTargetConstantFromNode(Op1)) {
if (C->getType()->isVectorTy()) {
if (auto *SplatV = C->getSplatValue())
- if (isSignBitValue(cast<ConstantFP>(SplatV)))
+ if (isSignMask(cast<ConstantFP>(SplatV)))
return Op0;
} else if (auto *FPConst = dyn_cast<ConstantFP>(C))
- if (isSignBitValue(FPConst))
+ if (isSignMask(FPConst))
return Op0;
}
return SDValue();
@@ -34631,7 +34631,7 @@ static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
return SDValue();
ShrinkMode Mode;
- if (!canReduceVMulWidth(MulOp.getNode(), DAG, Mode))
+ if (!canReduceVMulWidth(MulOp.getNode(), DAG, Mode) || Mode == MULU16)
return SDValue();
EVT VT = N->getValueType(0);
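[Note: many hunks in this file track two APInt API changes: getSignBit() is renamed getSignMask(), and copy-returning lshr() calls become in-place lshrInPlace(). A minimal sketch, assuming this revision's llvm/ADT/APInt.h:]

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;
    void apintDemo() {
      APInt Mask = APInt::getSignMask(8); // 0x80: only the sign bit set
      APInt Val(8, 0xF0);
      Val.lshrInPlace(4);                 // shifts in place, no temporary
      bool SignSet = (Val & Mask) != 0;   // false: value is now 0x0F
      (void)SignSet;
    }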
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index ab4910daca02..190a88335000 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1207,7 +1207,7 @@ namespace llvm {
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
- bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
ISD::NodeType ExtendKind) const override;
diff --git a/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp b/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 6cc5e8b63597..fb9315792892 100644
--- a/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -67,6 +67,8 @@ private:
MachineFunction &MF) const;
bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
+ bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
const X86Subtarget &STI;
const X86InstrInfo &TII;
@@ -99,6 +101,10 @@ X86InstructionSelector::X86InstructionSelector(const X86Subtarget &STI,
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) {
if (RB.getID() == X86::GPRRegBankID) {
+ if (Ty.getSizeInBits() <= 8)
+ return &X86::GR8RegClass;
+ if (Ty.getSizeInBits() == 16)
+ return &X86::GR16RegClass;
if (Ty.getSizeInBits() == 32)
return &X86::GR32RegClass;
if (Ty.getSizeInBits() == 64)
@@ -207,6 +213,8 @@ bool X86InstructionSelector::select(MachineInstr &I) const {
return true;
if (selectConstant(I, MRI, MF))
return true;
+ if (selectTrunc(I, MRI, MF))
+ return true;
return selectImpl(I);
}
@@ -509,6 +517,59 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
+bool X86InstructionSelector::selectTrunc(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ if (I.getOpcode() != TargetOpcode::G_TRUNC)
+ return false;
+
+ const unsigned DstReg = I.getOperand(0).getReg();
+ const unsigned SrcReg = I.getOperand(1).getReg();
+
+ const LLT DstTy = MRI.getType(DstReg);
+ const LLT SrcTy = MRI.getType(SrcReg);
+
+ const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
+ const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
+
+ if (DstRB.getID() != SrcRB.getID()) {
+ DEBUG(dbgs() << "G_TRUNC input/output on different banks\n");
+ return false;
+ }
+
+ if (DstRB.getID() != X86::GPRRegBankID)
+ return false;
+
+ const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(DstTy, DstRB);
+ if (!DstRC)
+ return false;
+
+ const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(SrcTy, SrcRB);
+ if (!SrcRC)
+ return false;
+
+ if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
+ !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
+ DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
+ return false;
+ }
+
+ if (DstRC == SrcRC) {
+ // Nothing to be done
+ } else if (DstRC == &X86::GR32RegClass) {
+ I.getOperand(1).setSubReg(X86::sub_32bit);
+ } else if (DstRC == &X86::GR16RegClass) {
+ I.getOperand(1).setSubReg(X86::sub_16bit);
+ } else if (DstRC == &X86::GR8RegClass) {
+ I.getOperand(1).setSubReg(X86::sub_8bit);
+ } else {
+ return false;
+ }
+
+ I.setDesc(TII.get(X86::COPY));
+ return true;
+}
+
InstructionSelector *
llvm::createX86InstructionSelector(X86Subtarget &Subtarget,
X86RegisterBankInfo &RBI) {
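[Note: the new selectTrunc() lowers G_TRUNC within the GPR bank to a plain COPY reading a subregister of the source. A hypothetical helper restating the size-to-subregister mapping implied by getRegClassForTypeOnBank() above; the X86::sub_* indices are the ones the patch references.]

    static unsigned truncSubRegIdx(unsigned DstBits) {
      if (DstBits == 32) return X86::sub_32bit;
      if (DstBits == 16) return X86::sub_16bit;
      if (DstBits <= 8)  return X86::sub_8bit;
      return 0; // same-size classes take a plain COPY, no subregister
    }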
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp
index d395c826e6bf..0f8a750a0235 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp
@@ -68,6 +68,7 @@ X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) {
if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
switch (Ty.getSizeInBits()) {
+ case 1:
case 8:
return PMI_GPR8;
case 16:
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
index 58fa31e94fba..25958f0c3106 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -133,6 +133,11 @@ public:
unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const;
unsigned getStackRegister() const { return StackPtr; }
unsigned getBaseRegister() const { return BasePtr; }
+ /// Returns physical register used as frame pointer.

+ /// Returns the physical register used as the frame pointer.
+ /// This always returns the frame pointer register, unlike
+ /// getFrameRegister(), which may return the base pointer in situations
+ /// involving a stack, frame and base pointer.
+ unsigned getFramePtr() const { return FramePtr; }
// FIXME: Move to FrameInfo
unsigned getSlotSize() const { return SlotSize; }
};
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 375b74c494d9..8e26849ea9e3 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -167,15 +167,12 @@ bool DeadArgumentEliminationPass::DeleteDeadVarargs(Function &Fn) {
// Drop any attributes that were on the vararg arguments.
AttributeList PAL = CS.getAttributes();
- if (!PAL.isEmpty() && PAL.getSlotIndex(PAL.getNumSlots() - 1) > NumArgs) {
- SmallVector<AttributeList, 8> AttributesVec;
- for (unsigned i = 0; PAL.getSlotIndex(i) <= NumArgs; ++i)
- AttributesVec.push_back(PAL.getSlotAttributes(i));
- if (PAL.hasAttributes(AttributeList::FunctionIndex))
- AttributesVec.push_back(AttributeList::get(Fn.getContext(),
- AttributeList::FunctionIndex,
- PAL.getFnAttributes()));
- PAL = AttributeList::get(Fn.getContext(), AttributesVec);
+ if (!PAL.isEmpty()) {
+ SmallVector<AttributeSet, 8> ArgAttrs;
+ for (unsigned ArgNo = 0; ArgNo < NumArgs; ++ArgNo)
+ ArgAttrs.push_back(PAL.getParamAttributes(ArgNo));
+ PAL = AttributeList::get(Fn.getContext(), PAL.getFnAttributes(),
+ PAL.getRetAttributes(), ArgAttrs);
}
SmallVector<OperandBundleDef, 1> OpBundles;
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 4d13b3f40688..9648883b7f27 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -222,15 +222,11 @@ static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter) {
MadeChange = true;
// Clear out any existing attributes.
- AttrBuilder B;
- B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
- F->removeAttributes(
- AttributeList::FunctionIndex,
- AttributeList::get(F->getContext(), AttributeList::FunctionIndex, B));
+ F->removeFnAttr(Attribute::ReadOnly);
+ F->removeFnAttr(Attribute::ReadNone);
// Add in the new attribute.
- F->addAttribute(AttributeList::FunctionIndex,
- ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);
+ F->addFnAttr(ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);
if (ReadsMemory)
++NumReadOnly;
@@ -495,9 +491,6 @@ determinePointerReadAttrs(Argument *A,
static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) {
bool Changed = false;
- AttrBuilder B;
- B.addAttribute(Attribute::Returned);
-
// Check each function in turn, determining if an argument is always returned.
for (Function *F : SCCNodes) {
// We can infer and propagate function attributes only when we know that the
@@ -535,7 +528,7 @@ static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) {
if (Value *RetArg = FindRetArg()) {
auto *A = cast<Argument>(RetArg);
- A->addAttr(AttributeList::get(F->getContext(), A->getArgNo() + 1, B));
+ A->addAttr(Attribute::Returned);
++NumReturned;
Changed = true;
}
@@ -593,9 +586,6 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
ArgumentGraph AG;
- AttrBuilder B;
- B.addAttribute(Attribute::NoCapture);
-
// Check each function in turn, determining which pointer arguments are not
// captured.
for (Function *F : SCCNodes) {
@@ -614,7 +604,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
++A) {
if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
- A->addAttr(AttributeList::get(F->getContext(), A->getArgNo() + 1, B));
+ A->addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed = true;
}
@@ -633,8 +623,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
if (!Tracker.Captured) {
if (Tracker.Uses.empty()) {
// If it's trivially not captured, mark it nocapture now.
- A->addAttr(
- AttributeList::get(F->getContext(), A->getArgNo() + 1, B));
+ A->addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed = true;
} else {
@@ -660,9 +649,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
Self.insert(&*A);
Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
if (R != Attribute::None) {
- AttrBuilder B;
- B.addAttribute(R);
- A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B));
+ A->addAttr(R);
Changed = true;
R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
}
@@ -687,7 +674,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
if (ArgumentSCC[0]->Uses.size() == 1 &&
ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
Argument *A = ArgumentSCC[0]->Definition;
- A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B));
+ A->addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed = true;
}
@@ -729,7 +716,7 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
Argument *A = ArgumentSCC[i]->Definition;
- A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B));
+ A->addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed = true;
}
@@ -760,15 +747,12 @@ static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
}
if (ReadAttr != Attribute::None) {
- AttrBuilder B, R;
- B.addAttribute(ReadAttr);
- R.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
Argument *A = ArgumentSCC[i]->Definition;
// Clear out existing readonly/readnone attributes
- A->removeAttr(
- AttributeList::get(A->getContext(), A->getArgNo() + 1, R));
- A->addAttr(AttributeList::get(A->getContext(), A->getArgNo() + 1, B));
+ A->removeAttr(Attribute::ReadOnly);
+ A->removeAttr(Attribute::ReadNone);
+ A->addAttr(ReadAttr);
ReadAttr == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
Changed = true;
}
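[Note: the repeated simplification in this file replaces hand-built AttributeList objects with the direct enum-based Argument API added in this revision. A minimal sketch, assuming these IR headers:]

    #include "llvm/IR/Argument.h"
    #include "llvm/IR/Attributes.h"
    void markNoCapture(llvm::Argument &A) {
      if (A.getType()->isPointerTy() && !A.hasNoCaptureAttr())
        A.addAttr(llvm::Attribute::NoCapture); // enum-only attribute, direct add
    }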
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index ade4f21ceb52..ae9d4ce11e0d 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1979,16 +1979,11 @@ static void ChangeCalleesToFastCall(Function *F) {
}
}
-static AttributeList StripNest(LLVMContext &C, const AttributeList &Attrs) {
- for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
- unsigned Index = Attrs.getSlotIndex(i);
- if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest))
- continue;
-
- // There can be only one.
- return Attrs.removeAttribute(C, Index, Attribute::Nest);
- }
-
+static AttributeList StripNest(LLVMContext &C, AttributeList Attrs) {
+ // There can be at most one attribute set with a nest attribute.
+ unsigned NestIndex;
+ if (Attrs.hasAttrSomewhere(Attribute::Nest, &NestIndex))
+ return Attrs.removeAttribute(C, NestIndex, Attribute::Nest);
return Attrs;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/SampleProfile.cpp b/contrib/llvm/lib/Transforms/IPO/SampleProfile.cpp
index 3371de6e3d14..e755e2bd8f26 100644
--- a/contrib/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -43,6 +43,7 @@
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
@@ -208,6 +209,12 @@ protected:
/// the same number of times.
EquivalenceClassMap EquivalenceClass;
+ /// Map from function name to Function *. Used to find the function from
+ /// the function name. If the function name contains a suffix, an
+ /// additional entry is added to map the stripped name to the function,
+ /// provided the mapping is one-to-one.
+ StringMap<Function *> SymbolMap;
+
/// \brief Dominance, post-dominance and loop information.
std::unique_ptr<DominatorTree> DT;
std::unique_ptr<DominatorTreeBase<BasicBlock>> PDT;
@@ -670,7 +677,7 @@ bool SampleProfileLoader::inlineHotFunctions(
for (auto &I : BB.getInstList()) {
const FunctionSamples *FS = nullptr;
if ((isa<CallInst>(I) || isa<InvokeInst>(I)) &&
- (FS = findCalleeFunctionSamples(I))) {
+ !isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(I))) {
Candidates.push_back(&I);
if (callsiteIsHot(Samples, FS))
Hot = true;
@@ -689,7 +696,10 @@ bool SampleProfileLoader::inlineHotFunctions(
for (const auto *FS : findIndirectCallFunctionSamples(*I)) {
auto CalleeFunctionName = FS->getName();
const char *Reason = "Callee function not available";
- CalledFunction = F.getParent()->getFunction(CalleeFunctionName);
+ auto R = SymbolMap.find(CalleeFunctionName);
+ if (R == SymbolMap.end())
+ continue;
+ CalledFunction = R->getValue();
if (CalledFunction && isLegalToPromote(I, CalledFunction, &Reason)) {
// The indirect target was promoted and inlined in the profile, as a
// result, we do not have profile info for the branch probability.
@@ -1181,8 +1191,11 @@ void SampleProfileLoader::propagateWeights(Function &F) {
if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
continue;
+ DebugLoc BranchLoc = TI->getDebugLoc();
DEBUG(dbgs() << "\nGetting weights for branch at line "
- << TI->getDebugLoc().getLine() << ".\n");
+ << ((BranchLoc) ? Twine(BranchLoc.getLine())
+ : Twine("<UNKNOWN LOCATION>"))
+ << ".\n");
SmallVector<uint32_t, 4> Weights;
uint32_t MaxWeight = 0;
DebugLoc MaxDestLoc;
@@ -1219,7 +1232,6 @@ void SampleProfileLoader::propagateWeights(Function &F) {
DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
TI->setMetadata(llvm::LLVMContext::MD_prof,
MDB.createBranchWeights(Weights));
- DebugLoc BranchLoc = TI->getDebugLoc();
emitOptimizationRemark(
Ctx, DEBUG_TYPE, F, MaxDestLoc,
Twine("most popular destination for conditional branches at ") +
@@ -1414,6 +1426,26 @@ bool SampleProfileLoader::runOnModule(Module &M) {
for (const auto &I : Reader->getProfiles())
TotalCollectedSamples += I.second.getTotalSamples();
+ // Populate the symbol map.
+ for (const auto &N_F : M.getValueSymbolTable()) {
+ std::string OrigName = N_F.getKey();
+ Function *F = dyn_cast<Function>(N_F.getValue());
+ if (F == nullptr)
+ continue;
+ SymbolMap[OrigName] = F;
+ auto pos = OrigName.find('.');
+ if (pos != std::string::npos) {
+ std::string NewName = OrigName.substr(0, pos);
+ auto r = SymbolMap.insert(std::make_pair(NewName, F));
+ // Failing to insert means there is already an entry in SymbolMap,
+ // so multiple functions map to the same stripped name. On such a
+ // name conflict, set the value to nullptr to avoid confusion.
+ if (!r.second)
+ r.first->second = nullptr;
+ }
+ }
+
bool retval = false;
for (auto &F : M)
if (!F.isDeclaration()) {
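[Note: a minimal sketch of the suffix-stripping insert logic added above, assuming llvm::StringMap's insert() semantics: the first function claims the stripped name and a second claimant poisons the entry.]

    #include "llvm/ADT/StringMap.h"
    #include "llvm/IR/Function.h"
    llvm::StringMap<llvm::Function *> SymbolMap;
    void addStripped(llvm::StringRef OrigName, llvm::Function *F) {
      size_t Pos = OrigName.find('.');
      if (Pos == llvm::StringRef::npos)
        return;
      auto R = SymbolMap.insert({OrigName.substr(0, Pos), F});
      if (!R.second)              // two functions share the base name
        R.first->second = nullptr;
    }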
diff --git a/contrib/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/contrib/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
index 65deb82cd2a5..9801a0a61416 100644
--- a/contrib/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
@@ -363,6 +363,7 @@ void splitAndWriteThinLTOBitcode(
W.writeModule(&M, /*ShouldPreserveUseListOrder=*/false, &Index,
/*GenerateHash=*/true, &ModHash);
W.writeModule(MergedM.get());
+ W.writeStrtab();
OS << Buffer;
// If a minimized bitcode module was requested for the thin link,
@@ -375,6 +376,7 @@ void splitAndWriteThinLTOBitcode(
W2.writeModule(&M, /*ShouldPreserveUseListOrder=*/false, &Index,
/*GenerateHash=*/false, &ModHash);
W2.writeModule(MergedM.get());
+ W2.writeStrtab();
*ThinLinkOS << Buffer;
}
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 174ec8036274..e30a4bafb9b0 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1044,14 +1044,14 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
const APInt *RHSC;
if (match(RHS, m_APInt(RHSC))) {
- if (RHSC->isSignBit()) {
+ if (RHSC->isSignMask()) {
// If wrapping is not allowed, then the addition must set the sign bit:
- // X + (signbit) --> X | signbit
+ // X + (signmask) --> X | signmask
if (I.hasNoSignedWrap() || I.hasNoUnsignedWrap())
return BinaryOperator::CreateOr(LHS, RHS);
// If wrapping is allowed, then the addition flips the sign bit of LHS:
- // X + (signbit) --> X ^ signbit
+ // X + (signmask) --> X ^ signmask
return BinaryOperator::CreateXor(LHS, RHS);
}
@@ -1120,9 +1120,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
XorLHS);
}
- // (X + signbit) + C could have gotten canonicalized to (X ^ signbit) + C,
- // transform them into (X + (signbit ^ C))
- if (XorRHS->getValue().isSignBit())
+ // (X + signmask) + C could have gotten canonicalized to (X^signmask) + C,
+ // transform them into (X + (signmask ^ C))
+ if (XorRHS->getValue().isSignMask())
return BinaryOperator::CreateAdd(XorLHS,
ConstantExpr::getXor(XorRHS, CI));
}
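[Note: an illustrative 8-bit check of the sign-mask facts these folds rely on, standalone C++: adding the sign mask flips only the sign bit modulo 2^n, so it equals XOR, and when the bit is known clear (the no-wrap case) it equals OR.]

    #include <cstdint>
    static_assert((uint8_t)(0x35 + 0x80) == (uint8_t)(0x35 ^ 0x80),
                  "wrapping add of the sign mask flips the sign bit");
    static_assert((uint8_t)(0x35 + 0x80) == (uint8_t)(0x35 | 0x80),
                  "with the sign bit clear, the add just sets it");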
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index b2a41c699202..3a98e8937bda 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2078,7 +2078,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
Value *NOr = Builder->CreateOr(A, Op1);
NOr->takeName(Op0);
return BinaryOperator::CreateXor(NOr,
- cast<Instruction>(Op0)->getOperand(1));
+ ConstantInt::get(NOr->getType(), *C));
}
// Y|(X^C) -> (X|Y)^C iff Y&C == 0
@@ -2087,7 +2087,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
Value *NOr = Builder->CreateOr(A, Op0);
NOr->takeName(Op0);
return BinaryOperator::CreateXor(NOr,
- cast<Instruction>(Op1)->getOperand(1));
+ ConstantInt::get(NOr->getType(), *C));
}
}
@@ -2480,8 +2480,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
return BinaryOperator::CreateSub(SubOne(NegOp0CI),
Op0I->getOperand(0));
- } else if (RHSC->getValue().isSignBit()) {
- // (X + C) ^ signbit -> (X + C + signbit)
+ } else if (RHSC->getValue().isSignMask()) {
+ // (X + C) ^ signmask -> (X + C + signmask)
Constant *C = Builder->getInt(RHSC->getValue() + Op0CI->getValue());
return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 69484f47223f..e7aa1a457371 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -839,7 +839,8 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
// Length bits.
if (CI0) {
APInt Elt = CI0->getValue();
- Elt = Elt.lshr(Index).zextOrTrunc(Length);
+ Elt.lshrInPlace(Index);
+ Elt = Elt.zextOrTrunc(Length);
return LowConstantHighUndef(Elt.getZExtValue());
}
@@ -1036,7 +1037,7 @@ static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
// The PD variants uses bit 1 to select per-lane element index, so
// shift down to convert to generic shuffle mask index.
if (IsPD)
- Index = Index.lshr(1);
+ Index.lshrInPlace(1);
// The _256 variants are a bit trickier since the mask bits always index
// into the corresponding 128 half. In order to convert to a generic
@@ -4067,21 +4068,15 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
}
if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
- !CallerPAL.isEmpty())
+ !CallerPAL.isEmpty()) {
// In this case we have more arguments than the new function type, but we
// won't be dropping them. Check that these extra arguments have attributes
// that are compatible with being a vararg call argument.
- for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
- unsigned Index = CallerPAL.getSlotIndex(i - 1);
- if (Index <= FT->getNumParams())
- break;
-
- // Check if it has an attribute that's incompatible with varargs.
- AttributeList PAttrs = CallerPAL.getSlotAttributes(i - 1);
- if (PAttrs.hasAttribute(Index, Attribute::StructRet))
- return false;
- }
-
+ unsigned SRetIdx;
+ if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
+ SRetIdx > FT->getNumParams())
+ return false;
+ }
// Okay, we decided that this is a safe thing to do: go ahead and start
// inserting cast instructions as necessary.
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 25683132c786..9127ddca5915 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1591,7 +1591,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
// GEP into CI would undo canonicalizing addrspacecast with different
// pointer types, causing infinite loops.
(!isa<AddrSpaceCastInst>(CI) ||
- GEP->getType() == GEP->getPointerOperand()->getType())) {
+ GEP->getType() == GEP->getPointerOperandType())) {
// Changing the cast operand is usually not a good idea but it is safe
// here because the pointer operand is being replaced with another
// pointer operand so the opcode doesn't need to change.
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bbafa9e9f468..003029ae39d5 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -140,7 +140,7 @@ static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
case ICmpInst::ICMP_UGE:
// True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
TrueIfSigned = true;
- return RHS.isSignBit();
+ return RHS.isSignMask();
default:
return false;
}
@@ -1532,14 +1532,14 @@ Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
}
if (Xor->hasOneUse()) {
- // (icmp u/s (xor X SignBit), C) -> (icmp s/u X, (xor C SignBit))
- if (!Cmp.isEquality() && XorC->isSignBit()) {
+ // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
+ if (!Cmp.isEquality() && XorC->isSignMask()) {
Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
: Cmp.getSignedPredicate();
return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
}
- // (icmp u/s (xor X ~SignBit), C) -> (icmp s/u X, (xor C ~SignBit))
+ // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
: Cmp.getSignedPredicate();
@@ -2402,9 +2402,9 @@ Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
const APInt &Upper = CR.getUpper();
const APInt &Lower = CR.getLower();
if (Cmp.isSigned()) {
- if (Lower.isSignBit())
+ if (Lower.isSignMask())
return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
- if (Upper.isSignBit())
+ if (Upper.isSignMask())
return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
} else {
if (Lower.isMinValue())
@@ -2604,7 +2604,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
break;
// Replace (and X, (1 << size(X)-1) != 0) with x s< 0
- if (BOC->isSignBit()) {
+ if (BOC->isSignMask()) {
Constant *Zero = Constant::getNullValue(BOp0->getType());
auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
return new ICmpInst(NewPred, BOp0, Zero);
@@ -3032,9 +3032,9 @@ Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
BO1->getOperand(0));
- // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
+ // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
- if (CI->getValue().isSignBit()) {
+ if (CI->getValue().isSignMask()) {
ICmpInst::Predicate Pred =
I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
@@ -3797,7 +3797,7 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth,
bool isSignCheck) {
if (isSignCheck)
- return APInt::getSignBit(BitWidth);
+ return APInt::getSignMask(BitWidth);
ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
if (!CI) return APInt::getAllOnesValue(BitWidth);
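[Note: the icmp folds above swap signed and unsigned predicates when both operands are XORed with the sign mask. An illustrative standalone check of that bias trick:]

    #include <cstdint>
    static bool sltViaUnsigned(int8_t A, int8_t B) {
      // XOR with the sign mask maps signed order onto unsigned order.
      return (uint8_t)(A ^ 0x80) < (uint8_t)(B ^ 0x80); // == (A < B)
    }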
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 6288e054f1bc..675553017838 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -931,6 +931,18 @@ static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
return nullptr;
}
+static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
+ const Value *GEPI0 = GEPI->getOperand(0);
+ if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0)
+ return true;
+ }
+ if (isa<UndefValue>(Op) ||
+ (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0))
+ return true;
+ return false;
+}
+
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Value *Op = LI.getOperand(0);
@@ -979,27 +991,13 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (!LI.isUnordered()) return nullptr;
// load(gep null, ...) -> unreachable
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
- const Value *GEPI0 = GEPI->getOperand(0);
- // TODO: Consider a target hook for valid address spaces for this xform.
- if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
- // Insert a new store to null instruction before the load to indicate
- // that this code is not reachable. We do this instead of inserting
- // an unreachable instruction directly because we cannot modify the
- // CFG.
- new StoreInst(UndefValue::get(LI.getType()),
- Constant::getNullValue(Op->getType()), &LI);
- return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
- }
- }
-
// load null/undef -> unreachable
- // TODO: Consider a target hook for valid address spaces for this xform.
- if (isa<UndefValue>(Op) ||
- (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
- // Insert a new store to null instruction before the load to indicate that
- // this code is not reachable. We do this instead of inserting an
- // unreachable instruction directly because we cannot modify the CFG.
+ // TODO: Consider a target hook for valid address spaces for this xform.
+ if (canSimplifyNullLoadOrGEP(LI, Op)) {
+ // Insert a new store to null instruction before the load to indicate
+ // that this code is not reachable. We do this instead of inserting
+ // an unreachable instruction directly because we cannot modify the
+ // CFG.
new StoreInst(UndefValue::get(LI.getType()),
Constant::getNullValue(Op->getType()), &LI);
return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index f1ac82057e6c..ce66581a491a 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -944,22 +944,21 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
}
}
- if (ConstantInt *One = dyn_cast<ConstantInt>(Op0)) {
- if (One->isOne() && !I.getType()->isIntegerTy(1)) {
- bool isSigned = I.getOpcode() == Instruction::SDiv;
- if (isSigned) {
- // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
- // result is one, if Op1 is -1 then the result is minus one, otherwise
- // it's zero.
- Value *Inc = Builder->CreateAdd(Op1, One);
- Value *Cmp = Builder->CreateICmpULT(
- Inc, ConstantInt::get(I.getType(), 3));
- return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0));
- } else {
- // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
- // result is one, otherwise it's zero.
- return new ZExtInst(Builder->CreateICmpEQ(Op1, One), I.getType());
- }
+ if (match(Op0, m_One())) {
+ assert(!I.getType()->getScalarType()->isIntegerTy(1) &&
+ "i1 divide not removed?");
+ if (I.getOpcode() == Instruction::SDiv) {
+ // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
+ // result is one, if Op1 is -1 then the result is minus one, otherwise
+ // it's zero.
+ Value *Inc = Builder->CreateAdd(Op1, Op0);
+ Value *Cmp = Builder->CreateICmpULT(
+ Inc, ConstantInt::get(I.getType(), 3));
+ return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0));
+ } else {
+ // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
+ // result is one, otherwise it's zero.
+ return new ZExtInst(Builder->CreateICmpEQ(Op1, Op0), I.getType());
}
}
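[Note: a standalone check of the 1 /s X fold above: Inc = X + 1 is unsigned-less-than 3 exactly for X in {-1, 0, 1}, so the select yields X for +/-1 and zero otherwise; X == 0 is already undefined for division.]

    #include <cstdint>
    static int32_t oneSDivX(int32_t X) {
      uint32_t Inc = (uint32_t)X + 1; // wraps to 0 for X == -1
      return Inc < 3 ? X : 0;
    }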
@@ -1238,25 +1237,23 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
// If the sign bits of both operands are zero (i.e. we can prove they are
// unsigned inputs), turn this into a udiv.
- if (I.getType()->isIntegerTy()) {
- APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
- if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
- if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
- // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
- auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
- BO->setIsExact(I.isExact());
- return BO;
- }
+ APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
+ if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
+ if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
+ // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
+ auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
+ BO->setIsExact(I.isExact());
+ return BO;
+ }
- if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
- // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
- // Safe because the only negative value (1 << Y) can take on is
- // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
- // the sign bit set.
- auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
- BO->setIsExact(I.isExact());
- return BO;
- }
+ if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
+ // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
+ // Safe because the only negative value (1 << Y) can take on is
+ // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
+ // the sign bit set.
+ auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
+ BO->setIsExact(I.isExact());
+ return BO;
}
}
@@ -1546,13 +1543,11 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
// If the sign bits of both operands are zero (i.e. we can prove they are
// unsigned inputs), turn this into a urem.
- if (I.getType()->isIntegerTy()) {
- APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
- if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
- MaskedValueIsZero(Op0, Mask, 0, &I)) {
- // X srem Y -> X urem Y, iff X and Y don't have sign bit set
- return BinaryOperator::CreateURem(Op0, Op1, I.getName());
- }
+ APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
+ if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
+ MaskedValueIsZero(Op0, Mask, 0, &I)) {
+ // X srem Y -> X urem Y, iff X and Y don't have sign bit set
+ return BinaryOperator::CreateURem(Op0, Op1, I.getName());
}
// If it's a constant vector, flip any negative values positive.
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 693b6c95c169..5d6d899da4b5 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -618,7 +618,7 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
{
unsigned BitWidth =
DL.getTypeSizeInBits(TrueVal->getType()->getScalarType());
- APInt MinSignedValue = APInt::getSignBit(BitWidth);
+ APInt MinSignedValue = APInt::getSignedMinValue(BitWidth);
Value *X;
const APInt *Y, *C;
bool TrueWhenUnset;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 9aa679c60e47..f77d713b9b07 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -370,7 +370,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
MaskV <<= Op1C->getZExtValue();
else {
assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
- MaskV = MaskV.lshr(Op1C->getZExtValue());
+ MaskV.lshrInPlace(Op1C->getZExtValue());
}
// shift1 & 0x00FF
@@ -760,7 +760,7 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
}
// See if we can turn a signed shr into an unsigned shr.
- if (MaskedValueIsZero(Op0, APInt::getSignBit(BitWidth), 0, &I))
+ if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I))
return BinaryOperator::CreateLShr(Op0, Op1);
return nullptr;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 4e6f02058d83..2ba052b7e02d 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -38,7 +38,7 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
// If there are no bits set that aren't demanded, nothing to do.
Demanded = Demanded.zextOrTrunc(C->getBitWidth());
- if ((~Demanded & *C) == 0)
+ if (C->isSubsetOf(Demanded))
return false;
// This instruction is producing bits that are not demanded. Shrink the RHS.
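[Note: the recurring rewrite in this file turns masked-equality checks into APInt::isSubsetOf(), new in this revision. The two forms below are equivalent predicates; a minimal sketch assuming llvm/ADT/APInt.h:]

    #include "llvm/ADT/APInt.h"
    bool oldForm(const llvm::APInt &Demanded, const llvm::APInt &Known) {
      return (Demanded & Known) == Demanded; // every demanded bit is known
    }
    bool newForm(const llvm::APInt &Demanded, const llvm::APInt &Known) {
      return Demanded.isSubsetOf(Known);     // same predicate, clearer intent
    }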
@@ -117,27 +117,16 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
KnownOne.getBitWidth() == BitWidth &&
"Value *V, DemandedMask, KnownZero and KnownOne "
"must have same BitWidth");
- const APInt *C;
- if (match(V, m_APInt(C))) {
- // We know all of the bits for a scalar constant or a splat vector constant!
- KnownOne = *C & DemandedMask;
- KnownZero = ~KnownOne & DemandedMask;
- return nullptr;
- }
- if (isa<ConstantPointerNull>(V)) {
- // We know all of the bits for a constant!
- KnownOne.clearAllBits();
- KnownZero = DemandedMask;
+
+ if (isa<Constant>(V)) {
+ computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
return nullptr;
}
KnownZero.clearAllBits();
KnownOne.clearAllBits();
- if (DemandedMask == 0) { // Not demanding any bits from V.
- if (isa<UndefValue>(V))
- return nullptr;
+ if (DemandedMask == 0) // Not demanding any bits from V.
return UndefValue::get(VTy);
- }
if (Depth == 6) // Limit search depth.
return nullptr;
@@ -187,16 +176,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
return Constant::getIntegerValue(VTy, IKnownOne);
// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and'.
- if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
- (DemandedMask & ~LHSKnownZero))
+ if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne))
return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
- (DemandedMask & ~RHSKnownZero))
+ if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne))
return I->getOperand(1);
// If the RHS is a constant, see if we can simplify it.
@@ -224,25 +211,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
return Constant::getIntegerValue(VTy, IKnownOne);
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'or'.
- if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
- (DemandedMask & ~LHSKnownOne))
+ if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero))
return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
- (DemandedMask & ~RHSKnownOne))
- return I->getOperand(1);
-
- // If all of the potentially set bits on one side are known to be set on
- // the other side, just use the 'other' side.
- if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
- (DemandedMask & (~RHSKnownZero)))
- return I->getOperand(0);
- if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
- (DemandedMask & (~LHSKnownZero)))
+ if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero))
return I->getOperand(1);
// If the RHS is a constant, see if we can simplify it.
@@ -271,20 +247,20 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
return Constant::getIntegerValue(VTy, IKnownOne);
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'xor'.
- if ((DemandedMask & RHSKnownZero) == DemandedMask)
+ if (DemandedMask.isSubsetOf(RHSKnownZero))
return I->getOperand(0);
- if ((DemandedMask & LHSKnownZero) == DemandedMask)
+ if (DemandedMask.isSubsetOf(LHSKnownZero))
return I->getOperand(1);
// If all of the demanded bits are known to be zero on one side or the
// other, turn this into an *inclusive* or.
// e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
- if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
+ if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownZero)) {
Instruction *Or =
BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
I->getName());
@@ -295,14 +271,12 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
- if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
- // all known
- if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
- Constant *AndC = Constant::getIntegerValue(VTy,
- ~RHSKnownOne & DemandedMask);
- Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
- return InsertNewInstWith(And, *I);
- }
+ if (DemandedMask.isSubsetOf(RHSKnownZero|RHSKnownOne) &&
+ RHSKnownOne.isSubsetOf(LHSKnownOne)) {
+ Constant *AndC = Constant::getIntegerValue(VTy,
+ ~RHSKnownOne & DemandedMask);
+ Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
+ return InsertNewInstWith(And, *I);
}
// If the RHS is a constant, see if we can simplify it.
@@ -529,9 +503,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
KnownZero.setLowBits(ShiftAmt);
}
break;
- case Instruction::LShr:
- // For a logical shift right
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ case Instruction::LShr: {
+ const APInt *SA;
+ if (match(I->getOperand(1), m_APInt(SA))) {
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
// Unsigned shift right.
@@ -546,13 +520,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Depth + 1))
return I;
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- KnownZero = KnownZero.lshr(ShiftAmt);
- KnownOne = KnownOne.lshr(ShiftAmt);
+ KnownZero.lshrInPlace(ShiftAmt);
+ KnownOne.lshrInPlace(ShiftAmt);
if (ShiftAmt)
KnownZero.setHighBits(ShiftAmt); // high bits known zero.
}
break;
- case Instruction::AShr:
+ }
+ case Instruction::AShr: {
// If this is an arithmetic shift right and only the low-bit is set, we can
// always convert this into a logical shr, even if the shift amount is
// variable. The low bit of the shift cannot be an input sign bit unless
@@ -566,15 +541,16 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If the sign bit is the only bit demanded by this ashr, then there is no
// need to do it, the shift doesn't change the high bit.
- if (DemandedMask.isSignBit())
+ if (DemandedMask.isSignMask())
return I->getOperand(0);
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ const APInt *SA;
+ if (match(I->getOperand(1), m_APInt(SA))) {
uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
// Signed shift right.
APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
- // If any of the "high bits" are demanded, we should set the sign bit as
+ // If any of the high bits are demanded, we should set the sign bit as
// demanded.
if (DemandedMask.countLeadingZeros() <= ShiftAmt)
DemandedMaskIn.setSignBit();
@@ -587,31 +563,32 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
Depth + 1))
return I;
+
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
// Compute the new bits that are at the top now.
APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
- KnownZero = KnownZero.lshr(ShiftAmt);
- KnownOne = KnownOne.lshr(ShiftAmt);
+ KnownZero.lshrInPlace(ShiftAmt);
+ KnownOne.lshrInPlace(ShiftAmt);
// Handle the sign bits.
- APInt SignBit(APInt::getSignBit(BitWidth));
+ APInt SignMask(APInt::getSignMask(BitWidth));
// Adjust to where it is now in the mask.
- SignBit = SignBit.lshr(ShiftAmt);
+ SignMask.lshrInPlace(ShiftAmt);
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
(HighBits & ~DemandedMask) == HighBits) {
- // Perform the logical shift right.
- BinaryOperator *NewVal = BinaryOperator::CreateLShr(I->getOperand(0),
- SA, I->getName());
- NewVal->setIsExact(cast<BinaryOperator>(I)->isExact());
- return InsertNewInstWith(NewVal, *I);
- } else if ((KnownOne & SignBit) != 0) { // New bits are known one.
+ BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
+ I->getOperand(1));
+ LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
+ return InsertNewInstWith(LShr, *I);
+ } else if ((KnownOne & SignMask) != 0) { // New bits are known one.
KnownOne |= HighBits;
}
}
break;
+ }
case Instruction::SRem:
if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
// X % -1 demands all the bits because we don't want to introduce
@@ -624,7 +601,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I->getOperand(0);
APInt LowBits = RA - 1;
- APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
+ APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
if (SimplifyDemandedBits(I, 0, Mask2, LHSKnownZero, LHSKnownOne,
Depth + 1))
return I;
@@ -635,26 +612,26 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If LHS is non-negative or has all low bits zero, then the upper bits
// are all zero.
- if (LHSKnownZero.isNegative() || ((LHSKnownZero & LowBits) == LowBits))
+ if (LHSKnownZero.isSignBitSet() || ((LHSKnownZero & LowBits) == LowBits))
KnownZero |= ~LowBits;
// If LHS is negative and not all low bits are zero, then the upper bits
// are all one.
- if (LHSKnownOne.isNegative() && ((LHSKnownOne & LowBits) != 0))
+ if (LHSKnownOne.isSignBitSet() && ((LHSKnownOne & LowBits) != 0))
KnownOne |= ~LowBits;
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ break;
}
}
// The sign bit is the LHS's sign bit, except when the result of the
// remainder is zero.
- if (DemandedMask.isNegative() && KnownZero.isNonNegative()) {
- APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
+ if (DemandedMask.isSignBitSet()) {
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
CxtI);
// If it's known zero, our sign bit is also zero.
- if (LHSKnownZero.isNegative())
+ if (LHSKnownZero.isSignBitSet())
KnownZero.setSignBit();
}
break;
@@ -744,7 +721,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(KnownZero|KnownOne))
return Constant::getIntegerValue(VTy, KnownOne);
return nullptr;
}
@@ -783,17 +760,15 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
return Constant::getIntegerValue(ITy, IKnownOne);
// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and' in this
// context.
- if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
- (DemandedMask & ~LHSKnownZero))
+ if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne))
return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
- (DemandedMask & ~RHSKnownZero))
+ if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne))
return I->getOperand(1);
KnownZero = std::move(IKnownZero);
@@ -817,26 +792,15 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
return Constant::getIntegerValue(ITy, IKnownOne);
// If all of the demanded bits are known zero on one side, return the
// other. These bits cannot contribute to the result of the 'or' in this
// context.
- if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
- (DemandedMask & ~LHSKnownOne))
+ if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero))
return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
- (DemandedMask & ~RHSKnownOne))
- return I->getOperand(1);
-
- // If all of the potentially set bits on one side are known to be set on
- // the other side, just use the 'other' side.
- if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
- (DemandedMask & (~RHSKnownZero)))
- return I->getOperand(0);
- if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
- (DemandedMask & (~LHSKnownZero)))
+ if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero))
return I->getOperand(1);
KnownZero = std::move(IKnownZero);
@@ -861,14 +825,14 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// If the client is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
return Constant::getIntegerValue(ITy, IKnownOne);
// If all of the demanded bits are known zero on one side, return the
// other.
- if ((DemandedMask & RHSKnownZero) == DemandedMask)
+ if (DemandedMask.isSubsetOf(RHSKnownZero))
return I->getOperand(0);
- if ((DemandedMask & LHSKnownZero) == DemandedMask)
+ if (DemandedMask.isSubsetOf(LHSKnownZero))
return I->getOperand(1);
// Output known-0 bits are known if clear or set in both the LHS & RHS.
@@ -883,7 +847,7 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// If this user is only demanding bits that we know, return the known
// constant.
- if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
+ if (DemandedMask.isSubsetOf(KnownZero|KnownOne))
return Constant::getIntegerValue(ITy, KnownOne);
break;
@@ -1641,7 +1605,52 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
UndefElts.setHighBits(VWidth / 2);
break;
case Intrinsic::amdgcn_buffer_load:
- case Intrinsic::amdgcn_buffer_load_format: {
+ case Intrinsic::amdgcn_buffer_load_format:
+ case Intrinsic::amdgcn_image_sample:
+ case Intrinsic::amdgcn_image_sample_cl:
+ case Intrinsic::amdgcn_image_sample_d:
+ case Intrinsic::amdgcn_image_sample_d_cl:
+ case Intrinsic::amdgcn_image_sample_l:
+ case Intrinsic::amdgcn_image_sample_b:
+ case Intrinsic::amdgcn_image_sample_b_cl:
+ case Intrinsic::amdgcn_image_sample_lz:
+ case Intrinsic::amdgcn_image_sample_cd:
+ case Intrinsic::amdgcn_image_sample_cd_cl:
+
+ case Intrinsic::amdgcn_image_sample_c:
+ case Intrinsic::amdgcn_image_sample_c_cl:
+ case Intrinsic::amdgcn_image_sample_c_d:
+ case Intrinsic::amdgcn_image_sample_c_d_cl:
+ case Intrinsic::amdgcn_image_sample_c_l:
+ case Intrinsic::amdgcn_image_sample_c_b:
+ case Intrinsic::amdgcn_image_sample_c_b_cl:
+ case Intrinsic::amdgcn_image_sample_c_lz:
+ case Intrinsic::amdgcn_image_sample_c_cd:
+ case Intrinsic::amdgcn_image_sample_c_cd_cl:
+
+ case Intrinsic::amdgcn_image_sample_o:
+ case Intrinsic::amdgcn_image_sample_cl_o:
+ case Intrinsic::amdgcn_image_sample_d_o:
+ case Intrinsic::amdgcn_image_sample_d_cl_o:
+ case Intrinsic::amdgcn_image_sample_l_o:
+ case Intrinsic::amdgcn_image_sample_b_o:
+ case Intrinsic::amdgcn_image_sample_b_cl_o:
+ case Intrinsic::amdgcn_image_sample_lz_o:
+ case Intrinsic::amdgcn_image_sample_cd_o:
+ case Intrinsic::amdgcn_image_sample_cd_cl_o:
+
+ case Intrinsic::amdgcn_image_sample_c_o:
+ case Intrinsic::amdgcn_image_sample_c_cl_o:
+ case Intrinsic::amdgcn_image_sample_c_d_o:
+ case Intrinsic::amdgcn_image_sample_c_d_cl_o:
+ case Intrinsic::amdgcn_image_sample_c_l_o:
+ case Intrinsic::amdgcn_image_sample_c_b_o:
+ case Intrinsic::amdgcn_image_sample_c_b_cl_o:
+ case Intrinsic::amdgcn_image_sample_c_lz_o:
+ case Intrinsic::amdgcn_image_sample_c_cd_o:
+ case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
+
+ case Intrinsic::amdgcn_image_getlod: {
if (VWidth == 1 || !DemandedElts.isMask())
return nullptr;
@@ -1656,8 +1665,17 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
Type *NewTy = (NewNumElts == 1) ? EltTy :
VectorType::get(EltTy, NewNumElts);
- Function *NewIntrin = Intrinsic::getDeclaration(M, II->getIntrinsicID(),
- NewTy);
+ auto IID = II->getIntrinsicID();
+
+ bool IsBuffer = IID == Intrinsic::amdgcn_buffer_load ||
+ IID == Intrinsic::amdgcn_buffer_load_format;
+
+ Function *NewIntrin = IsBuffer ?
+ Intrinsic::getDeclaration(M, IID, NewTy) :
+ // Samplers have 3 mangled types.
+ Intrinsic::getDeclaration(M, IID,
+ { NewTy, II->getArgOperand(0)->getType(),
+ II->getArgOperand(1)->getType()});
SmallVector<Value *, 5> Args;
for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
@@ -1669,6 +1687,29 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
CallInst *NewCall = Builder->CreateCall(NewIntrin, Args);
NewCall->takeName(II);
NewCall->copyMetadata(*II);
+
+ if (!IsBuffer) {
+ ConstantInt *DMask = dyn_cast<ConstantInt>(NewCall->getArgOperand(3));
+ if (DMask) {
+ unsigned DMaskVal = DMask->getZExtValue() & 0xf;
+
+ unsigned PopCnt = 0;
+ unsigned NewDMask = 0;
+ for (unsigned I = 0; I < 4; ++I) {
+ const unsigned Bit = 1 << I;
+ if (!!(DMaskVal & Bit)) {
+ if (++PopCnt > NewNumElts)
+ break;
+
+ NewDMask |= Bit;
+ }
+ }
+
+ NewCall->setArgOperand(3, ConstantInt::get(DMask->getType(), NewDMask));
+ }
+ }
+
if (NewNumElts == 1) {
return Builder->CreateInsertElement(UndefValue::get(V->getType()),
NewCall, static_cast<uint64_t>(0));
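The dmask rewrite above keeps only the lowest NewNumElts set bits of the 4-bit channel mask, so the shrunken sample intrinsic still writes exactly the demanded channels. A standalone sketch of that logic (trimDMask is a hypothetical helper, not an LLVM API):

static unsigned trimDMask(unsigned DMaskVal, unsigned NewNumElts) {
  DMaskVal &= 0xf;                    // only 4 channels exist
  unsigned PopCnt = 0, NewDMask = 0;
  for (unsigned I = 0; I < 4; ++I) {
    unsigned Bit = 1u << I;
    if (DMaskVal & Bit) {
      if (++PopCnt > NewNumElts)
        break;                        // drop channels past the demanded ones
      NewDMask |= Bit;
    }
  }
  return NewDMask;                    // e.g. trimDMask(0xb, 2) == 0x3
}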
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 88ef17bbc8fa..81f2d9fa179f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -148,9 +148,9 @@ static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
bool Overflow = false;
if (Opcode == Instruction::Add)
- BVal->sadd_ov(*CVal, Overflow);
+ (void)BVal->sadd_ov(*CVal, Overflow);
else
- BVal->ssub_ov(*CVal, Overflow);
+ (void)BVal->ssub_ov(*CVal, Overflow);
return !Overflow;
}
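The (void) casts above discard the wrapped sum and keep only the overflow flag, which is all MaintainNoSignedWrap needs. The same pattern in isolation (assuming llvm/ADT/APInt.h):

#include "llvm/ADT/APInt.h"

static bool signedAddOverflows(const llvm::APInt &A, const llvm::APInt &B) {
  bool Overflow = false;
  (void)A.sadd_ov(B, Overflow); // result unused; the cast silences the warning
  return Overflow;
}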
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 94cfc69ed555..036dd8d39a08 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -2586,7 +2586,7 @@ void FunctionStackPoisoner::processStaticAllocas() {
Value *NewAllocaPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
AI->getType());
- replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true);
+ replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/false);
AI->replaceAllUsesWith(NewAllocaPtr);
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index fa0c7cc5a4c5..8bdd917a0596 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -59,13 +59,8 @@ using namespace llvm;
static const char *const SanCovModuleInitName = "__sanitizer_cov_module_init";
static const char *const SanCovName = "__sanitizer_cov";
static const char *const SanCovWithCheckName = "__sanitizer_cov_with_check";
-static const char *const SanCovIndirCallName = "__sanitizer_cov_indir_call16";
static const char *const SanCovTracePCIndirName =
"__sanitizer_cov_trace_pc_indir";
-static const char *const SanCovTraceEnterName =
- "__sanitizer_cov_trace_func_enter";
-static const char *const SanCovTraceBBName =
- "__sanitizer_cov_trace_basic_block";
static const char *const SanCovTracePCName = "__sanitizer_cov_trace_pc";
static const char *const SanCovTraceCmp1 = "__sanitizer_cov_trace_cmp1";
static const char *const SanCovTraceCmp2 = "__sanitizer_cov_trace_cmp2";
@@ -86,8 +81,7 @@ static const char *const SanCovTracePCGuardInitName =
static cl::opt<int> ClCoverageLevel(
"sanitizer-coverage-level",
cl::desc("Sanitizer Coverage. 0: none, 1: entry block, 2: all blocks, "
- "3: all blocks and critical edges, "
- "4: above plus indirect calls"),
+ "3: all blocks and critical edges"),
cl::Hidden, cl::init(0));
static cl::opt<unsigned> ClCoverageBlockThreshold(
@@ -96,12 +90,6 @@ static cl::opt<unsigned> ClCoverageBlockThreshold(
" more than this number of blocks."),
cl::Hidden, cl::init(0));
-static cl::opt<bool>
- ClExperimentalTracing("sanitizer-coverage-experimental-tracing",
- cl::desc("Experimental basic-block tracing: insert "
- "callbacks at every basic block"),
- cl::Hidden, cl::init(false));
-
static cl::opt<bool> ClExperimentalTracePC("sanitizer-coverage-trace-pc",
cl::desc("Experimental pc tracing"),
cl::Hidden, cl::init(false));
@@ -128,16 +116,6 @@ static cl::opt<bool>
cl::desc("Reduce the number of instrumented blocks"),
cl::Hidden, cl::init(true));
-// Experimental 8-bit counters used as an additional search heuristic during
-// coverage-guided fuzzing.
-// The counters are not thread-friendly:
-// - contention on these counters may cause significant slowdown;
-// - the counter updates are racy and the results may be inaccurate.
-// They are also inaccurate due to 8-bit integer overflow.
-static cl::opt<bool> ClUse8bitCounters("sanitizer-coverage-8bit-counters",
- cl::desc("Experimental 8-bit counters"),
- cl::Hidden, cl::init(false));
-
namespace {
SanitizerCoverageOptions getOptions(int LegacyCoverageLevel) {
@@ -168,11 +146,9 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
SanitizerCoverageOptions CLOpts = getOptions(ClCoverageLevel);
Options.CoverageType = std::max(Options.CoverageType, CLOpts.CoverageType);
Options.IndirectCalls |= CLOpts.IndirectCalls;
- Options.TraceBB |= ClExperimentalTracing;
Options.TraceCmp |= ClCMPTracing;
Options.TraceDiv |= ClDIVTracing;
Options.TraceGep |= ClGEPTracing;
- Options.Use8bitCounters |= ClUse8bitCounters;
Options.TracePC |= ClExperimentalTracePC;
Options.TracePCGuard |= ClTracePCGuard;
return Options;
@@ -212,16 +188,15 @@ private:
bool UseCalls);
unsigned NumberOfInstrumentedBlocks() {
return SanCovFunction->getNumUses() +
- SanCovWithCheckFunction->getNumUses() + SanCovTraceBB->getNumUses() +
- SanCovTraceEnter->getNumUses();
+ SanCovWithCheckFunction->getNumUses();
}
StringRef getSanCovTracePCGuardSection() const;
StringRef getSanCovTracePCGuardSectionStart() const;
StringRef getSanCovTracePCGuardSectionEnd() const;
Function *SanCovFunction;
Function *SanCovWithCheckFunction;
- Function *SanCovIndirCallFunction, *SanCovTracePCIndir;
- Function *SanCovTraceEnter, *SanCovTraceBB, *SanCovTracePC, *SanCovTracePCGuard;
+ Function *SanCovTracePCIndir;
+ Function *SanCovTracePC, *SanCovTracePCGuard;
Function *SanCovTraceCmpFunction[4];
Function *SanCovTraceDivFunction[2];
Function *SanCovTraceGepFunction;
@@ -235,7 +210,6 @@ private:
GlobalVariable *GuardArray;
GlobalVariable *FunctionGuardArray; // for trace-pc-guard.
- GlobalVariable *EightBitCounterArray;
bool HasSancovGuardsSection;
SanitizerCoverageOptions Options;
@@ -267,9 +241,6 @@ bool SanitizerCoverageModule::runOnModule(Module &M) {
M.getOrInsertFunction(SanCovWithCheckName, VoidTy, Int32PtrTy));
SanCovTracePCIndir = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(SanCovTracePCIndirName, VoidTy, IntptrTy));
- SanCovIndirCallFunction =
- checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- SanCovIndirCallName, VoidTy, IntptrTy, IntptrTy));
SanCovTraceCmpFunction[0] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
SanCovTraceCmp1, VoidTy, IRB.getInt8Ty(), IRB.getInt8Ty()));
@@ -305,24 +276,15 @@ bool SanitizerCoverageModule::runOnModule(Module &M) {
M.getOrInsertFunction(SanCovTracePCName, VoidTy));
SanCovTracePCGuard = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
SanCovTracePCGuardName, VoidTy, Int32PtrTy));
- SanCovTraceEnter = checkSanitizerInterfaceFunction(
- M.getOrInsertFunction(SanCovTraceEnterName, VoidTy, Int32PtrTy));
- SanCovTraceBB = checkSanitizerInterfaceFunction(
- M.getOrInsertFunction(SanCovTraceBBName, VoidTy, Int32PtrTy));
// At this point we create a dummy array of guards because we don't
// know how many elements we will need.
Type *Int32Ty = IRB.getInt32Ty();
- Type *Int8Ty = IRB.getInt8Ty();
if (!Options.TracePCGuard)
GuardArray =
new GlobalVariable(M, Int32Ty, false, GlobalValue::ExternalLinkage,
nullptr, "__sancov_gen_cov_tmp");
- if (Options.Use8bitCounters)
- EightBitCounterArray =
- new GlobalVariable(M, Int8Ty, false, GlobalVariable::ExternalLinkage,
- nullptr, "__sancov_gen_cov_tmp");
for (auto &F : M)
runOnFunction(F);
@@ -344,20 +306,6 @@ bool SanitizerCoverageModule::runOnModule(Module &M) {
GuardArray->eraseFromParent();
}
- GlobalVariable *RealEightBitCounterArray;
- if (Options.Use8bitCounters) {
- // Make sure the array is 16-aligned.
- static const int CounterAlignment = 16;
- Type *Int8ArrayNTy = ArrayType::get(Int8Ty, alignTo(N, CounterAlignment));
- RealEightBitCounterArray = new GlobalVariable(
- M, Int8ArrayNTy, false, GlobalValue::PrivateLinkage,
- Constant::getNullValue(Int8ArrayNTy), "__sancov_gen_cov_counter");
- RealEightBitCounterArray->setAlignment(CounterAlignment);
- EightBitCounterArray->replaceAllUsesWith(
- IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy));
- EightBitCounterArray->eraseFromParent();
- }
-
// Create variable for module (compilation unit) name
Constant *ModNameStrConst =
ConstantDataArray::getString(M.getContext(), M.getName(), true);
@@ -396,10 +344,7 @@ bool SanitizerCoverageModule::runOnModule(Module &M) {
M, SanCovModuleCtorName, SanCovModuleInitName,
{Int32PtrTy, IntptrTy, Int8PtrTy, Int8PtrTy},
{IRB.CreatePointerCast(RealGuardArray, Int32PtrTy),
- ConstantInt::get(IntptrTy, N),
- Options.Use8bitCounters
- ? IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy)
- : Constant::getNullValue(Int8PtrTy),
+ ConstantInt::get(IntptrTy, N), Constant::getNullValue(Int8PtrTy),
IRB.CreatePointerCast(ModuleName, Int8PtrTy)});
appendToGlobalCtors(M, CtorFunc, SanCtorAndDtorPriority);
@@ -566,26 +511,15 @@ void SanitizerCoverageModule::InjectCoverageForIndirectCalls(
Function &F, ArrayRef<Instruction *> IndirCalls) {
if (IndirCalls.empty())
return;
- const int CacheSize = 16;
- const int CacheAlignment = 64; // Align for better performance.
- Type *Ty = ArrayType::get(IntptrTy, CacheSize);
+ if (!Options.TracePC && !Options.TracePCGuard)
+ return;
for (auto I : IndirCalls) {
IRBuilder<> IRB(I);
CallSite CS(I);
Value *Callee = CS.getCalledValue();
if (isa<InlineAsm>(Callee))
continue;
- GlobalVariable *CalleeCache = new GlobalVariable(
- *F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
- Constant::getNullValue(Ty), "__sancov_gen_callee_cache");
- CalleeCache->setAlignment(CacheAlignment);
- if (Options.TracePC || Options.TracePCGuard)
- IRB.CreateCall(SanCovTracePCIndir,
- IRB.CreatePointerCast(Callee, IntptrTy));
- else
- IRB.CreateCall(SanCovIndirCallFunction,
- {IRB.CreatePointerCast(Callee, IntptrTy),
- IRB.CreatePointerCast(CalleeCache, IntptrTy)});
+ IRB.CreateCall(SanCovTracePCIndir, IRB.CreatePointerCast(Callee, IntptrTy));
}
}
@@ -735,9 +669,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
IRB.CreatePointerCast(GuardArray, IntptrTy),
ConstantInt::get(IntptrTy, (1 + NumberOfInstrumentedBlocks()) * 4));
GuardP = IRB.CreateIntToPtr(GuardP, Int32PtrTy);
- if (Options.TraceBB) {
- IRB.CreateCall(IsEntryBB ? SanCovTraceEnter : SanCovTraceBB, GuardP);
- } else if (UseCalls) {
+ if (UseCalls) {
IRB.CreateCall(SanCovWithCheckFunction, GuardP);
} else {
LoadInst *Load = IRB.CreateLoad(GuardP);
@@ -755,19 +687,6 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
IRB.CreateCall(EmptyAsm, {}); // Avoids callback merge.
}
}
-
- if (Options.Use8bitCounters) {
- IRB.SetInsertPoint(&*IP);
- Value *P = IRB.CreateAdd(
- IRB.CreatePointerCast(EightBitCounterArray, IntptrTy),
- ConstantInt::get(IntptrTy, NumberOfInstrumentedBlocks() - 1));
- P = IRB.CreateIntToPtr(P, IRB.getInt8PtrTy());
- LoadInst *LI = IRB.CreateLoad(P);
- Value *Inc = IRB.CreateAdd(LI, ConstantInt::get(IRB.getInt8Ty(), 1));
- StoreInst *SI = IRB.CreateStore(Inc, P);
- SetNoSanitizeMetadata(LI);
- SetNoSanitizeMetadata(SI);
- }
}
StringRef SanitizerCoverageModule::getSanCovTracePCGuardSection() const {
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index 6adfe130d148..b7514a6d5793 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -45,6 +45,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -1010,6 +1011,7 @@ public:
AU.addRequired<MemorySSAWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<MemorySSAWrapperPass>();
+ AU.addPreserved<GlobalsAAWrapperPass>();
}
};
} // namespace
@@ -1026,6 +1028,7 @@ PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) {
PreservedAnalyses PA;
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<MemorySSAAnalysis>();
+ PA.preserve<GlobalsAA>();
return PA;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index cf63cb660db8..20b37c4b70e6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -197,8 +197,7 @@ public:
continue;
// Only propagate the value if they are of the same type.
- if (Store->getPointerOperand()->getType() !=
- Load->getPointerOperand()->getType())
+ if (Store->getPointerOperandType() != Load->getPointerOperandType())
continue;
Candidates.emplace_front(Load, Store);
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
index 86058fe0b1aa..fd15a9014def 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -557,7 +557,7 @@ bool LoopReroll::isLoopControlIV(Loop *L, Instruction *IV) {
Instruction *UUser = dyn_cast<Instruction>(UU);
// Skip SExt if we are extending an nsw value
// TODO: Allow ZExt too
- if (BO->hasNoSignedWrap() && UUser && UUser->getNumUses() == 1 &&
+ if (BO->hasNoSignedWrap() && UUser && UUser->hasOneUse() &&
isa<SExtInst>(UUser))
UUser = dyn_cast<Instruction>(*(UUser->user_begin()));
if (!isCompareUsedByBranch(UUser))
@@ -852,7 +852,7 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) {
for (auto &KV : Roots) {
if (KV.first == 0)
continue;
- if (KV.second->getNumUses() != NumBaseUses) {
+ if (!KV.second->hasNUses(NumBaseUses)) {
DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: "
<< "#Base=" << NumBaseUses << ", #Root=" <<
KV.second->getNumUses() << "\n");
@@ -867,7 +867,7 @@ void LoopReroll::DAGRootTracker::
findRootsRecursive(Instruction *I, SmallInstructionSet SubsumedInsts) {
// Does the user look like it could be part of a root set?
// All its users must be simple arithmetic ops.
- if (I->getNumUses() > IL_MaxRerollIterations)
+ if (I->hasNUsesOrMore(IL_MaxRerollIterations + 1))
return;
if (I != IV && findRootsBase(I, SubsumedInsts))
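These rewrites are not just cosmetic: getNumUses() always walks the entire use list, while hasOneUse/hasNUses/hasNUsesOrMore stop as soon as the answer is known. A sketch of the underlying idea over a generic iterator range (not LLVM's actual implementation):

template <typename UseIt>
static bool hasNUsesSketch(UseIt I, UseIt E, unsigned N) {
  unsigned Seen = 0;
  for (; I != E; ++I)
    if (++Seen > N)
      return false; // early exit once more than N uses have been seen
  return Seen == N;
}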
diff --git a/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp b/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 3d8ce888867e..a014ddd9ba0a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -138,7 +138,8 @@ PHIExpression::~PHIExpression() = default;
// It also wants to hand us SCC's that are unrelated to the phi node we ask
// about, and have us process them there or risk redoing work.
// Graph traits over a filter iterator also doesn't work that well here.
-// This SCC finder is specialized to walk use-def chains, and only follows instructions,
+// This SCC finder is specialized to walk use-def chains, and only follows
+// instructions,
// not generic values (arguments, etc).
struct TarjanSCC {
@@ -170,8 +171,10 @@ private:
Root[I] = std::min(Root.lookup(I), Root.lookup(Op));
}
}
- // See if we really were the root of a component, by seeing if we still have our DFSNumber.
- // If we do, we are the root of the component, and we have completed a component. If we do not,
+ // See if we really were the root of a component, by seeing if we still have
+ // our DFSNumber.
+ // If we do, we are the root of the component, and we have completed a
+ // component. If we do not,
// we are not the root of a component, and belong on the component stack.
if (Root.lookup(I) == OurDFS) {
unsigned ComponentID = Components.size();
@@ -2254,12 +2257,13 @@ void NewGVN::initializeCongruenceClasses(Function &F) {
MemoryAccessToClass[MSSA->getLiveOnEntryDef()] =
createMemoryClass(MSSA->getLiveOnEntryDef());
- for (auto &B : F) {
+ for (auto DTN : nodes(DT)) {
+ BasicBlock *BB = DTN->getBlock();
// All MemoryAccesses are equivalent to live on entry to start. They must
// be initialized to something so that initial changes are noticed. For
// the maximal answer, we initialize them all to be the same as
// liveOnEntry.
- auto *MemoryBlockDefs = MSSA->getBlockDefs(&B);
+ auto *MemoryBlockDefs = MSSA->getBlockDefs(BB);
if (MemoryBlockDefs)
for (const auto &Def : *MemoryBlockDefs) {
MemoryAccessToClass[&Def] = TOPClass;
@@ -2274,7 +2278,7 @@ void NewGVN::initializeCongruenceClasses(Function &F) {
if (MD && isa<StoreInst>(MD->getMemoryInst()))
TOPClass->incStoreCount();
}
- for (auto &I : B) {
+ for (auto &I : *BB) {
// Don't insert void terminators into the class. We don't value number
// them, and they just end up sitting in TOP.
if (isa<TerminatorInst>(I) && I.getType()->isVoidTy())
@@ -2518,14 +2522,11 @@ void NewGVN::verifyMemoryCongruency() const {
auto ReachableAccessPred =
[&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
bool Result = ReachableBlocks.count(Pair.first->getBlock());
- if (!Result)
+ if (!Result || MSSA->isLiveOnEntryDef(Pair.first) ||
+ MemoryToDFSNum(Pair.first) == 0)
return false;
- if (MSSA->isLiveOnEntryDef(Pair.first))
- return true;
if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first))
return !isInstructionTriviallyDead(MemDef->getMemoryInst());
- if (MemoryToDFSNum(Pair.first) == 0)
- return false;
return true;
};
@@ -2719,25 +2720,13 @@ bool NewGVN::runGVN() {
}
// Now a standard depth first ordering of the domtree is equivalent to RPO.
- auto DFI = df_begin(DT->getRootNode());
- for (auto DFE = df_end(DT->getRootNode()); DFI != DFE; ++DFI) {
- BasicBlock *B = DFI->getBlock();
+ for (auto DTN : depth_first(DT->getRootNode())) {
+ BasicBlock *B = DTN->getBlock();
const auto &BlockRange = assignDFSNumbers(B, ICount);
BlockInstRange.insert({B, BlockRange});
ICount += BlockRange.second - BlockRange.first;
}
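A sketch of the iteration pattern adopted here (assuming LLVM's depth-first iterator over dominator-tree nodes); since the dominator tree contains only reachable blocks, the walk covers exactly those, which is what lets this change drop the separate pass over blocks the old DFS never visited:

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/Dominators.h"

static void numberBlocks(llvm::DominatorTree &DT) {
  for (llvm::DomTreeNode *DTN : llvm::depth_first(DT.getRootNode())) {
    llvm::BasicBlock *BB = DTN->getBlock();
    (void)BB; // assign DFS numbers, record instruction ranges, etc.
  }
}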
- // Handle forward unreachable blocks and figure out which blocks
- // have single preds.
- for (auto &B : F) {
- // Assign numbers to unreachable blocks.
- if (!DFI.nodeVisited(DT->getNode(&B))) {
- const auto &BlockRange = assignDFSNumbers(&B, ICount);
- BlockInstRange.insert({&B, BlockRange});
- ICount += BlockRange.second - BlockRange.first;
- }
- }
-
TouchedInstructions.resize(ICount);
// Ensure we don't end up resizing the expressionToClass map, as
// that can be quite expensive. At most, we have one expression per
diff --git a/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
index 49ce0262c97b..659353e912fe 100644
--- a/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -352,10 +352,20 @@ Value *StructurizeCFG::invert(Value *Condition) {
if (Instruction *Inst = dyn_cast<Instruction>(Condition)) {
// Third: Check all the users for an invert
BasicBlock *Parent = Inst->getParent();
- for (User *U : Condition->users())
- if (Instruction *I = dyn_cast<Instruction>(U))
+ for (User *U : Condition->users()) {
+ if (Instruction *I = dyn_cast<Instruction>(U)) {
if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
return I;
+ }
+ }
+
+ // Avoid creating a new instruction in the common case of a compare.
+ if (CmpInst *Cmp = dyn_cast<CmpInst>(Inst)) {
+ if (Cmp->hasOneUse()) {
+ Cmp->setPredicate(Cmp->getInversePredicate());
+ return Cmp;
+ }
+ }
// Last option: Create a new instruction
return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator());
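The new fast path above avoids materializing a 'not' when the condition is a single-use compare: flipping the predicate inverts the result in place. That transformation on its own (assuming llvm/IR/Instructions.h):

#include "llvm/IR/Instructions.h"

static llvm::Value *invertCompareInPlace(llvm::CmpInst *Cmp) {
  if (!Cmp->hasOneUse())
    return nullptr; // other users still need the original result
  Cmp->setPredicate(Cmp->getInversePredicate()); // e.g. eq -> ne, slt -> sge
  return Cmp;
}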
diff --git a/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp b/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
index 60ae3745c835..9f4d9c7e3981 100644
--- a/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
@@ -73,17 +73,17 @@ bool llvm::decomposeBitTestICmp(const ICmpInst *I, CmpInst::Predicate &Pred,
default:
return false;
case ICmpInst::ICMP_SLT:
- // X < 0 is equivalent to (X & SignBit) != 0.
+ // X < 0 is equivalent to (X & SignMask) != 0.
if (!C->isZero())
return false;
- Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth()));
+ Y = ConstantInt::get(I->getContext(), APInt::getSignMask(C->getBitWidth()));
Pred = ICmpInst::ICMP_NE;
break;
case ICmpInst::ICMP_SGT:
- // X > -1 is equivalent to (X & SignBit) == 0.
+ // X > -1 is equivalent to (X & SignMask) == 0.
if (!C->isAllOnesValue())
return false;
- Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth()));
+ Y = ConstantInt::get(I->getContext(), APInt::getSignMask(C->getBitWidth()));
Pred = ICmpInst::ICMP_EQ;
break;
case ICmpInst::ICMP_ULT:
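The identity behind both rewritten cases, sketched over plain integers: for an N-bit signed value, x < 0 holds exactly when the sign bit is set, and x > -1 exactly when it is clear.

#include <cassert>
#include <cstdint>

static void signMaskIdentity(int8_t X) {
  const uint8_t SignMask = 0x80; // APInt::getSignMask(8) as a plain constant
  assert((X < 0)  == ((uint8_t(X) & SignMask) != 0));
  assert((X > -1) == ((uint8_t(X) & SignMask) == 0));
}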
diff --git a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 644d93b727b3..82552684b832 100644
--- a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -112,24 +112,6 @@ buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs) {
return buildExtractionBlockSet(BBs.begin(), BBs.end());
}
-/// \brief Helper to call buildExtractionBlockSet with a RegionNode.
-static SetVector<BasicBlock *>
-buildExtractionBlockSet(const RegionNode &RN) {
- if (!RN.isSubRegion())
- // Just a single BasicBlock.
- return buildExtractionBlockSet(RN.getNodeAs<BasicBlock>());
-
- const Region &R = *RN.getNodeAs<Region>();
-
- return buildExtractionBlockSet(R.block_begin(), R.block_end());
-}
-
-CodeExtractor::CodeExtractor(BasicBlock *BB, bool AggregateArgs,
- BlockFrequencyInfo *BFI,
- BranchProbabilityInfo *BPI)
- : DT(nullptr), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
- BPI(BPI), Blocks(buildExtractionBlockSet(BB)), NumExitBlocks(~0U) {}
-
CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
bool AggregateArgs, BlockFrequencyInfo *BFI,
BranchProbabilityInfo *BPI)
@@ -143,12 +125,6 @@ CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs,
BPI(BPI), Blocks(buildExtractionBlockSet(L.getBlocks())),
NumExitBlocks(~0U) {}
-CodeExtractor::CodeExtractor(DominatorTree &DT, const RegionNode &RN,
- bool AggregateArgs, BlockFrequencyInfo *BFI,
- BranchProbabilityInfo *BPI)
- : DT(&DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
- BPI(BPI), Blocks(buildExtractionBlockSet(RN)), NumExitBlocks(~0U) {}
-
/// definedInRegion - Return true if the specified value is defined in the
/// extracted region.
static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
diff --git a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
index 49b4bd92faf4..089f2b5f3b18 100644
--- a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -85,6 +85,7 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
UsesToRewrite.clear();
Instruction *I = Worklist.pop_back_val();
+ assert(!I->getType()->isTokenTy() && "Tokens shouldn't be in the worklist");
BasicBlock *InstBB = I->getParent();
Loop *L = LI.getLoopFor(InstBB);
assert(L && "Instruction belongs to a BB that's not part of a loop");
@@ -96,13 +97,6 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
if (ExitBlocks.empty())
continue;
- // Tokens cannot be used in PHI nodes, so we skip over them.
- // We can run into tokens which are live out of a loop with catchswitch
- // instructions in Windows EH if the catchswitch has one catchpad which
- // is inside the loop and another which is not.
- if (I->getType()->isTokenTy())
- continue;
-
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
BasicBlock *UserBB = User->getParent();
@@ -214,13 +208,9 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
// Post process PHI instructions that were inserted into another disjoint
// loop and update their exits properly.
- for (auto *PostProcessPN : PostProcessPHIs) {
- if (PostProcessPN->use_empty())
- continue;
-
- // Reprocess each PHI instruction.
- Worklist.push_back(PostProcessPN);
- }
+ for (auto *PostProcessPN : PostProcessPHIs)
+ if (!PostProcessPN->use_empty())
+ Worklist.push_back(PostProcessPN);
// Keep track of PHI nodes that we want to remove because they did not have
// any uses rewritten.
@@ -241,7 +231,7 @@ bool llvm::formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
// Compute the set of BasicBlocks in the loop `L` dominating at least one exit.
static void computeBlocksDominatingExits(
Loop &L, DominatorTree &DT, SmallVector<BasicBlock *, 8> &ExitBlocks,
- SmallPtrSet<BasicBlock *, 8> &BlocksDominatingExits) {
+ SmallSetVector<BasicBlock *, 8> &BlocksDominatingExits) {
SmallVector<BasicBlock *, 8> BBWorklist;
// We start from the exit blocks, as every block trivially dominates itself
@@ -279,7 +269,7 @@ static void computeBlocksDominatingExits(
if (!L.contains(IDomBB))
continue;
- if (BlocksDominatingExits.insert(IDomBB).second)
+ if (BlocksDominatingExits.insert(IDomBB))
BBWorklist.push_back(IDomBB);
}
}
@@ -293,7 +283,7 @@ bool llvm::formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI,
if (ExitBlocks.empty())
return false;
- SmallPtrSet<BasicBlock *, 8> BlocksDominatingExits;
+ SmallSetVector<BasicBlock *, 8> BlocksDominatingExits;
// We want to avoid use-scanning by leveraging dominance information.
// If a block doesn't dominate any of the loop exits, then none of the values
@@ -315,6 +305,13 @@ bool llvm::formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI,
!isa<PHINode>(I.user_back())))
continue;
+ // Tokens cannot be used in PHI nodes, so we skip over them.
+ // We can run into tokens which are live out of a loop with catchswitch
+ // instructions in Windows EH if the catchswitch has one catchpad which
+ // is inside the loop and another which is not.
+ if (I.getType()->isTokenTy())
+ continue;
+
Worklist.push_back(&I);
}
}
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index 18b29226c2ef..8c5442762643 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -1227,13 +1227,9 @@ bool llvm::LowerDbgDeclare(Function &F) {
// This is a call by-value or some other instruction that
// takes a pointer to the variable. Insert a *value*
// intrinsic that describes the alloca.
- SmallVector<uint64_t, 1> NewDIExpr;
- auto *DIExpr = DDI->getExpression();
- NewDIExpr.push_back(dwarf::DW_OP_deref);
- NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end());
DIB.insertDbgValueIntrinsic(AI, 0, DDI->getVariable(),
- DIB.createExpression(NewDIExpr),
- DDI->getDebugLoc(), CI);
+ DDI->getExpression(), DDI->getDebugLoc(),
+ CI);
}
}
DDI->eraseFromParent();
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp
index 73c14f5606b7..5c21490793e7 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp
@@ -46,6 +46,11 @@ static cl::opt<unsigned> UnrollForcePeelCount(
"unroll-force-peel-count", cl::init(0), cl::Hidden,
cl::desc("Force a peel count regardless of profiling information."));
+// Designates that a Phi is estimated to become invariant after an "infinite"
+// number of loop iterations (i.e. it may only become an invariant if the loop is
+// fully unrolled).
+static const unsigned InfiniteIterationsToInvariance = UINT_MAX;
+
// Check whether we are capable of peeling this loop.
static bool canPeel(Loop *L) {
// Make sure the loop is in simplified form
@@ -66,10 +71,62 @@ static bool canPeel(Loop *L) {
return true;
}
+// This function calculates the number of iterations after which the given Phi
+// becomes an invariant. The pre-calculated values are memoized in the map. The
+// function (written I below) is calculated according to the following definition:
+// Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge].
+// If %y is a loop invariant, then I(%x) = 1.
+// If %y is a Phi from the loop header, I(%x) = I(%y) + 1.
+// Otherwise, I(%x) is infinite.
+// TODO: Actually if %y is an expression that depends only on Phi %z and some
+// loop invariants, we can estimate I(%x) = I(%z) + 1. The example
+// looks like:
+// %x = phi(0, %a), <-- becomes invariant starting from 3rd iteration.
+// %y = phi(0, 5),
+// %a = %y + 1.
+static unsigned calculateIterationsToInvariance(
+ PHINode *Phi, Loop *L, BasicBlock *BackEdge,
+ SmallDenseMap<PHINode *, unsigned> &IterationsToInvariance) {
+ assert(Phi->getParent() == L->getHeader() &&
+ "Non-loop Phi should not be checked for turning into invariant.");
+ assert(BackEdge == L->getLoopLatch() && "Wrong latch?");
+ // If we already know the answer, take it from the map.
+ auto I = IterationsToInvariance.find(Phi);
+ if (I != IterationsToInvariance.end())
+ return I->second;
+
+ // Otherwise we need to analyze the input from the back edge.
+ Value *Input = Phi->getIncomingValueForBlock(BackEdge);
+ // Place infinity in the map to avoid infinite recursion for cyclic Phis. Such
+ // cycles can never stop on an invariant.
+ IterationsToInvariance[Phi] = InfiniteIterationsToInvariance;
+ unsigned ToInvariance = InfiniteIterationsToInvariance;
+
+ if (L->isLoopInvariant(Input))
+ ToInvariance = 1u;
+ else if (PHINode *IncPhi = dyn_cast<PHINode>(Input)) {
+ // Only consider Phis in header block.
+ if (IncPhi->getParent() != L->getHeader())
+ return InfiniteIterationsToInvariance;
+ // If the input becomes an invariant after X iterations, then our Phi
+ // becomes an invariant after X + 1 iterations.
+ unsigned InputToInvariance = calculateIterationsToInvariance(
+ IncPhi, L, BackEdge, IterationsToInvariance);
+ if (InputToInvariance != InfiniteIterationsToInvariance)
+ ToInvariance = InputToInvariance + 1u;
+ }
+
+ // If we found that this Phi lies in an invariant chain, update the map.
+ if (ToInvariance != InfiniteIterationsToInvariance)
+ IterationsToInvariance[Phi] = ToInvariance;
+ return ToInvariance;
+}
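The sentinel-first memoization above is what makes the recursion safe on Phi cycles: the map is seeded with "infinite" before the recursive call, so a cycle resolves to the sentinel instead of recursing forever. The same pattern, reduced to a self-contained sketch over an abstract graph (node -> its back-edge input; absence means the input is invariant; all names hypothetical):

#include <climits>
#include <unordered_map>

static unsigned depthToInvariant(int Node,
                                 const std::unordered_map<int, int> &BackEdgeIn,
                                 std::unordered_map<int, unsigned> &Cache) {
  auto Hit = Cache.find(Node);
  if (Hit != Cache.end())
    return Hit->second;
  Cache[Node] = UINT_MAX;            // sentinel seeded first: breaks cycles
  unsigned Result = UINT_MAX;
  auto In = BackEdgeIn.find(Node);
  if (In == BackEdgeIn.end()) {
    Result = 1;                      // back-edge input is invariant
  } else {
    unsigned Inner = depthToInvariant(In->second, BackEdgeIn, Cache);
    if (Inner != UINT_MAX)
      Result = Inner + 1;
  }
  if (Result != UINT_MAX)
    Cache[Node] = Result;            // finite answers overwrite the sentinel
  return Result;
}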
+
// Return the number of iterations we want to peel off.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
TargetTransformInfo::UnrollingPreferences &UP,
unsigned &TripCount) {
+ assert(LoopSize > 0 && "Zero loop size is not allowed!");
UP.PeelCount = 0;
if (!canPeel(L))
return;
@@ -78,30 +135,37 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize,
if (!L->empty())
return;
- // Try to find a Phi node that has the same loop invariant as an input from
- // its only back edge. If there is such Phi, peeling 1 iteration from the
- // loop is profitable, because starting from 2nd iteration we will have an
- // invariant instead of this Phi.
- if (LoopSize <= UP.Threshold) {
+ // Here we try to get rid of Phis which become invariants after 1, 2, ..., N
+ // iterations of the loop. For this we compute the number of iterations after
+ // which every Phi is guaranteed to become an invariant, and try to peel the
+ // maximum number of iterations among these values, thus turning all those
+ // Phis into invariants.
+ // First, check that we can peel at least one iteration.
+ if (2 * LoopSize <= UP.Threshold && UnrollPeelMaxCount > 0) {
+ // Store the pre-calculated values here.
+ SmallDenseMap<PHINode *, unsigned> IterationsToInvariance;
+ // Now go through all Phis to calculate the number of iterations they
+ // need to become invariants.
+ unsigned DesiredPeelCount = 0;
BasicBlock *BackEdge = L->getLoopLatch();
assert(BackEdge && "Loop is not in simplified form?");
- BasicBlock *Header = L->getHeader();
- // Iterate over Phis to find one with invariant input on back edge.
- bool FoundCandidate = false;
- PHINode *Phi;
- for (auto BI = Header->begin(); isa<PHINode>(&*BI); ++BI) {
- Phi = cast<PHINode>(&*BI);
- Value *Input = Phi->getIncomingValueForBlock(BackEdge);
- if (L->isLoopInvariant(Input)) {
- FoundCandidate = true;
- break;
- }
+ for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI); ++BI) {
+ PHINode *Phi = cast<PHINode>(&*BI);
+ unsigned ToInvariance = calculateIterationsToInvariance(
+ Phi, L, BackEdge, IterationsToInvariance);
+ if (ToInvariance != InfiniteIterationsToInvariance)
+ DesiredPeelCount = std::max(DesiredPeelCount, ToInvariance);
}
- if (FoundCandidate) {
- DEBUG(dbgs() << "Peel one iteration to get rid of " << *Phi
- << " because starting from 2nd iteration it is always"
- << " an invariant\n");
- UP.PeelCount = 1;
+ if (DesiredPeelCount > 0) {
+ // Respect the limits implied by the loop size and the max peel count.
+ unsigned MaxPeelCount = UnrollPeelMaxCount;
+ MaxPeelCount = std::min(MaxPeelCount, UP.Threshold / LoopSize - 1);
+ DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
+ assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
+ DEBUG(dbgs() << "Peel " << DesiredPeelCount << " iteration(s) to turn"
+ << " some Phis into invariants.\n");
+ UP.PeelCount = DesiredPeelCount;
return;
}
}
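A worked instance of the caps applied above, with hypothetical numbers; note the earlier guard 2 * LoopSize <= UP.Threshold guarantees UP.Threshold / LoopSize >= 2, so the "- 1" cannot underflow.

#include <algorithm>

static unsigned cappedPeelCount(unsigned Desired, unsigned Threshold,
                                unsigned LoopSize, unsigned UnrollPeelMax) {
  unsigned MaxPeel = std::min(UnrollPeelMax, Threshold / LoopSize - 1);
  return std::min(Desired, MaxPeel);
}
// e.g. cappedPeelCount(9, 150, 30, 7) == std::min(9u, std::min(7u, 4u)) == 4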
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 127a44df5344..2f575b9d5027 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -3086,7 +3086,7 @@ static bool mergeConditionalStores(BranchInst *PBI, BranchInst *QBI) {
if ((PTB && !HasOnePredAndOneSucc(PTB, PBI->getParent(), QBI->getParent())) ||
(QTB && !HasOnePredAndOneSucc(QTB, QBI->getParent(), PostBB)))
return false;
- if (PostBB->getNumUses() != 2 || QBI->getParent()->getNumUses() != 2)
+ if (!PostBB->hasNUses(2) || !QBI->getParent()->hasNUses(2))
return false;
// OK, this is a sequence of two diamonds or triangles.
diff --git a/contrib/llvm/lib/Transforms/Utils/VNCoercion.cpp b/contrib/llvm/lib/Transforms/Utils/VNCoercion.cpp
index 4aeea02b1b1b..83bd29dbca65 100644
--- a/contrib/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -24,6 +24,11 @@ bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
if (DL.getTypeSizeInBits(StoredVal->getType()) < DL.getTypeSizeInBits(LoadTy))
return false;
+ // Don't coerce non-integral pointers to integers or vice versa.
+ if (DL.isNonIntegralPointerType(StoredVal->getType()) !=
+ DL.isNonIntegralPointerType(LoadTy))
+ return false;
+
return true;
}
diff --git a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 595b2ec88943..7eb8fabe0b2f 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -422,7 +422,8 @@ protected:
// When we if-convert we need to create edge masks. We have to cache values
// so that we don't end up with exponential recursion/IR.
typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
- EdgeMaskCache;
+ EdgeMaskCacheTy;
+ typedef DenseMap<BasicBlock *, VectorParts> BlockMaskCacheTy;
/// Create an empty loop, based on the loop ranges of the old loop.
void createEmptyLoop();
@@ -785,7 +786,8 @@ protected:
/// Store instructions that should be predicated, as a pair
/// <StoreInst, Predicate>
SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
- EdgeMaskCache MaskCache;
+ EdgeMaskCacheTy EdgeMaskCache;
+ BlockMaskCacheTy BlockMaskCache;
/// Trip count of the original loop.
Value *TripCount;
/// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
@@ -4560,8 +4562,8 @@ InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
// Look for cached value.
std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
- EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
- if (ECEntryIt != MaskCache.end())
+ EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
+ if (ECEntryIt != EdgeMaskCache.end())
return ECEntryIt->second;
VectorParts SrcMask = createBlockInMask(Src);
@@ -4580,11 +4582,11 @@ InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
for (unsigned part = 0; part < UF; ++part)
EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
- MaskCache[Edge] = EdgeMask;
+ EdgeMaskCache[Edge] = EdgeMask;
return EdgeMask;
}
- MaskCache[Edge] = SrcMask;
+ EdgeMaskCache[Edge] = SrcMask;
return SrcMask;
}
@@ -4592,10 +4594,17 @@ InnerLoopVectorizer::VectorParts
InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
+ // Look for cached value.
+ BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
+ if (BCEntryIt != BlockMaskCache.end())
+ return BCEntryIt->second;
+
// Loop incoming mask is all-one.
if (OrigLoop->getHeader() == BB) {
Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
- return getVectorValue(C);
+ const VectorParts &BlockMask = getVectorValue(C);
+ BlockMaskCache[BB] = BlockMask;
+ return BlockMask;
}
// This is the block mask. We OR all incoming edges, and with zero.
@@ -4609,6 +4618,7 @@ InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
}
+ BlockMaskCache[BB] = BlockMask;
return BlockMask;
}
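The shape of this fix, as a sketch: every path out of the recursive mask computation now writes the cache, including the loop-header base case, so revisiting a block costs O(1) and the recursion over incoming edges cannot re-expand shared subgraphs (the exponential blow-up the earlier comment warns about). Hypothetical types stand in for the real mask computation, and an acyclic predecessor relation is assumed (the real code treats the loop header as the base case):

#include <map>
#include <vector>

static int blockMaskSketch(int BB,
                           const std::map<int, std::vector<int>> &Preds,
                           std::map<int, int> &Cache) {
  auto Hit = Cache.find(BB);
  if (Hit != Cache.end())
    return Hit->second;                // shared subgraphs computed once
  int Mask = 0;
  auto P = Preds.find(BB);
  if (P != Preds.end())                // no preds: base case, e.g. the header
    for (int Pred : P->second)
      Mask |= blockMaskSketch(Pred, Preds, Cache); // OR of incoming masks
  Cache[BB] = Mask;                    // store on every path before returning
  return Mask;
}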
diff --git a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index da3ac06ab464..554944404708 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -4146,8 +4146,8 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
if (AllowReorder && R.shouldReorder()) {
// Conceptually, there is nothing actually preventing us from trying to
// reorder a larger list. In fact, we do exactly this when vectorizing
- // reductions. However, at this point, we only expect to get here from
- // tryToVectorizePair().
+ // reductions. However, at this point, we only expect to get here when
+ // there are exactly two operations.
assert(Ops.size() == 2);
assert(BuildVectorSlice.empty());
Value *ReorderedOps[] = {Ops[1], Ops[0]};
@@ -4904,7 +4904,13 @@ bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
// Try to vectorize them.
unsigned NumElts = (SameTypeIt - IncIt);
DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n");
- if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
+ // The order in which the phi nodes appear in the program does not matter.
+ // So allow tryToVectorizeList to reorder them if it is beneficial. This
+ // is done when there are exactly two elements since tryToVectorizeList
+ // asserts that there are only two values when AllowReorder is true.
+ bool AllowReorder = NumElts == 2;
+ if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
+ None, AllowReorder)) {
// Success start over because instructions might have been changed.
HaveVectorizedPhiNodes = true;
Changed = true;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
index ad723a3e2b8f..573ea55de1fd 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
@@ -2082,10 +2082,7 @@ public:
const Attr *getUnusedResultAttr() const;
/// \brief Returns true if this function or its return type has the
- /// warn_unused_result attribute. If the return type has the attribute and
- /// this function is a method of the return type's class, then false will be
- /// returned to avoid spurious warnings on member methods such as assignment
- /// operators.
+ /// warn_unused_result attribute.
bool hasUnusedResultAttr() const { return getUnusedResultAttr() != nullptr; }
/// \brief Returns the storage class as written in the source. For the
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtOpenMP.h b/contrib/llvm/tools/clang/include/clang/AST/StmtOpenMP.h
index 13af142ca3ab..463af06fddab 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtOpenMP.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtOpenMP.h
@@ -318,8 +318,9 @@ class OMPLoopDirective : public OMPExecutableDirective {
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
- /// The first 9 children are nesessary for all the loop directives, and
- /// the next 10 are specific to the worksharing ones.
+ /// The first 9 children are necessary for all the loop directives,
+ /// the next 8 are specific to the worksharing ones, and the next 11 are
+ /// used for combined constructs containing two pragmas associated to loops.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
@@ -344,7 +345,7 @@ class OMPLoopDirective : public OMPExecutableDirective {
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 9,
- // The following 12 exprs are used by worksharing and distribute loops only.
+ // The following 8 exprs are used by worksharing and distribute loops only.
IsLastIterVariableOffset = 9,
LowerBoundVariableOffset = 10,
UpperBoundVariableOffset = 11,
@@ -353,13 +354,22 @@ class OMPLoopDirective : public OMPExecutableDirective {
NextLowerBoundOffset = 14,
NextUpperBoundOffset = 15,
NumIterationsOffset = 16,
+ // Offset to the end for worksharing loop directives.
+ WorksharingEnd = 17,
PrevLowerBoundVariableOffset = 17,
PrevUpperBoundVariableOffset = 18,
DistIncOffset = 19,
PrevEnsureUpperBoundOffset = 20,
+ CombinedLowerBoundVariableOffset = 21,
+ CombinedUpperBoundVariableOffset = 22,
+ CombinedEnsureUpperBoundOffset = 23,
+ CombinedInitOffset = 24,
+ CombinedConditionOffset = 25,
+ CombinedNextLowerBoundOffset = 26,
+ CombinedNextUpperBoundOffset = 27,
// Offset to the end (and start of the following counters/updates/finals
- // arrays) for worksharing loop directives.
- WorksharingEnd = 21,
+ // arrays) for combined distribute loop directives.
+ CombinedDistributeEnd = 28,
};
/// \brief Get the counters storage.
@@ -423,11 +433,12 @@ protected:
/// \brief Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
- return (isOpenMPWorksharingDirective(Kind) ||
- isOpenMPTaskLoopDirective(Kind) ||
- isOpenMPDistributeDirective(Kind))
- ? WorksharingEnd
- : DefaultEnd;
+ if (isOpenMPLoopBoundSharingDirective(Kind))
+ return CombinedDistributeEnd;
+ if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
+ isOpenMPDistributeDirective(Kind))
+ return WorksharingEnd;
+ return DefaultEnd;
}
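A compact reading of the new layout, mirroring getArraysOffset with the enum values from the block above: the counters/updates/finals arrays begin right after the fixed children.

static unsigned arraysOffsetSketch(bool LoopBoundSharing, bool Worksharing) {
  if (LoopBoundSharing) return 28; // CombinedDistributeEnd
  if (Worksharing)      return 17; // WorksharingEnd (also taskloop/distribute)
  return 9;                        // DefaultEnd
}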
/// \brief Children number.
@@ -515,33 +526,60 @@ protected:
*std::next(child_begin(), NumIterationsOffset) = NI;
}
void setPrevLowerBoundVariable(Expr *PrevLB) {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
*std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
*std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
}
void setDistInc(Expr *DistInc) {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
*std::next(child_begin(), DistIncOffset) = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
*std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
}
+ void setCombinedLowerBoundVariable(Expr *CombLB) {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
+ }
+ void setCombinedUpperBoundVariable(Expr *CombUB) {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
+ }
+ void setCombinedEnsureUpperBound(Expr *CombEUB) {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
+ }
+ void setCombinedInit(Expr *CombInit) {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ *std::next(child_begin(), CombinedInitOffset) = CombInit;
+ }
+ void setCombinedCond(Expr *CombCond) {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ *std::next(child_begin(), CombinedConditionOffset) = CombCond;
+ }
+ void setCombinedNextLowerBound(Expr *CombNLB) {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
+ }
+ void setCombinedNextUpperBound(Expr *CombNUB) {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
+ }
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
@@ -549,6 +587,33 @@ protected:
void setFinals(ArrayRef<Expr *> A);
public:
+ /// The expressions built to support OpenMP loops in combined/composite
+ /// pragmas (e.g. pragma omp distribute parallel for)
+ struct DistCombinedHelperExprs {
+ /// DistributeLowerBound - used when composing 'omp distribute' with
+ /// 'omp for' in the same construct.
+ Expr *LB;
+ /// DistributeUpperBound - used when composing 'omp distribute' with
+ /// 'omp for' in the same construct.
+ Expr *UB;
+ /// DistributeEnsureUpperBound - used when composing 'omp distribute'
+ /// with 'omp for' in the same construct; EUB depends on DistUB.
+ Expr *EUB;
+ /// Distribute loop iteration variable init used when composing
+ /// 'omp distribute' with 'omp for' in the same construct.
+ Expr *Init;
+ /// Distribute loop condition used when composing 'omp distribute'
+ /// with 'omp for' in the same construct.
+ Expr *Cond;
+ /// Update of LowerBound for statically scheduled omp loops for the
+ /// outer loop in combined constructs (e.g. 'distribute parallel for').
+ Expr *NLB;
+ /// Update of UpperBound for statically scheduled omp loops for the
+ /// outer loop in combined constructs (e.g. 'distribute parallel for').
+ Expr *NUB;
+ };
+
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
@@ -611,6 +676,9 @@ public:
/// Init statement for all captured expressions.
Stmt *PreInits;
+ /// Expressions used when combining OpenMP loop pragmas
+ DistCombinedHelperExprs DistCombinedFields;
+
/// \brief Check if all the expressions are built (does not check the
/// worksharing ones).
bool builtAll() {
@@ -654,6 +722,13 @@ public:
Finals[i] = nullptr;
}
PreInits = nullptr;
+ DistCombinedFields.LB = nullptr;
+ DistCombinedFields.UB = nullptr;
+ DistCombinedFields.EUB = nullptr;
+ DistCombinedFields.Init = nullptr;
+ DistCombinedFields.Cond = nullptr;
+ DistCombinedFields.NLB = nullptr;
+ DistCombinedFields.NUB = nullptr;
}
};
@@ -757,37 +832,71 @@ public:
*std::next(child_begin(), NumIterationsOffset)));
}
Expr *getPrevLowerBoundVariable() const {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PrevLowerBoundVariableOffset)));
}
Expr *getPrevUpperBoundVariable() const {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PrevUpperBoundVariableOffset)));
}
Expr *getDistInc() const {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), DistIncOffset)));
}
Expr *getPrevEnsureUpperBound() const {
- assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
- isOpenMPTaskLoopDirective(getDirectiveKind()) ||
- isOpenMPDistributeDirective(getDirectiveKind())) &&
- "expected worksharing loop directive");
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PrevEnsureUpperBoundOffset)));
}
+ Expr *getCombinedLowerBoundVariable() const {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ return const_cast<Expr *>(reinterpret_cast<const Expr *>(
+ *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
+ }
+ Expr *getCombinedUpperBoundVariable() const {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ return const_cast<Expr *>(reinterpret_cast<const Expr *>(
+ *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
+ }
+ Expr *getCombinedEnsureUpperBound() const {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ return const_cast<Expr *>(reinterpret_cast<const Expr *>(
+ *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
+ }
+ Expr *getCombinedInit() const {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ return const_cast<Expr *>(reinterpret_cast<const Expr *>(
+ *std::next(child_begin(), CombinedInitOffset)));
+ }
+ Expr *getCombinedCond() const {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ return const_cast<Expr *>(reinterpret_cast<const Expr *>(
+ *std::next(child_begin(), CombinedConditionOffset)));
+ }
+ Expr *getCombinedNextLowerBound() const {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ return const_cast<Expr *>(reinterpret_cast<const Expr *>(
+ *std::next(child_begin(), CombinedNextLowerBoundOffset)));
+ }
+ Expr *getCombinedNextUpperBound() const {
+ assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
+ "expected loop bound sharing directive");
+ return const_cast<Expr *>(reinterpret_cast<const Expr *>(
+ *std::next(child_begin(), CombinedNextUpperBoundOffset)));
+ }
const Stmt *getBody() const {
// This relies on the loop form having already been checked by Sema.
Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
index 525f848a9fab..ad95f6f8effa 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
@@ -1544,7 +1544,11 @@ class DependentSizedArrayTypeLoc :
public InheritingConcreteTypeLoc<ArrayTypeLoc,
DependentSizedArrayTypeLoc,
DependentSizedArrayType> {
-
+public:
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ ArrayTypeLoc::initializeLocal(Context, Loc);
+ setSizeExpr(getTypePtr()->getSizeExpr());
+ }
};
class VariableArrayTypeLoc :
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
index c5d2c7fc618b..44893fbd036c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -248,6 +248,8 @@ def COnly : LangOpt<"CPlusPlus", 1>;
def CPlusPlus : LangOpt<"CPlusPlus">;
def OpenCL : LangOpt<"OpenCL">;
def RenderScript : LangOpt<"RenderScript">;
+def ObjC : LangOpt<"ObjC1">;
+def BlocksSupported : LangOpt<"Blocks">;
// Defines targets for target-specific attributes. The list of strings should
// specify architectures for which the target applies, based off the ArchType
@@ -270,6 +272,112 @@ def TargetMicrosoftCXXABI : TargetArch<["x86", "x86_64", "arm", "thumb"]> {
let CXXABIs = ["Microsoft"];
}
+// Attribute subject match rules that are used for #pragma clang attribute.
+//
+// An instance of AttrSubjectMatcherRule represents an individual match rule.
+// An individual match rule can correspond to a number of different attribute
+// subjects, e.g. "record" matching rule corresponds to the Record and
+// CXXRecord attribute subjects.
+//
+// Match rules are used in the subject list of the #pragma clang attribute.
+// Match rules can have sub-match rules that are instances of
+// AttrSubjectMatcherSubRule. A sub-match rule can correspond to a number
+// of different attribute subjects, and it can have a negated spelling as well.
+// For example, the "variable(unless(is_parameter))" match rule corresponds to
+// the NonParmVar attribute subject.
+class AttrSubjectMatcherSubRule<string name, list<AttrSubject> subjects,
+ bit negated = 0> {
+ string Name = name;
+ list<AttrSubject> Subjects = subjects;
+ bit Negated = negated;
+ // Lists language options, one of which is required to be true for the
+ // attribute to be applicable. If empty, the language options are taken
+ // from the parent matcher rule.
+ list<LangOpt> LangOpts = [];
+}
+class AttrSubjectMatcherRule<string name, list<AttrSubject> subjects,
+ list<AttrSubjectMatcherSubRule> subrules = []> {
+ string Name = name;
+ list<AttrSubject> Subjects = subjects;
+ list<AttrSubjectMatcherSubRule> Constraints = subrules;
+ // Lists language options, one of which is required to be true for the
+ // attribute to be applicable. If empty, no language options are required.
+ list<LangOpt> LangOpts = [];
+}
+
+// function(is_member)
+def SubRuleForCXXMethod : AttrSubjectMatcherSubRule<"is_member", [CXXMethod]> {
+ let LangOpts = [CPlusPlus];
+}
+def SubjectMatcherForFunction : AttrSubjectMatcherRule<"function", [Function], [
+ SubRuleForCXXMethod
+]>;
+// hasType is abstract, it should be used with one of the sub-rules.
+def SubjectMatcherForType : AttrSubjectMatcherRule<"hasType", [], [
+ AttrSubjectMatcherSubRule<"functionType", [FunctionLike]>
+
+ // FIXME: There's a matcher ambiguity with objc methods and blocks since
+ // functionType excludes them but functionProtoType includes them.
+ // AttrSubjectMatcherSubRule<"functionProtoType", [HasFunctionProto]>
+]>;
+def SubjectMatcherForTypedef : AttrSubjectMatcherRule<"type_alias",
+ [TypedefName]>;
+def SubjectMatcherForRecord : AttrSubjectMatcherRule<"record", [Record,
+ CXXRecord], [
+ // unless(is_union)
+ AttrSubjectMatcherSubRule<"is_union", [Struct], 1>
+]>;
+def SubjectMatcherForEnum : AttrSubjectMatcherRule<"enum", [Enum]>;
+def SubjectMatcherForEnumConstant : AttrSubjectMatcherRule<"enum_constant",
+ [EnumConstant]>;
+def SubjectMatcherForVar : AttrSubjectMatcherRule<"variable", [Var], [
+ AttrSubjectMatcherSubRule<"is_thread_local", [TLSVar]>,
+ AttrSubjectMatcherSubRule<"is_global", [GlobalVar]>,
+ AttrSubjectMatcherSubRule<"is_parameter", [ParmVar]>,
+ // unless(is_parameter)
+ AttrSubjectMatcherSubRule<"is_parameter", [NonParmVar], 1>
+]>;
+def SubjectMatcherForField : AttrSubjectMatcherRule<"field", [Field]>;
+def SubjectMatcherForNamespace : AttrSubjectMatcherRule<"namespace",
+ [Namespace]> {
+ let LangOpts = [CPlusPlus];
+}
+def SubjectMatcherForObjCInterface : AttrSubjectMatcherRule<"objc_interface",
+ [ObjCInterface]> {
+ let LangOpts = [ObjC];
+}
+def SubjectMatcherForObjCProtocol : AttrSubjectMatcherRule<"objc_protocol",
+ [ObjCProtocol]> {
+ let LangOpts = [ObjC];
+}
+def SubjectMatcherForObjCCategory : AttrSubjectMatcherRule<"objc_category",
+ [ObjCCategory]> {
+ let LangOpts = [ObjC];
+}
+def SubjectMatcherForObjCMethod : AttrSubjectMatcherRule<"objc_method",
+ [ObjCMethod], [
+ AttrSubjectMatcherSubRule<"is_instance", [ObjCInstanceMethod]>
+]> {
+ let LangOpts = [ObjC];
+}
+def SubjectMatcherForObjCProperty : AttrSubjectMatcherRule<"objc_property",
+ [ObjCProperty]> {
+ let LangOpts = [ObjC];
+}
+def SubjectMatcherForBlock : AttrSubjectMatcherRule<"block", [Block]> {
+ let LangOpts = [BlocksSupported];
+}
+
+// Aggregate attribute subject match rules are abstract match rules that can't
+// be used directly in #pragma clang attribute. Instead, users have to use
+// subject match rules that correspond to attribute subjects that derive from
+// the specified subject.
+class AttrSubjectMatcherAggregateRule<AttrSubject subject> {
+ AttrSubject Subject = subject;
+}
+
+def SubjectMatcherForNamed : AttrSubjectMatcherAggregateRule<Named>;
+
class Attr {
// The various ways in which an attribute can be spelled in source
list<Spelling> Spellings;
@@ -305,6 +413,14 @@ class Attr {
// Set to true if this attribute is meaningful when applied to or inherited
// in a class template definition.
bit MeaningfulToClassTemplateDefinition = 0;
+ // Set to true if this attribute can be used with '#pragma clang attribute'.
+ // By default, when this value is false, an attribute is supported by
+ // '#pragma clang attribute' only when:
+ // - It has documentation.
+ // - It has a subject list whose subjects can be represented using subject
+ // match rules.
+ // - It has GNU/CXX11 spelling and doesn't require delayed parsing.
+ bit ForcePragmaAttributeSupport = 0;
// Lists language options, one of which is required to be true for the
// attribute to be applicable. If empty, no language options are required.
list<LangOpt> LangOpts = [];
@@ -478,6 +594,9 @@ def AnalyzerNoReturn : InheritableAttr {
def Annotate : InheritableParamAttr {
let Spellings = [GNU<"annotate">];
let Args = [StringArgument<"Annotation">];
+ // Ensure that the annotate attribute can be used with
+ // '#pragma clang attribute' even though it has no subject list.
+ let ForcePragmaAttributeSupport = 1;
let Documentation = [Undocumented];
}
@@ -536,7 +655,7 @@ def Availability : InheritableAttr {
} }];
let HasCustomParsing = 1;
let DuplicatesAllowedWhileMerging = 1;
-// let Subjects = SubjectList<[Named]>;
+ let Subjects = SubjectList<[Named]>;
let Documentation = [AvailabilityDocs];
}
@@ -547,7 +666,7 @@ def ExternalSourceSymbol : InheritableAttr {
StringArgument<"definedIn", 1>,
BoolArgument<"generatedDeclaration", 1>];
let HasCustomParsing = 1;
-// let Subjects = SubjectList<[Named]>;
+ let Subjects = SubjectList<[Named]>;
let Documentation = [ExternalSourceSymbolDocs];
}
@@ -2242,9 +2361,8 @@ def DLLImport : InheritableAttr, TargetSpecificAttr<TargetWindows> {
let Documentation = [DLLImportDocs];
}
-def SelectAny : InheritableAttr {
- let Spellings = [Declspec<"selectany">];
- let LangOpts = [MicrosoftExt];
+def SelectAny : InheritableAttr, TargetSpecificAttr<TargetWindows> {
+ let Spellings = [Declspec<"selectany">, GCC<"selectany">];
let Documentation = [Undocumented];
}
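
A short sketch of what the SelectAny hunk above enables, assuming a Windows target (the attribute becomes TargetWindows-specific and gains a GNU spelling alongside __declspec):

// Both spellings place the definition in a COMDAT so that duplicate
// definitions across translation units are merged by the linker.
__declspec(selectany) int config_flag = 1;        // existing MS spelling
__attribute__((selectany)) int other_flag = 2;    // GNU spelling added above
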
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AttrSubjectMatchRules.h b/contrib/llvm/tools/clang/include/clang/Basic/AttrSubjectMatchRules.h
new file mode 100644
index 000000000000..4c88adf57f17
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AttrSubjectMatchRules.h
@@ -0,0 +1,32 @@
+//===-- AttrSubjectMatchRules.h - Attribute subject match rules -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_ATTR_SUBJECT_MATCH_RULES_H
+#define LLVM_CLANG_BASIC_ATTR_SUBJECT_MATCH_RULES_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+namespace attr {
+
+/// \brief A list of all the recognized attribute subject match rules.
+enum SubjectMatchRule {
+#define ATTR_MATCH_RULE(X, Spelling, IsAbstract) X,
+#include "clang/Basic/AttrSubMatchRulesList.inc"
+};
+
+const char *getSubjectMatchRuleSpelling(SubjectMatchRule Rule);
+
+using ParsedSubjectMatchRuleSet = llvm::DenseMap<int, SourceRange>;
+
+} // end namespace attr
+} // end namespace clang
+
+#endif
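
The enum above is populated by an X-macro over a TableGen-generated .inc file. A minimal self-contained analog of the pattern (the file and rule names here are illustrative, not the generated ones):

// Suppose rules.inc contains lines such as:
//   ATTR_MATCH_RULE(SubjectMatchRule_function, "function", false)
//   ATTR_MATCH_RULE(SubjectMatchRule_variable, "variable", false)
// Each inclusion site defines the macro to extract the column it needs:
#define ATTR_MATCH_RULE(Name, Spelling, IsAbstract) Name,
enum DemoRule {
#include "rules.inc"   // hypothetical file standing in for the real .inc
};
#undef ATTR_MATCH_RULE
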
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
index 9f5f9888a819..4cde1c81fd4d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -36,7 +36,9 @@ def GNUCompoundLiteralInitializer : DiagGroup<"gnu-compound-literal-initializer"
def BitFieldConstantConversion : DiagGroup<"bitfield-constant-conversion">;
def BitFieldEnumConversion : DiagGroup<"bitfield-enum-conversion">;
def BitFieldWidth : DiagGroup<"bitfield-width">;
-def Coroutine : DiagGroup<"coroutine">;
+def CoroutineMissingUnhandledException :
+ DiagGroup<"coroutine-missing-unhandled-exception">;
+def Coroutine : DiagGroup<"coroutine", [CoroutineMissingUnhandledException]>;
def ConstantConversion :
DiagGroup<"constant-conversion", [ BitFieldConstantConversion ] >;
def LiteralConversion : DiagGroup<"literal-conversion">;
@@ -459,7 +461,9 @@ def Uninitialized : DiagGroup<"uninitialized", [UninitializedSometimes,
def IgnoredPragmaIntrinsic : DiagGroup<"ignored-pragma-intrinsic">;
def UnknownPragmas : DiagGroup<"unknown-pragmas">;
def IgnoredPragmas : DiagGroup<"ignored-pragmas", [IgnoredPragmaIntrinsic]>;
-def Pragmas : DiagGroup<"pragmas", [UnknownPragmas, IgnoredPragmas]>;
+def PragmaClangAttribute : DiagGroup<"pragma-clang-attribute">;
+def Pragmas : DiagGroup<"pragmas", [UnknownPragmas, IgnoredPragmas,
+ PragmaClangAttribute]>;
def UnknownWarningOption : DiagGroup<"unknown-warning-option">;
def NSobjectAttribute : DiagGroup<"NSObject-attribute">;
def IndependentClassAttribute : DiagGroup<"IndependentClass-attribute">;
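
A sketch of how the regrouped diagnostics can be controlled, based on the nesting above (disabling a parent group also disables its members):

//   -Wno-coroutine                              also silences the new child
//                                               group nested under it
//   -Wno-coroutine-missing-unhandled-exception  silences only the new warning
//   -Wno-pragma-clang-attribute                 silences only the new pragma
//                                               warning; -Wno-pragmas still
//                                               covers it via the parent group
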
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
index 7f7022b49e3d..cf33d5fba3d7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -242,6 +242,7 @@ def warn_bad_character_encoding : ExtWarn<
"illegal character encoding in character literal">,
InGroup<InvalidSourceEncoding>;
def err_lexing_string : Error<"failure when lexing a string">;
+def err_placeholder_in_source : Error<"editor placeholder in source file">;
//===----------------------------------------------------------------------===//
@@ -594,8 +595,6 @@ def err_mmap_expected_mmap_file : Error<"expected a module map file name">;
def err_mmap_module_redefinition : Error<
"redefinition of module '%0'">;
def note_mmap_prev_definition : Note<"previously defined here">;
-def err_mmap_umbrella_dir_not_found : Error<
- "umbrella directory '%0' not found">;
def err_mmap_umbrella_clash : Error<
"umbrella for module '%0' already covers this directory">;
def err_mmap_module_id : Error<
@@ -656,6 +655,9 @@ def note_implicit_top_level_module_import_here : Note<
def warn_uncovered_module_header : Warning<
"umbrella header for module '%0' does not include header '%1'">,
InGroup<IncompleteUmbrella>;
+def warn_mmap_umbrella_dir_not_found : Warning<
+ "umbrella directory '%0' not found">,
+ InGroup<IncompleteUmbrella>;
def err_expected_id_building_module : Error<
"expected a module name in '__building_module' expression">;
def warn_use_of_private_header_outside_module : Warning<
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
index aebf8a9f3574..d95e43c10c55 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -979,6 +979,43 @@ def err_pragma_optimize_invalid_argument : Error<
"expected 'on' or 'off'">;
def err_pragma_optimize_extra_argument : Error<
"unexpected extra argument '%0' to '#pragma clang optimize'">;
+// - #pragma clang attribute
+def err_pragma_attribute_expected_push_pop : Error<
+ "expected 'push' or 'pop' after '#pragma clang attribute'">;
+def err_pragma_attribute_invalid_argument : Error<
+ "unexpected argument '%0' to '#pragma clang attribute'; "
+ "expected 'push' or 'pop'">;
+def err_pragma_attribute_expected_attribute : Error<
+ "expected an attribute after '('">;
+def err_pragma_attribute_expected_attribute_name : Error<
+ "expected identifier that represents an attribute name">;
+def err_pragma_attribute_extra_tokens_after_attribute : Error<
+ "extra tokens after attribute in a '#pragma clang attribute push'">;
+def err_pragma_attribute_unsupported_attribute : Error<
+ "attribute %0 is not supported by '#pragma clang attribute'">;
+def err_pragma_attribute_multiple_attributes : Error<
+ "more than one attribute specified in '#pragma clang attribute push'">;
+def err_pragma_attribute_expected_attribute_syntax : Error<
+ "expected an attribute that is specified using the GNU, C++11 or '__declspec'"
+ " syntax">;
+def note_pragma_attribute_use_attribute_kw : Note<"use the GNU '__attribute__' "
+ "syntax">;
+def err_pragma_attribute_invalid_subject_set_specifier : Error<
+ "expected attribute subject set specifier 'apply_to'">;
+def err_pragma_attribute_expected_subject_identifier : Error<
+ "expected an identifier that corresponds to an attribute subject rule">;
+def err_pragma_attribute_unknown_subject_rule : Error<
+ "unknown attribute subject rule '%0'">;
+def err_pragma_attribute_expected_subject_sub_identifier : Error<
+ "expected an identifier that corresponds to an attribute subject matcher "
+ "sub-rule; '%0' matcher %select{does not support sub-rules|supports the "
+ "following sub-rules: %2|}1">;
+def err_pragma_attribute_unknown_subject_sub_rule : Error<
+ "%select{invalid use of|unknown}2 attribute subject matcher sub-rule '%0'; "
+ "'%1' matcher %select{does not support sub-rules|supports the following "
+ "sub-rules: %3}2">;
+def err_pragma_attribute_duplicate_subject : Error<
+ "duplicate attribute subject matcher '%0'">;
def err_opencl_unroll_hint_on_non_loop : Error<
"OpenCL only supports 'opencl_unroll_hint' attribute on for, while, and do statements">;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 9b2cfe495ce2..6cb872cc27c5 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -750,6 +750,25 @@ def err_pragma_loop_compatibility : Error<
def err_pragma_loop_precedes_nonloop : Error<
"expected a for, while, or do-while loop to follow '%0'">;
+def err_pragma_attribute_matcher_subrule_contradicts_rule : Error<
+ "redundant attribute subject matcher sub-rule '%0'; '%1' already matches "
+ "those declarations">;
+def err_pragma_attribute_matcher_negated_subrule_contradicts_subrule : Error<
+ "negated attribute subject matcher sub-rule '%0' contradicts sub-rule '%1'">;
+def err_pragma_attribute_invalid_matchers : Error<
+ "attribute %0 can't be applied to %1">;
+def err_pragma_attribute_stack_mismatch : Error<
+ "'#pragma clang attribute pop' with no matching '#pragma clang attribute push'">;
+def warn_pragma_attribute_unused : Warning<
+ "unused attribute %0 in '#pragma clang attribute push' region">,
+ InGroup<PragmaClangAttribute>;
+def note_pragma_attribute_region_ends_here : Note<
+ "'#pragma clang attribute push' regions ends here">;
+def err_pragma_attribute_no_pop_eof : Error<"unterminated "
+ "'#pragma clang attribute push' at end of file">;
+def note_pragma_attribute_applied_decl_here : Note<
+ "when applied to this declaration">;
+
/// Objective-C parser diagnostics
def err_duplicate_class_def : Error<
"duplicate interface definition for class %0">;
@@ -4981,6 +5000,8 @@ def note_protected_by_if_available : Note<
"jump enters controlled statement of if available">;
def note_protected_by_vla : Note<
"jump bypasses initialization of variable length array">;
+def note_protected_by_objc_fast_enumeration : Note<
+ "jump enters Objective-C fast enumeration loop">;
def note_protected_by_objc_try : Note<
"jump bypasses initialization of @try block">;
def note_protected_by_objc_catch : Note<
@@ -8854,6 +8875,11 @@ def err_coroutine_invalid_func_context : Error<
def err_implied_coroutine_type_not_found : Error<
"%0 type was not found; include <experimental/coroutine> before defining "
"a coroutine">;
+def err_implicit_coroutine_std_nothrow_type_not_found : Error<
+ "std::nothrow was not found; include <new> before defining a coroutine which "
+ "uses get_return_object_on_allocation_failure()">;
+def err_malformed_std_nothrow : Error<
+ "std::nothrow must be a valid variable declaration">;
def err_malformed_std_coroutine_handle : Error<
"std::experimental::coroutine_handle must be a class template">;
def err_coroutine_handle_missing_member : Error<
@@ -8873,16 +8899,21 @@ def err_coroutine_promise_return_ill_formed : Error<
"%0 declares both 'return_value' and 'return_void'">;
def note_coroutine_promise_implicit_await_transform_required_here : Note<
"call to 'await_transform' implicitly required by 'co_await' here">;
-def note_coroutine_promise_call_implicitly_required : Note<
+def note_coroutine_promise_suspend_implicitly_required : Note<
"call to '%select{initial_suspend|final_suspend}0' implicitly "
"required by the %select{initial suspend point|final suspend point}0">;
def err_coroutine_promise_unhandled_exception_required : Error<
"%0 is required to declare the member 'unhandled_exception()'">;
def warn_coroutine_promise_unhandled_exception_required_with_exceptions : Warning<
"%0 is required to declare the member 'unhandled_exception()' when exceptions are enabled">,
- InGroup<Coroutine>;
+ InGroup<CoroutineMissingUnhandledException>;
def err_coroutine_promise_get_return_object_on_allocation_failure : Error<
"%0: 'get_return_object_on_allocation_failure()' must be a static member function">;
+def err_coroutine_promise_new_requires_nothrow : Error<
+ "%0 is required to have a non-throwing noexcept specification when the promise "
+ "type declares 'get_return_object_on_allocation_failure()'">;
+def note_coroutine_promise_call_implicitly_required : Note<
+ "call to %0 implicitly required by coroutine function here">;
}
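
A sketch of the contract the new coroutine diagnostics enforce, assuming the coroutines TS headers and -fcoroutines-ts: when the promise declares get_return_object_on_allocation_failure(), allocation must be non-throwing, and std::nothrow must be visible if the implicit ::operator new(size, std::nothrow) form is used.

#include <experimental/coroutine>
#include <new>   // brings std::nothrow into scope for the implicit path

struct task {
  struct promise_type {
    // Presence of this member switches coroutine allocation to a
    // null-returning scheme, triggering the checks added above.
    static task get_return_object_on_allocation_failure() { return {}; }
    task get_return_object() { return {}; }
    std::experimental::suspend_never initial_suspend() { return {}; }
    std::experimental::suspend_never final_suspend() { return {}; }
    void return_void() {}
    void unhandled_exception() {}   // also required when exceptions are on
    // A user-provided operator new must carry a noexcept specification,
    // or err_coroutine_promise_new_requires_nothrow is emitted.
    void *operator new(std::size_t Sz) noexcept {
      return ::operator new(Sz, std::nothrow);
    }
    void operator delete(void *P) noexcept { ::operator delete(P); }
  };
};
task f() { co_return; }
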
let CategoryName = "Documentation Issue" in {
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
index a5fd14104d3c..9b1ba4a98e6f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
@@ -355,6 +355,19 @@ public:
RecomputeNeedsHandleIdentifier();
}
+ /// Return true if this identifier is an editor placeholder.
+ ///
+ /// Editor placeholders are produced by the code-completion engine and are
+ /// represented as characters between '<#' and '#>' in the source code. An
+ /// example of an auto-completed call with a placeholder parameter is shown
+ /// below:
+ /// \code
+ /// function(<#int x#>);
+ /// \endcode
+ bool isEditorPlaceholder() const {
+ return getName().startswith("<#") && getName().endswith("#>");
+ }
+
/// \brief Provide less than operator for lexicographical sorting.
bool operator<(const IdentifierInfo &RHS) const {
return getName() < RHS.getName();
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
index c8e197299754..6ae34a89fe28 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
@@ -266,6 +266,8 @@ LANGOPT(SanitizeAddressFieldPadding, 2, 0, "controls how aggressive is ASan "
LANGOPT(XRayInstrument, 1, 0, "controls whether to do XRay instrumentation")
+LANGOPT(AllowEditorPlaceholders, 1, 0, "allow editor placeholders in source")
+
#undef LANGOPT
#undef COMPATIBLE_LANGOPT
#undef BENIGN_LANGOPT
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
index 48e0c33f0e86..968b203a3827 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
@@ -790,6 +790,9 @@ ANNOTATION(pragma_loop_hint)
ANNOTATION(pragma_fp)
+// Annotation for the attribute pragma directives - #pragma clang attribute ...
+ANNOTATION(pragma_attribute)
+
// Annotations for module import translated from #include etc.
ANNOTATION(module_include)
ANNOTATION(module_begin)
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CLCompatOptions.td b/contrib/llvm/tools/clang/include/clang/Driver/CLCompatOptions.td
index 9b6ab3a5ef2b..61902653e210 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CLCompatOptions.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CLCompatOptions.td
@@ -172,6 +172,12 @@ def _SLASH_Zc_trigraphs : CLFlag<"Zc:trigraphs">,
HelpText<"Enable trigraphs">, Alias<ftrigraphs>;
def _SLASH_Zc_trigraphs_off : CLFlag<"Zc:trigraphs-">,
HelpText<"Disable trigraphs (default)">, Alias<fno_trigraphs>;
+def _SLASH_Zc_twoPhase : CLFlag<"Zc:twoPhase">,
+ HelpText<"Enable two-phase name lookup in templates">,
+ Alias<fno_delayed_template_parsing>;
+def _SLASH_Zc_twoPhase_ : CLFlag<"Zc:twoPhase-">,
+ HelpText<"Disable two-phase name lookup in templates">,
+ Alias<fdelayed_template_parsing>;
def _SLASH_Z7 : CLFlag<"Z7">,
HelpText<"Enable CodeView debug information in object files">;
def _SLASH_Zd : CLFlag<"Zd">,
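
A sketch of code whose acceptance the new /Zc:twoPhase flags toggle:

template <typename T>
void call(T t) {
  g(42);   // non-dependent: under two-phase lookup (/Zc:twoPhase, i.e.
           // -fno-delayed-template-parsing) this is an error because g is
           // not declared yet; with /Zc:twoPhase- the body is only parsed
           // at instantiation, where g is visible, so it compiles.
}
void g(int) {}
void use() { call(1); }
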
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
index 36b24a02b2fe..1272a36ecc70 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -1487,6 +1487,12 @@ def fstrict_return : Flag<["-"], "fstrict-return">, Group<f_Group>,
def fno_strict_return : Flag<["-"], "fno-strict-return">, Group<f_Group>,
Flags<[CC1Option]>;
+def fallow_editor_placeholders : Flag<["-"], "fallow-editor-placeholders">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Treat editor placeholders as valid source code">;
+def fno_allow_editor_placeholders : Flag<["-"],
+ "fno-allow-editor-placeholders">, Group<f_Group>;
+
def fdebug_types_section: Flag <["-"], "fdebug-types-section">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Place debug types in their own section (ELF Only)">;
def fno_debug_types_section: Flag<["-"], "fno-debug-types-section">, Group<f_Group>,
@@ -1646,6 +1652,8 @@ def march_EQ : Joined<["-"], "march=">, Group<m_Group>;
def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>, Flags<[DriverOption]>;
def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>;
def mimplicit_it_EQ : Joined<["-"], "mimplicit-it=">, Group<m_Group>;
+def mdefault_build_attributes : Joined<["-"], "mdefault-build-attributes">, Group<m_Group>;
+def mno_default_build_attributes : Joined<["-"], "mno-default-build-attributes">, Group<m_Group>;
def mconstant_cfstrings : Flag<["-"], "mconstant-cfstrings">, Group<clang_ignored_m_Group>;
def mconsole : Joined<["-"], "mconsole">, Group<m_Group>, Flags<[DriverOption]>;
def mwindows : Joined<["-"], "mwindows">, Group<m_Group>, Flags<[DriverOption]>;
@@ -1653,6 +1661,7 @@ def mdll : Joined<["-"], "mdll">, Group<m_Group>, Flags<[DriverOption]>;
def municode : Joined<["-"], "municode">, Group<m_Group>, Flags<[DriverOption]>;
def mthreads : Joined<["-"], "mthreads">, Group<m_Group>, Flags<[DriverOption]>;
def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>;
+def mmcu_EQ : Joined<["-"], "mmcu=">, Group<m_Group>;
def mdynamic_no_pic : Joined<["-"], "mdynamic-no-pic">, Group<m_Group>;
def mfix_and_continue : Flag<["-"], "mfix-and-continue">, Group<clang_ignored_m_Group>;
def mieee_fp : Flag<["-"], "mieee-fp">, Group<clang_ignored_m_Group>;
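
A sketch of the editor-placeholder flag added above; the placeholder token form follows the IdentifierTable and Token changes elsewhere in this patch:

// clang -fsyntax-only -fallow-editor-placeholders snippet.cpp
// With the flag, '<#int value#>' lexes as one placeholder identifier token
// (Token::IsEditorPlaceholder) instead of producing a cascade of parse
// errors from the raw '<' and '#' characters; the help text above calls
// this treating editor placeholders as valid source code.
void takesInt(int);
void snippet() {
  takesInt(<#int value#>);   // placeholder produced by code completion
}
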
diff --git a/contrib/llvm/tools/clang/include/clang/Index/IndexSymbol.h b/contrib/llvm/tools/clang/include/clang/Index/IndexSymbol.h
index 217d6b1fb1cc..bc34938fb405 100644
--- a/contrib/llvm/tools/clang/include/clang/Index/IndexSymbol.h
+++ b/contrib/llvm/tools/clang/include/clang/Index/IndexSymbol.h
@@ -106,8 +106,9 @@ enum class SymbolRole : uint32_t {
RelationAccessorOf = 1 << 15,
RelationContainedBy = 1 << 16,
RelationIBTypeOf = 1 << 17,
+ RelationSpecializationOf = 1 << 18,
};
-static const unsigned SymbolRoleBitNum = 18;
+static const unsigned SymbolRoleBitNum = 19;
typedef unsigned SymbolRoleSet;
/// Represents a relation to another symbol for a symbol occurrence.
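
Since roles are stored as bits in a SymbolRoleSet, the new relation is queried the same way as the existing ones; a minimal sketch:

#include "clang/Index/IndexSymbol.h"

static bool isSpecializationOf(clang::index::SymbolRoleSet Roles) {
  return Roles &
         (unsigned)clang::index::SymbolRole::RelationSpecializationOf;
}
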
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
index 830c25a2e4d2..6ac6316d1248 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
@@ -638,6 +638,8 @@ private:
bool IsStartOfConflictMarker(const char *CurPtr);
bool HandleEndOfConflictMarker(const char *CurPtr);
+ bool lexEditorPlaceholder(Token &Result, const char *CurPtr);
+
bool isCodeCompletionPoint(const char *CurPtr) const;
void cutOffLexing() { BufferPtr = BufferEnd; }
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Token.h b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
index 4393e205ffaf..02a1fef70f2b 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
@@ -84,6 +84,7 @@ public:
StringifiedInMacro = 0x100, // This string or character literal is formed by
// macro stringizing or charizing operator.
CommaAfterElided = 0x200, // The comma following this token was elided (MS).
+ IsEditorPlaceholder = 0x400, // This identifier is a placeholder.
};
tok::TokenKind getKind() const { return Kind; }
@@ -298,6 +299,13 @@ public:
/// Returns true if the comma after this token was elided.
bool commaAfterElided() const { return getFlag(CommaAfterElided); }
+
+ /// Returns true if this token is an editor placeholder.
+ ///
+ /// Editor placeholders are produced by the code-completion engine and are
+ /// represented as characters between '<#' and '#>' in the source code. The
+ /// lexer uses identifier tokens to represent placeholders.
+ bool isEditorPlaceholder() const { return getFlag(IsEditorPlaceholder); }
};
/// \brief Information about the conditional stack (\#if directives)
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
index 5f4e5fb4b215..8d0935dec1b6 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
@@ -184,6 +184,7 @@ class Parser : public CodeCompletionHandler {
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
+ std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
@@ -565,6 +566,12 @@ private:
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
+ bool ParsePragmaAttributeSubjectMatchRuleSet(
+ attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
+ SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
+
+ void HandlePragmaAttribute();
+
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
index 7c1678086c2f..f3b042c9ce79 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CLANG_SEMA_ATTRIBUTELIST_H
#define LLVM_CLANG_SEMA_ATTRIBUTELIST_H
+#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/VersionTuple.h"
@@ -509,9 +510,14 @@ public:
unsigned getMaxArgs() const;
bool hasVariadicArg() const;
bool diagnoseAppertainsTo(class Sema &S, const Decl *D) const;
+ bool appliesToDecl(const Decl *D, attr::SubjectMatchRule MatchRule) const;
+ void getMatchRules(const LangOptions &LangOpts,
+ SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>>
+ &MatchRules) const;
bool diagnoseLangOpts(class Sema &S) const;
bool existsInTarget(const TargetInfo &Target) const;
bool isKnownToGCC() const;
+ bool isSupportedByPragmaAttribute() const;
/// \brief If the parsed attribute has a semantic equivalent, and it would
/// have a semantic Spelling enumeration (due to having semantically-distinct
@@ -774,6 +780,8 @@ public:
void clear() { list = nullptr; pool.clear(); }
AttributeList *getList() const { return list; }
+ void clearListOnly() { list = nullptr; }
+
/// Returns a reference to the attribute list. Try not to introduce
/// dependencies on this method, it may not be long-lived.
AttributeList *&getListRef() { return list; }
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
index 5a3cdfb77c9c..bd68842c9f73 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -437,6 +437,20 @@ public:
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
+ /// \brief This represents the stack of attributes that were pushed by
+ /// \#pragma clang attribute.
+ struct PragmaAttributeEntry {
+ SourceLocation Loc;
+ AttributeList *Attribute;
+ SmallVector<attr::SubjectMatchRule, 4> MatchRules;
+ bool IsUsed;
+ };
+ SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack;
+
+ /// \brief The declaration that is currently receiving an attribute from the
+ /// #pragma attribute stack.
+ const Decl *PragmaAttributeCurrentTargetDecl;
+
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
@@ -7206,9 +7220,13 @@ public:
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
+ if (PragmaAttributeCurrentTargetDecl)
+ PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
+ void PrintPragmaAttributeInstantiationPoint();
+
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
@@ -8152,6 +8170,20 @@ public:
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
+ /// \brief Called on well-formed '\#pragma clang attribute push'.
+ void ActOnPragmaAttributePush(AttributeList &Attribute,
+ SourceLocation PragmaLoc,
+ attr::ParsedSubjectMatchRuleSet Rules);
+
+ /// \brief Called on well-formed '\#pragma clang attribute pop'.
+ void ActOnPragmaAttributePop(SourceLocation PragmaLoc);
+
+ /// \brief Adds the attributes that have been specified using the
+ /// '\#pragma clang attribute push' directives to the given declaration.
+ void AddPragmaAttributes(Scope *S, Decl *D);
+
+ void DiagnoseUnterminatedPragmaAttribute();
+
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
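
A sketch of the user-visible behavior the IsUsed bookkeeping above supports: a push region whose attribute never matches any declaration is reported under -Wpragma-clang-attribute.

#pragma clang attribute push (__attribute__((annotate("never-applied"))), \
                              apply_to = function)
int x;   // a variable, so the attribute is never applied in this region
#pragma clang attribute pop
// warning: unused attribute 'annotate' in '#pragma clang attribute push'
// region [-Wpragma-clang-attribute]
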
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
index 7b337b061a03..4626052a8acb 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -9414,10 +9414,8 @@ createDynTypedNode(const NestedNameSpecifierLoc &Node) {
if (!NodeOrVector.template is<ASTContext::ParentVector *>()) {
auto *Vector = new ASTContext::ParentVector(
1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
- if (auto *Node =
- NodeOrVector
- .template dyn_cast<ast_type_traits::DynTypedNode *>())
- delete Node;
+ delete NodeOrVector
+ .template dyn_cast<ast_type_traits::DynTypedNode *>();
NodeOrVector = Vector;
}
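
The simplification above is sound because dyn_cast returns null when the cast fails and deleting a null pointer is a no-op; in miniature:

int *p = nullptr;
delete p;   // well-defined no-op, so the guarding if can be dropped
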
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
index 2b22e5bb50a5..094e8dcff088 100644
--- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -3003,9 +3003,7 @@ SourceRange FunctionDecl::getExceptionSpecSourceRange() const {
const Attr *FunctionDecl::getUnusedResultAttr() const {
QualType RetType = getReturnType();
if (RetType->isRecordType()) {
- const CXXRecordDecl *Ret = RetType->getAsCXXRecordDecl();
- const auto *MD = dyn_cast<CXXMethodDecl>(this);
- if (Ret && !(MD && MD->getCorrespondingMethodInClass(Ret, true))) {
+ if (const CXXRecordDecl *Ret = RetType->getAsCXXRecordDecl()) {
if (const auto *R = Ret->getAttr<WarnUnusedResultAttr>())
return R;
}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
index 5d841a197f26..bc8a34c93653 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -478,6 +478,11 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (D->isFunctionTemplateSpecialization())
Out << "template<> ";
+ else if (!D->getDescribedFunctionTemplate()) {
+ for (unsigned I = 0, NumTemplateParams = D->getNumTemplateParameterLists();
+ I < NumTemplateParams; ++I)
+ printTemplateParameters(D->getTemplateParameterList(I));
+ }
CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
@@ -1055,6 +1060,12 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
prettyPrintPragmas(D->getTemplatedDecl());
+ // Print any leading template parameter lists.
+ if (const FunctionDecl *FD = D->getTemplatedDecl()) {
+ for (unsigned I = 0, NumTemplateParams = FD->getNumTemplateParameterLists();
+ I < NumTemplateParams; ++I)
+ printTemplateParameters(FD->getTemplateParameterList(I));
+ }
VisitRedeclarableTemplateDecl(D);
// Never print "instantiations" for deduction guides (they don't really
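
A sketch of a declaration the new loops handle: an out-of-line member of a class template carries a leading template parameter list that the printer previously dropped.

template <typename T> struct Outer {
  template <typename U> void member(U);
};
template <typename T>   // this leading list is now printed
template <typename U>
void Outer<T>::member(U) {}
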
diff --git a/contrib/llvm/tools/clang/lib/AST/ExternalASTMerger.cpp b/contrib/llvm/tools/clang/lib/AST/ExternalASTMerger.cpp
index 2d4d0185ff2a..8849cfc3c80b 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExternalASTMerger.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExternalASTMerger.cpp
@@ -89,25 +89,21 @@ bool IsForwardDeclaration(Decl *D) {
}
}
+template <typename CallbackType>
void ForEachMatchingDC(
const DeclContext *DC,
llvm::ArrayRef<ExternalASTMerger::ImporterPair> Importers,
- std::function<void(const ExternalASTMerger::ImporterPair &IP,
- Source<const DeclContext *> SourceDC)>
- Callback) {
+ CallbackType Callback) {
for (const ExternalASTMerger::ImporterPair &IP : Importers) {
- Source<TranslationUnitDecl *> SourceTU(
- IP.Forward->getFromContext().getTranslationUnitDecl());
- Source<const DeclContext *> SourceDC =
- LookupSameContext(SourceTU, DC, *IP.Reverse);
- if (SourceDC.get()) {
+ Source<TranslationUnitDecl *> SourceTU =
+ IP.Forward->getFromContext().getTranslationUnitDecl();
+ if (auto SourceDC = LookupSameContext(SourceTU, DC, *IP.Reverse))
Callback(IP, SourceDC);
- }
}
}
bool HasDeclOfSameType(llvm::ArrayRef<Candidate> Decls, const Candidate &C) {
- return std::any_of(Decls.begin(), Decls.end(), [&C](const Candidate &D) {
+ return llvm::any_of(Decls, [&](const Candidate &D) {
return C.first.get()->getKind() == D.first.get()->getKind();
});
}
@@ -139,15 +135,15 @@ bool ExternalASTMerger::FindExternalVisibleDeclsByName(const DeclContext *DC,
}
};
- ForEachMatchingDC(DC, Importers, [Name, &FilterFoundDecl](
- const ImporterPair &IP,
- Source<const DeclContext *> SourceDC) {
- DeclarationName FromName = IP.Reverse->Import(Name);
- DeclContextLookupResult Result = SourceDC.get()->lookup(FromName);
- for (NamedDecl *FromD : Result) {
- FilterFoundDecl(std::make_pair(FromD, IP.Forward.get()));
- }
- });
+ ForEachMatchingDC(
+ DC, Importers,
+ [&](const ImporterPair &IP, Source<const DeclContext *> SourceDC) {
+ DeclarationName FromName = IP.Reverse->Import(Name);
+ DeclContextLookupResult Result = SourceDC.get()->lookup(FromName);
+ for (NamedDecl *FromD : Result) {
+ FilterFoundDecl(std::make_pair(FromD, IP.Forward.get()));
+ }
+ });
llvm::ArrayRef<Candidate> DeclsToReport =
CompleteDecls.empty() ? ForwardDecls : CompleteDecls;
@@ -170,15 +166,14 @@ void ExternalASTMerger::FindExternalLexicalDecls(
const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
SmallVectorImpl<Decl *> &Result) {
ForEachMatchingDC(
- DC, Importers, [DC, IsKindWeWant](const ImporterPair &IP,
- Source<const DeclContext *> SourceDC) {
+ DC, Importers,
+ [&](const ImporterPair &IP, Source<const DeclContext *> SourceDC) {
for (const Decl *SourceDecl : SourceDC.get()->decls()) {
if (IsKindWeWant(SourceDecl->getKind())) {
Decl *ImportedDecl =
IP.Forward->Import(const_cast<Decl *>(SourceDecl));
assert(ImportedDecl->getDeclContext() == DC);
(void)ImportedDecl;
- (void)DC;
}
}
});
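
The ForEachMatchingDC change above swaps std::function for a callback template parameter; a minimal sketch of the pattern and why it helps:

// Taking the callable as a template parameter lets the compiler see the
// concrete lambda type, enabling inlining and avoiding the type erasure
// (and possible heap allocation) that std::function entails.
template <typename CallbackType>
void forEach(const int *Begin, const int *End, CallbackType Callback) {
  for (const int *I = Begin; I != End; ++I)
    Callback(*I);   // direct call into the lambda body
}
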
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
index 29fcdd7be924..7db0b4d8e4ff 100644
--- a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
@@ -1455,10 +1455,12 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
Out << 'N';
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
Qualifiers MethodQuals =
- Qualifiers::fromCVRMask(Method->getTypeQualifiers());
+ Qualifiers::fromCVRUMask(Method->getTypeQualifiers());
// We do not consider restrict a distinguishing attribute for overloading
// purposes so we must not mangle it.
MethodQuals.removeRestrict();
+ // __unaligned is not currently mangled in any way, so remove it.
+ MethodQuals.removeUnaligned();
mangleQualifiers(MethodQuals);
mangleRefQualifier(Method->getRefQualifier());
}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp
index a812884cd927..cccb2f075b65 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtOpenMP.cpp
@@ -147,10 +147,6 @@ OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -201,10 +197,6 @@ OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -368,10 +360,6 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -421,10 +409,6 @@ OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -759,10 +743,6 @@ OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -904,10 +884,6 @@ OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -957,10 +933,6 @@ OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1009,10 +981,6 @@ OMPDistributeDirective *OMPDistributeDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1095,6 +1063,13 @@ OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
return Dir;
}
@@ -1153,6 +1128,13 @@ OMPDistributeParallelForSimdDirective::Create(
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
return Dir;
}
@@ -1200,10 +1182,6 @@ OMPDistributeSimdDirective *OMPDistributeSimdDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1256,10 +1234,6 @@ OMPTargetParallelForSimdDirective *OMPTargetParallelForSimdDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1352,10 +1326,6 @@ OMPTeamsDistributeDirective *OMPTeamsDistributeDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1407,10 +1377,6 @@ OMPTeamsDistributeSimdDirective *OMPTeamsDistributeSimdDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1474,6 +1440,13 @@ OMPTeamsDistributeParallelForSimdDirective::Create(
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
return Dir;
}
@@ -1534,6 +1507,13 @@ OMPTeamsDistributeParallelForDirective::Create(
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
return Dir;
}
@@ -1606,10 +1586,6 @@ OMPTargetTeamsDistributeDirective *OMPTargetTeamsDistributeDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1676,6 +1652,13 @@ OMPTargetTeamsDistributeParallelForDirective::Create(
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
return Dir;
}
@@ -1739,6 +1722,13 @@ OMPTargetTeamsDistributeParallelForSimdDirective::Create(
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
return Dir;
}
@@ -1789,10 +1779,6 @@ OMPTargetTeamsDistributeSimdDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
- Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
- Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
- Dir->setDistInc(Exprs.DistInc);
- Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 9fa693038194..6f935620888f 100644
--- a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -296,6 +296,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isCatchAll);
REGISTER_MATCHER(isClass);
REGISTER_MATCHER(isConst);
+ REGISTER_MATCHER(isConstexpr);
REGISTER_MATCHER(isConstQualified);
REGISTER_MATCHER(isCopyAssignmentOperator);
REGISTER_MATCHER(isCopyConstructor);
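
The newly registered matcher is the public isConstexpr narrowing matcher from ASTMatchers; a sketch of how it is used outside clang-query:

#include "clang/ASTMatchers/ASTMatchers.h"
using namespace clang::ast_matchers;

// Matches declarations like: constexpr int K = 1;
DeclarationMatcher ConstexprVars = varDecl(isConstexpr()).bind("cx");
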
diff --git a/contrib/llvm/tools/clang/lib/Basic/Attributes.cpp b/contrib/llvm/tools/clang/lib/Basic/Attributes.cpp
index c215366fc398..b7570d03c85a 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Attributes.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Attributes.cpp
@@ -1,4 +1,5 @@
#include "clang/Basic/Attributes.h"
+#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -15,3 +16,13 @@ int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
return 0;
}
+
+const char *attr::getSubjectMatchRuleSpelling(attr::SubjectMatchRule Rule) {
+ switch (Rule) {
+#define ATTR_MATCH_RULE(NAME, SPELLING, IsAbstract) \
+ case attr::NAME: \
+ return SPELLING;
+#include "clang/Basic/AttrSubMatchRulesList.inc"
+ }
+ llvm_unreachable("Invalid subject match rule");
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp
index 5a8bb61eaadf..76a0e18c2d73 100644
--- a/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp
@@ -854,14 +854,10 @@ bool clang::isOpenMPTaskingDirective(OpenMPDirectiveKind Kind) {
bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
return Kind == OMPD_distribute_parallel_for ||
Kind == OMPD_distribute_parallel_for_simd ||
- Kind == OMPD_distribute_simd || Kind == OMPD_teams_distribute ||
- Kind == OMPD_teams_distribute_simd ||
Kind == OMPD_teams_distribute_parallel_for_simd ||
Kind == OMPD_teams_distribute_parallel_for ||
- Kind == OMPD_target_teams_distribute ||
Kind == OMPD_target_teams_distribute_parallel_for ||
- Kind == OMPD_target_teams_distribute_parallel_for_simd ||
- Kind == OMPD_target_teams_distribute_simd;
+ Kind == OMPD_target_teams_distribute_parallel_for_simd;
}
void clang::getOpenMPCaptureRegions(
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
index a457f6deee75..d7f1793e377c 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -5467,9 +5467,11 @@ public:
Builder.defineMacro("__arm__");
// For bare-metal none-eabi and none-eabihf.
if (getTriple().getOS() == llvm::Triple::UnknownOS &&
- getTriple().getEnvironment() == llvm::Triple::EABI)
+ (getTriple().getEnvironment() == llvm::Triple::EABI ||
+ getTriple().getEnvironment() == llvm::Triple::EABIHF))
Builder.defineMacro("__ELF__");
+
// Target properties.
Builder.defineMacro("__REGISTER_PREFIX__", "");
@@ -6118,6 +6120,11 @@ public:
MacroBuilder &Builder) const override {
// Target identification.
Builder.defineMacro("__aarch64__");
+ // For bare-metal none-eabi and none-eabihf.
+ if (getTriple().getOS() == llvm::Triple::UnknownOS &&
+ (getTriple().getEnvironment() == llvm::Triple::EABI ||
+ getTriple().getEnvironment() == llvm::Triple::EABIHF))
+ Builder.defineMacro("__ELF__");
// Target properties.
Builder.defineMacro("_LP64");
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
index 855d6795b9d6..20059d922f90 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -83,9 +83,6 @@ class EmitAssemblyHelper {
return TargetIRAnalysis();
}
- /// Set LLVM command line options passed through -backend-option.
- void setCommandLineOpts();
-
void CreatePasses(legacy::PassManager &MPM, legacy::FunctionPassManager &FPM);
/// Generates the TargetMachine.
@@ -372,7 +369,9 @@ static void initTargetOptions(llvm::TargetOptions &Options,
// Set FP fusion mode.
switch (LangOpts.getDefaultFPContractMode()) {
case LangOptions::FPC_Off:
- Options.AllowFPOpFusion = llvm::FPOpFusion::Strict;
+ // Preserve any contraction performed by the front-end. (Strict performs
+ // splitting of the muladd intrinsic in the backend.)
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
case LangOptions::FPC_On:
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
@@ -604,7 +603,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.populateModulePassManager(MPM);
}
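
A sketch of the effect of the FPC_Off change above: with -ffp-contract=off the front-end already refrains from contracting, so the backend only has to preserve that, rather than Strict-mode re-splitting any llvm.fmuladd it encounters.

float mul_add(float a, float b, float c) {
  return a * b + c;   // -ffp-contract=off: emitted as separate fmul/fadd by
                      // the front-end; Standard fusion mode leaves it alone
}
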
-void EmitAssemblyHelper::setCommandLineOpts() {
+static void setCommandLineOpts(const CodeGenOptions &CodeGenOpts) {
SmallVector<const char *, 16> BackendArgs;
BackendArgs.push_back("clang"); // Fake program name.
if (!CodeGenOpts.DebugPass.empty()) {
@@ -677,7 +676,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr);
- setCommandLineOpts();
+ setCommandLineOpts(CodeGenOpts);
bool UsesCodeGen = (Action != Backend_EmitNothing &&
Action != Backend_EmitBC &&
@@ -806,7 +805,7 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr);
- setCommandLineOpts();
+ setCommandLineOpts(CodeGenOpts);
// The new pass manager always makes a target machine available to passes
// during construction.
@@ -944,6 +943,8 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
ModuleToDefinedGVSummaries;
CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+ setCommandLineOpts(CGOpts);
+
// We can simply import the values mentioned in the combined index, since
// we should only invoke this using the individual indexes written out
// via a WriteIndexesThinBackend.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 8af32055fc4c..26235257b19d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -1586,9 +1586,10 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
- // indirect arguments are always on the stack, which is addr space #0.
+ // indirect arguments are always on the stack, which is the alloca addr space.
llvm::Type *LTy = ConvertTypeForMem(it->type);
- ArgTypes[FirstIRArg] = LTy->getPointerTo();
+ ArgTypes[FirstIRArg] = LTy->getPointerTo(
+ CGM.getDataLayout().getAllocaAddrSpace());
break;
}
@@ -1761,7 +1762,7 @@ void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
void CodeGenModule::ConstructAttributeList(
StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
- AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
+ llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
@@ -1930,13 +1931,8 @@ void CodeGenModule::ConstructAttributeList(
RetAttrs.addAttribute(llvm::Attribute::NonNull);
}
- // Attach return attributes.
- if (RetAttrs.hasAttributes()) {
- PAL.push_back(llvm::AttributeList::get(
- getLLVMContext(), llvm::AttributeList::ReturnIndex, RetAttrs));
- }
-
bool hasUsedSRet = false;
+ SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
@@ -1945,16 +1941,16 @@ void CodeGenModule::ConstructAttributeList(
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
- PAL.push_back(llvm::AttributeList::get(
- getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
+ ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
+ llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
}
// Attach attributes to inalloca argument.
if (IRFunctionArgs.hasInallocaArg()) {
llvm::AttrBuilder Attrs;
Attrs.addAttribute(llvm::Attribute::InAlloca);
- PAL.push_back(llvm::AttributeList::get(
- getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
+ ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
+ llvm::AttributeSet::get(getLLVMContext(), Attrs);
}
unsigned ArgNo = 0;
@@ -1967,10 +1963,12 @@ void CodeGenModule::ConstructAttributeList(
// Add attribute for padding argument, if necessary.
if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
- if (AI.getPaddingInReg())
- PAL.push_back(llvm::AttributeList::get(
- getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
- llvm::Attribute::InReg));
+ if (AI.getPaddingInReg()) {
+ ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ llvm::AttributeSet::get(
+ getLLVMContext(),
+ llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
+ }
}
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
@@ -2085,15 +2083,15 @@ void CodeGenModule::ConstructAttributeList(
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
for (unsigned i = 0; i < NumIRArgs; i++)
- PAL.push_back(llvm::AttributeList::get(getLLVMContext(),
- FirstIRArg + i + 1, Attrs));
+ ArgAttrs[FirstIRArg + i] =
+ llvm::AttributeSet::get(getLLVMContext(), Attrs);
}
}
assert(ArgNo == FI.arg_size());
- if (FuncAttrs.hasAttributes())
- PAL.push_back(llvm::AttributeList::get(
- getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs));
+ AttrList = llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
+ llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
}
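
The migration above targets the rewritten LLVM attribute API, where a function's attributes are assembled from one AttributeSet per position instead of index-keyed push_backs; a sketch with hypothetical attribute contents:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/LLVMContext.h"

llvm::AttributeList buildAttrs(llvm::LLVMContext &Ctx, unsigned NumIRArgs) {
  llvm::AttrBuilder FnB, RetB;
  FnB.addAttribute(llvm::Attribute::NoUnwind);   // illustrative contents
  RetB.addAttribute(llvm::Attribute::NonNull);
  llvm::SmallVector<llvm::AttributeSet, 4> ArgSets(NumIRArgs);
  return llvm::AttributeList::get(
      Ctx, llvm::AttributeSet::get(Ctx, FnB),
      llvm::AttributeSet::get(Ctx, RetB), ArgSets);
}
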
/// An argument came in as a promoted argument; demote it back to its
@@ -2204,8 +2202,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (IRFunctionArgs.hasSRetArg()) {
auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
AI->setName("agg.result");
- AI->addAttr(llvm::AttributeList::get(getLLVMContext(), AI->getArgNo() + 1,
- llvm::Attribute::NoAlias));
+ AI->addAttr(llvm::Attribute::NoAlias);
}
// Track if we received the parameter as a pointer (indirect, byval, or
@@ -2296,9 +2293,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
PVD->getFunctionScopeIndex()))
- AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
- AI->getArgNo() + 1,
- llvm::Attribute::NonNull));
+ AI->addAttr(llvm::Attribute::NonNull);
QualType OTy = PVD->getOriginalType();
if (const auto *ArrTy =
@@ -2315,12 +2310,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::AttrBuilder Attrs;
Attrs.addDereferenceableAttr(
getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
- AI->addAttr(llvm::AttributeList::get(
- getLLVMContext(), AI->getArgNo() + 1, Attrs));
+ AI->addAttrs(Attrs);
} else if (getContext().getTargetAddressSpace(ETy) == 0) {
- AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
- AI->getArgNo() + 1,
- llvm::Attribute::NonNull));
+ AI->addAttr(llvm::Attribute::NonNull);
}
}
} else if (const auto *ArrTy =
@@ -2330,34 +2322,26 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// we know that it must be nonnull.
if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
!getContext().getTargetAddressSpace(ArrTy->getElementType()))
- AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
- AI->getArgNo() + 1,
- llvm::Attribute::NonNull));
+ AI->addAttr(llvm::Attribute::NonNull);
}
const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
if (!AVAttr)
if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
- if (AVAttr) {
+ if (AVAttr) {
llvm::Value *AlignmentValue =
EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI =
cast<llvm::ConstantInt>(AlignmentValue);
- unsigned Alignment =
- std::min((unsigned) AlignmentCI->getZExtValue(),
- +llvm::Value::MaximumAlignment);
-
- llvm::AttrBuilder Attrs;
- Attrs.addAlignmentAttr(Alignment);
- AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
- AI->getArgNo() + 1, Attrs));
+ unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
+ +llvm::Value::MaximumAlignment);
+ AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
}
}
if (Arg->getType().isRestrictQualified())
- AI->addAttr(llvm::AttributeList::get(
- getLLVMContext(), AI->getArgNo() + 1, llvm::Attribute::NoAlias));
+ AI->addAttr(llvm::Attribute::NoAlias);
// LLVM expects swifterror parameters to be used in very restricted
// ways. Copy the value into a less-restricted temporary.
@@ -4113,13 +4097,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Compute the calling convention and attributes.
unsigned CallingConv;
- CodeGen::AttributeListType AttributeList;
+ llvm::AttributeList Attrs;
CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
- Callee.getAbstractInfo(),
- AttributeList, CallingConv,
+ Callee.getAbstractInfo(), Attrs, CallingConv,
/*AttrOnCallSite=*/true);
- llvm::AttributeList Attrs =
- llvm::AttributeList::get(getLLVMContext(), AttributeList);
// Apply some call-site-specific attributes.
// TODO: work this into building the attribute set.
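The hunks above replace the old pattern of push_back'ing indexed llvm::AttributeList entries with one llvm::AttributeSet per position, combined once at the end. A minimal sketch of the resulting pattern, assuming the LLVM 5.0-era attribute API this import tracks (the function and attribute choices here are illustrative, not from the patch):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/LLVMContext.h"

llvm::AttributeList buildExampleAttrs(llvm::LLVMContext &Ctx, unsigned NumArgs) {
  llvm::AttrBuilder FuncAttrs, RetAttrs;
  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  RetAttrs.addAttribute(llvm::Attribute::NonNull);
  // One AttributeSet per IR argument; slots left unset stay empty.
  llvm::SmallVector<llvm::AttributeSet, 4> ArgAttrs(NumArgs);
  if (NumArgs > 0)
    ArgAttrs[0] = llvm::AttributeSet::get(
        Ctx, llvm::AttrBuilder().addAttribute(llvm::Attribute::NoAlias));
  // Function, return, and argument attributes are combined in one call;
  // no more "ArgNo + 1" index arithmetic as in the removed lines.
  return llvm::AttributeList::get(
      Ctx, llvm::AttributeSet::get(Ctx, FuncAttrs),
      llvm::AttributeSet::get(Ctx, RetAttrs), ArgAttrs);
}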
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
index 97221e20c195..7e10407fc31c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -39,7 +39,6 @@ namespace clang {
class VarDecl;
namespace CodeGen {
-typedef SmallVector<llvm::AttributeList, 8> AttributeListType;
/// Abstract information about a function or function prototype.
class CGCalleeInfo {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
index 818b51543d30..2f6a2b95fb61 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -3466,17 +3466,17 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
// functions there won't be an implicit param at arg1 and
// otherwise it is 'self' or 'this'.
if (isa<ImplicitParamDecl>(VD) && ArgNo && *ArgNo == 1)
- Flags |= llvm::DINode::FlagObjectPointer;
- if (auto *Arg = dyn_cast<llvm::Argument>(Storage))
- if (Arg->getType()->isPointerTy() && !Arg->hasByValAttr() &&
- !VD->getType()->isPointerType())
- Expr.push_back(llvm::dwarf::DW_OP_deref);
+ Flags |= llvm::DINode::FlagObjectPointer;
+ // Note: Older versions of clang used to emit byval references with an extra
+ // DW_OP_deref, because they referenced the IR arg directly instead of
+ // referencing an alloca. Newer versions of LLVM don't treat allocas
+ // differently from other function arguments when used in a dbg.declare.
auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
-
StringRef Name = VD->getName();
if (!Name.empty()) {
if (VD->hasAttr<BlocksAttr>()) {
+ // Here, we need an offset *into* the alloca.
CharUnits offset = CharUnits::fromQuantity(32);
Expr.push_back(llvm::dwarf::DW_OP_plus);
// offset of __forwarding field
@@ -3488,22 +3488,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
// offset of x field
offset = CGM.getContext().toCharUnitsFromBits(XOffset);
Expr.push_back(offset.getQuantity());
-
- // Create the descriptor for the variable.
- auto *D = ArgNo
- ? DBuilder.createParameterVariable(Scope, VD->getName(),
- *ArgNo, Unit, Line, Ty)
- : DBuilder.createAutoVariable(Scope, VD->getName(), Unit,
- Line, Ty, Align);
-
- // Insert an llvm.dbg.declare into the current block.
- DBuilder.insertDeclare(
- Storage, D, DBuilder.createExpression(Expr),
- llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
- Builder.GetInsertBlock());
- return;
- } else if (isa<VariableArrayType>(VD->getType()))
- Expr.push_back(llvm::dwarf::DW_OP_deref);
+ }
} else if (const auto *RT = dyn_cast<RecordType>(VD->getType())) {
// If VD is an anonymous union then Storage represents value for
// all union fields.
@@ -3606,8 +3591,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
SmallVector<int64_t, 9> addr;
- if (isa<llvm::AllocaInst>(Storage))
- addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_deref);
addr.push_back(llvm::dwarf::DW_OP_plus);
addr.push_back(offset.getQuantity());
if (isByRef) {
@@ -3633,12 +3617,11 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
// Insert an llvm.dbg.declare into the current block.
auto DL =
llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back(), CurInlinedAt);
+ auto *Expr = DBuilder.createExpression(addr);
if (InsertPoint)
- DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL,
- InsertPoint);
+ DBuilder.insertDeclare(Storage, D, Expr, DL, InsertPoint);
else
- DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL,
- Builder.GetInsertBlock());
+ DBuilder.insertDeclare(Storage, D, Expr, DL, Builder.GetInsertBlock());
}
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
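For reference, the DWARF expression assembled above now always starts with DW_OP_deref, since Storage is an alloca holding the pointer. A hedged sketch of the same sequence, with the DIBuilder and byte offset as assumed context:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/Support/Dwarf.h"

llvm::DIExpression *buildCaptureExpr(llvm::DIBuilder &DBuilder,
                                     int64_t FieldOffsetInBytes) {
  llvm::SmallVector<int64_t, 4> Addr;
  Addr.push_back(llvm::dwarf::DW_OP_deref); // Storage is an alloca; load the pointer
  Addr.push_back(llvm::dwarf::DW_OP_plus);  // then advance to the captured field
  Addr.push_back(FieldOffsetInBytes);       // assumed: offset of the capture, in bytes
  return DBuilder.createExpression(Addr);
}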
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
index 0f959043a22e..10a0b46d9028 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -924,7 +924,7 @@ llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
return nullptr;
llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
- Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
+ Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
C->setDoesNotThrow();
@@ -932,7 +932,7 @@ llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
}
void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
- Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
+ Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
C->setDoesNotThrow();
@@ -1728,7 +1728,7 @@ llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
if (LifetimeStartFn)
return LifetimeStartFn;
LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
- llvm::Intrinsic::lifetime_start, Int8PtrTy);
+ llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
return LifetimeStartFn;
}
@@ -1737,7 +1737,7 @@ llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
if (LifetimeEndFn)
return LifetimeEndFn;
LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
- llvm::Intrinsic::lifetime_end, Int8PtrTy);
+ llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
return LifetimeEndFn;
}
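These four hunks route lifetime markers through AllocaInt8PtrTy so that targets whose allocas live in a non-zero address space get correctly typed intrinsics. A sketch of the lookup under that assumption:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

llvm::Constant *getLifetimeStartFor(llvm::Module &M) {
  // i8* in the target's alloca address space (0 on most targets, e.g. 5 on
  // amdgcn); the intrinsic declaration is overloaded on this pointer type.
  llvm::PointerType *AllocaInt8PtrTy = llvm::Type::getInt8PtrTy(
      M.getContext(), M.getDataLayout().getAllocaAddrSpace());
  return llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::lifetime_start,
                                         AllocaInt8PtrTy);
}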
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index 265ef27a46b0..719147a58e08 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -533,6 +533,15 @@ bool CodeGenFunction::sanitizePerformTypeCheck() const {
SanOpts.has(SanitizerKind::Vptr);
}
+/// Check if a runtime null check for \p Ptr can be omitted.
+static bool canOmitPointerNullCheck(llvm::Value *Ptr) {
+ // Note: do not perform any constant-folding in this function. That is best
+ // left to the IR builder.
+
+ // Pointers to alloca'd memory are non-null.
+ return isa<llvm::AllocaInst>(Ptr->stripPointerCastsNoFollowAliases());
+}
+
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *Ptr, QualType Ty,
CharUnits Alignment,
@@ -554,19 +563,28 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
TCK == TCK_UpcastToVirtualBase;
if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
- !SkippedChecks.has(SanitizerKind::Null)) {
+ !SkippedChecks.has(SanitizerKind::Null) &&
+ !canOmitPointerNullCheck(Ptr)) {
// The glvalue must not be an empty glvalue.
llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);
- if (AllowNullPointers) {
- // When performing pointer casts, it's OK if the value is null.
- // Skip the remaining checks in that case.
- Done = createBasicBlock("null");
- llvm::BasicBlock *Rest = createBasicBlock("not.null");
- Builder.CreateCondBr(IsNonNull, Rest, Done);
- EmitBlock(Rest);
- } else {
- Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
+ // The IR builder can constant-fold the null check if the pointer points to
+ // a constant.
+ bool PtrIsNonNull =
+ IsNonNull == llvm::ConstantInt::getTrue(getLLVMContext());
+
+ // Skip the null check if the pointer is known to be non-null.
+ if (!PtrIsNonNull) {
+ if (AllowNullPointers) {
+ // When performing pointer casts, it's OK if the value is null.
+ // Skip the remaining checks in that case.
+ Done = createBasicBlock("null");
+ llvm::BasicBlock *Rest = createBasicBlock("not.null");
+ Builder.CreateCondBr(IsNonNull, Rest, Done);
+ EmitBlock(Rest);
+ } else {
+ Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
+ }
}
}
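The new logic splits the work between two guards: a syntactic test for allocas (which are not Constants, so the IR builder cannot fold them) and a check for the folded compare on everything else. A standalone sketch of that division of labor (the helper name is illustrative):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"

static bool nullCheckIsDead(llvm::Value *Ptr, llvm::Value *IsNonNull,
                            llvm::LLVMContext &Ctx) {
  // Alloca-backed pointers are never null; stripping casts (without
  // following aliases) catches bitcast wrappers around the alloca.
  if (llvm::isa<llvm::AllocaInst>(Ptr->stripPointerCastsNoFollowAliases()))
    return true;
  // For Constant pointers (e.g. most globals) CreateIsNotNull folds to
  // i1 true, so comparing against that constant detects the other case.
  return IsNonNull == llvm::ConstantInt::getTrue(Ctx);
}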
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
index 3db15c646f43..53c184130709 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -201,7 +201,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
if (CGM.getDataLayout().isBigEndian()) {
- Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp.lshrInPlace(NewFieldWidth);
Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining high bits.
@@ -210,7 +210,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining low bits.
- FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue.lshrInPlace(BitsInPreviousByte);
FieldValue = FieldValue.trunc(NewFieldWidth);
}
}
@@ -273,7 +273,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
// We want the low bits.
Tmp = FieldValue.trunc(CharWidth);
- FieldValue = FieldValue.lshr(CharWidth);
+ FieldValue.lshrInPlace(CharWidth);
}
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
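The lshr-to-lshrInPlace swaps above adopt the in-place shift added to APInt in this import; a quick sketch of the difference, under that assumption:

#include "llvm/ADT/APInt.h"

void shiftDemo() {
  llvm::APInt FieldValue(32, 0xF0);
  FieldValue.lshrInPlace(4);            // shifts in place: now 0x0F, no temporary
  llvm::APInt Tmp = FieldValue.lshr(2); // value-returning form still exists
  (void)Tmp;
}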
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
index 3a09a15dbc15..76e7df861f74 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -126,10 +126,12 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
QualType IdTy(CGM.getContext().getObjCIdType());
llvm::Constant *Constant =
CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
- Address Addr(Constant, Context.getTypeAlignInChars(IdTy));
- LValue LV = MakeAddrLValue(Addr, IdTy);
- return Builder.CreateBitCast(EmitLoadOfScalar(LV, E->getLocStart()),
- ConvertType(E->getType()));
+ LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
+ llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getLocStart());
+ cast<llvm::LoadInst>(Ptr)->setMetadata(
+ CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(getLLVMContext(), None));
+ return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
}
// Compute the type of the array we're initializing.
@@ -1848,12 +1850,8 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
F->addFnAttr(llvm::Attribute::NonLazyBind);
}
- if (IsForwarding(Name)) {
- llvm::AttrBuilder B;
- B.addAttribute(llvm::Attribute::Returned);
-
- F->arg_begin()->addAttr(llvm::AttributeList::get(F->getContext(), 1, B));
- }
+ if (IsForwarding(Name))
+ F->arg_begin()->addAttr(llvm::Attribute::Returned);
}
return RTF;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index 0ebfd99363c1..683f366ebe45 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -1166,7 +1166,7 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
if (Rem)
Rem--;
SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
- LHS++;
+ ++LHS;
}
return;
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
index 7b0c8bf7d6e9..1869c0e809df 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -379,12 +379,9 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
// Apply the standard set of call attributes.
unsigned CallingConv;
- CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(CalleePtr->getName(),
- *CurFnInfo, MD, AttributeList,
+ llvm::AttributeList Attrs;
+ CGM.ConstructAttributeList(CalleePtr->getName(), *CurFnInfo, MD, Attrs,
CallingConv, /*AttrOnCallSite=*/true);
- llvm::AttributeList Attrs =
- llvm::AttributeList::get(getLLVMContext(), AttributeList);
Call->setAttributes(Attrs);
Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index d48bff9c30a3..19203973ff1b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -111,6 +111,8 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
C.getTargetInfo().getMaxPointerWidth());
Int8PtrTy = Int8Ty->getPointerTo(0);
Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
+ AllocaInt8PtrTy = Int8Ty->getPointerTo(
+ M.getDataLayout().getAllocaAddrSpace());
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
BuiltinCC = getTargetCodeGenInfo().getABIInfo().getBuiltinCC();
@@ -839,10 +841,9 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
const CGFunctionInfo &Info,
llvm::Function *F) {
unsigned CallingConv;
- AttributeListType AttributeList;
- ConstructAttributeList(F->getName(), Info, D, AttributeList, CallingConv,
- false);
- F->setAttributes(llvm::AttributeList::get(getLLVMContext(), AttributeList));
+ llvm::AttributeList PAL;
+ ConstructAttributeList(F->getName(), Info, D, PAL, CallingConv, false);
+ F->setAttributes(PAL);
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -3793,6 +3794,10 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
AddDeferredUnusedCoverageMapping(D);
break;
+ case Decl::CXXDeductionGuide:
+ // Function-like, but does not result in code emission.
+ break;
+
case Decl::Var:
case Decl::Decomposition:
// Skip variable templates
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index d0b2dd717c8c..c4985ba41db1 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -1020,11 +1020,12 @@ public:
/// \param CalleeInfo - The callee information these attributes are being
/// constructed for. If valid, the attributes applied to this decl may
/// contribute to the function attributes and calling convention.
- /// \param PAL [out] - On return, the attribute list to use.
+ /// \param Attrs [out] - On return, the attribute list to use.
/// \param CallingConv [out] - On return, the LLVM calling convention to use.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info,
- CGCalleeInfo CalleeInfo, AttributeListType &PAL,
- unsigned &CallingConv, bool AttrOnCallSite);
+ CGCalleeInfo CalleeInfo,
+ llvm::AttributeList &Attrs, unsigned &CallingConv,
+ bool AttrOnCallSite);
/// Adds attributes to F according to our CodeGenOptions and LangOptions, as
/// though we had emitted it ourselves. We remove any attributes on F that
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h
index 47e26bcaa1b6..8ce9860cc638 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -60,6 +60,12 @@ struct CodeGenTypeCache {
llvm::PointerType *Int8PtrPtrTy;
};
+ /// void* in the alloca address space
+ union {
+ llvm::PointerType *AllocaVoidPtrTy;
+ llvm::PointerType *AllocaInt8PtrTy;
+ };
+
/// The size and alignment of the builtin C type 'int'. This comes
/// up enough in various ABI lowering tasks to be worth pre-computing.
union {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
index 89090c8b6a1b..fc642850d60a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -197,7 +197,7 @@ namespace {
// Provide some coverage mapping even for methods that aren't emitted.
// Don't do this for templated classes though, as they may not be
// instantiable.
- if (!MD->getParent()->getDescribedClassTemplate())
+ if (!MD->getParent()->isDependentContext())
Builder->AddDeferredUnusedCoverageMapping(MD);
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
index b05596a99f6e..8e61aadbf326 100644
--- a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
@@ -469,34 +469,12 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
int LegacySanitizeCoverage;
if (Arg->getNumValues() == 1 &&
!StringRef(Arg->getValue(0))
- .getAsInteger(0, LegacySanitizeCoverage) &&
- LegacySanitizeCoverage >= 0 && LegacySanitizeCoverage <= 4) {
- switch (LegacySanitizeCoverage) {
- case 0:
- CoverageFeatures = 0;
- Arg->claim();
- break;
- case 1:
- D.Diag(diag::warn_drv_deprecated_arg) << Arg->getAsString(Args)
- << "-fsanitize-coverage=func";
- CoverageFeatures = CoverageFunc;
- break;
- case 2:
- D.Diag(diag::warn_drv_deprecated_arg) << Arg->getAsString(Args)
- << "-fsanitize-coverage=bb";
- CoverageFeatures = CoverageBB;
- break;
- case 3:
- D.Diag(diag::warn_drv_deprecated_arg) << Arg->getAsString(Args)
- << "-fsanitize-coverage=edge";
- CoverageFeatures = CoverageEdge;
- break;
- case 4:
+ .getAsInteger(0, LegacySanitizeCoverage)) {
+ CoverageFeatures = 0;
+ Arg->claim();
+ if (LegacySanitizeCoverage != 0) {
D.Diag(diag::warn_drv_deprecated_arg)
- << Arg->getAsString(Args)
- << "-fsanitize-coverage=edge,indirect-calls";
- CoverageFeatures = CoverageEdge | CoverageIndirCall;
- break;
+ << Arg->getAsString(Args) << "-fsanitize-coverage=trace-pc-guard";
}
continue;
}
@@ -530,16 +508,14 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Basic block tracing and 8-bit counters require some type of coverage
// enabled.
int CoverageTypes = CoverageFunc | CoverageBB | CoverageEdge;
- if ((CoverageFeatures & CoverageTraceBB) &&
- !(CoverageFeatures & CoverageTypes))
- D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ if (CoverageFeatures & CoverageTraceBB)
+ D.Diag(clang::diag::warn_drv_deprecated_arg)
<< "-fsanitize-coverage=trace-bb"
- << "-fsanitize-coverage=(func|bb|edge)";
- if ((CoverageFeatures & Coverage8bitCounters) &&
- !(CoverageFeatures & CoverageTypes))
- D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fsanitize-coverage=trace-pc-guard";
+ if (CoverageFeatures & Coverage8bitCounters)
+ D.Diag(clang::diag::warn_drv_deprecated_arg)
<< "-fsanitize-coverage=8bit-counters"
- << "-fsanitize-coverage=(func|bb|edge)";
+ << "-fsanitize-coverage=trace-pc-guard";
// trace-pc w/o func/bb/edge implies edge.
if ((CoverageFeatures & (CoverageTracePC | CoverageTracePCGuard)) &&
!(CoverageFeatures & CoverageTypes))
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp
index f8eeeb4eef69..49708e7d7242 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp
@@ -649,8 +649,24 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
CmdArgs.push_back("-x");
if (Args.hasArg(options::OPT_rewrite_objc))
CmdArgs.push_back(types::getTypeName(types::TY_PP_ObjCXX));
- else
- CmdArgs.push_back(types::getTypeName(Input.getType()));
+ else {
+ // Map the driver type to the frontend type. This is mostly an identity
+ // mapping, except that the distinction between module interface units
+ // and other source files does not exist at the frontend layer.
+ const char *ClangType;
+ switch (Input.getType()) {
+ case types::TY_CXXModule:
+ ClangType = "c++";
+ break;
+ case types::TY_PP_CXXModule:
+ ClangType = "c++-cpp-output";
+ break;
+ default:
+ ClangType = types::getTypeName(Input.getType());
+ break;
+ }
+ CmdArgs.push_back(ClangType);
+ }
}
static void appendUserToPath(SmallVectorImpl<char> &Result) {
@@ -2290,6 +2306,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(options::OPT_fstrict_return, options::OPT_fno_strict_return,
true))
CmdArgs.push_back("-fno-strict-return");
+ if (Args.hasFlag(options::OPT_fallow_editor_placeholders,
+ options::OPT_fno_allow_editor_placeholders, false))
+ CmdArgs.push_back("-fallow-editor-placeholders");
if (Args.hasFlag(options::OPT_fstrict_vtable_pointers,
options::OPT_fno_strict_vtable_pointers,
false))
@@ -4996,6 +5015,19 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::x86_64:
AddX86TargetArgs(Args, CmdArgs);
break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ // This isn't in AddARMTargetArgs because we want to do this for assembly
+ // only, not C/C++.
+ if (Args.hasFlag(options::OPT_mdefault_build_attributes,
+ options::OPT_mno_default_build_attributes, true)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-arm-add-build-attributes");
+ }
+ break;
}
// Consume all the warning flags. Usually this would be handled more
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.cpp
index e5f4a3b8d605..93b66eb6954a 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -261,6 +261,12 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
arm::getARMArchCPUFromArgs(Args, MArch, MCPU, FromAs);
return arm::getARMTargetCPU(MCPU, MArch, T);
}
+
+ case llvm::Triple::avr:
+ if (const Arg *A = Args.getLastArg(options::OPT_mmcu_EQ))
+ return A->getValue();
+ return "";
+
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -426,11 +432,12 @@ void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
}
}
-void tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
- const ArgList &Args) {
+bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
+ const ArgList &Args, bool IsOffloadingHost,
+ bool GompNeedsRT) {
if (!Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
options::OPT_fno_openmp, false))
- return;
+ return false;
switch (TC.getDriver().getOpenMPRuntime(Args)) {
case Driver::OMPRT_OMP:
@@ -438,16 +445,24 @@ void tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
break;
case Driver::OMPRT_GOMP:
CmdArgs.push_back("-lgomp");
+
+ if (GompNeedsRT)
+ CmdArgs.push_back("-lrt");
break;
case Driver::OMPRT_IOMP5:
CmdArgs.push_back("-liomp5");
break;
case Driver::OMPRT_Unknown:
// Already diagnosed.
- break;
+ return false;
}
+ if (IsOffloadingHost)
+ CmdArgs.push_back("-lomptarget");
+
addArchSpecificRPath(TC, Args, CmdArgs);
+
+ return true;
}
static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.h
index f5747aa85f22..fdeb6669b0a8 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -59,8 +59,10 @@ void AddAssemblerKPIC(const ToolChain &ToolChain,
void addArchSpecificRPath(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
-void addOpenMPRuntime(llvm::opt::ArgStringList &CmdArgs, const ToolChain &TC,
- const llvm::opt::ArgList &Args);
+/// Returns true if an OpenMP runtime has been added.
+bool addOpenMPRuntime(llvm::opt::ArgStringList &CmdArgs, const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ bool IsOffloadingHost = false, bool GompNeedsRT = false);
llvm::opt::Arg *getLastProfileUseArg(const llvm::opt::ArgList &Args);
llvm::opt::Arg *getLastProfileSampleUseArg(const llvm::opt::ArgList &Args);
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp
index 549e24cbd2b3..313bf38c2860 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -586,37 +586,15 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
bool WantPthread = Args.hasArg(options::OPT_pthread) ||
Args.hasArg(options::OPT_pthreads);
- if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
- options::OPT_fno_openmp, false)) {
+ // FIXME: Only pass GompNeedsRT = true for platforms with libgomp that
+ // require librt. Most modern Linux platforms do, but some may not.
+ if (addOpenMPRuntime(CmdArgs, ToolChain, Args,
+ JA.isHostOffloading(Action::OFK_OpenMP),
+ /* GompNeedsRT= */ true))
// The OpenMP runtime implies pthreads when using the GNU toolchain.
// FIXME: Does this really make sense for all GNU toolchains?
WantPthread = true;
- // Also link the particular OpenMP runtimes.
- switch (ToolChain.getDriver().getOpenMPRuntime(Args)) {
- case Driver::OMPRT_OMP:
- CmdArgs.push_back("-lomp");
- break;
- case Driver::OMPRT_GOMP:
- CmdArgs.push_back("-lgomp");
-
- // FIXME: Exclude this for platforms with libgomp that don't require
- // librt. Most modern Linux platforms require it, but some may not.
- CmdArgs.push_back("-lrt");
- break;
- case Driver::OMPRT_IOMP5:
- CmdArgs.push_back("-liomp5");
- break;
- case Driver::OMPRT_Unknown:
- // Already diagnosed.
- break;
- }
- if (JA.isHostOffloading(Action::OFK_OpenMP))
- CmdArgs.push_back("-lomptarget");
-
- addArchSpecificRPath(ToolChain, Args, CmdArgs);
- }
-
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
if (WantPthread && !isAndroid)
@@ -770,6 +748,12 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
break;
}
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be: {
+ Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
+ break;
+ }
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.cpp
index ca5bf06f7e7d..7550bab486f1 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -285,28 +285,30 @@ void toolchains::MinGW::findGccLibDir() {
}
}
+llvm::ErrorOr<std::string> toolchains::MinGW::findGcc() {
+ llvm::SmallVector<llvm::SmallString<32>, 2> Gccs;
+ Gccs.emplace_back(getTriple().getArchName());
+ Gccs[0] += "-w64-mingw32-gcc";
+ Gccs.emplace_back("mingw32-gcc");
+ // Please do not add a plain "gcc" candidate here.
+ for (StringRef CandidateGcc : Gccs)
+ if (llvm::ErrorOr<std::string> GPPName = llvm::sys::findProgramByName(CandidateGcc))
+ return GPPName;
+ return make_error_code(std::errc::no_such_file_or_directory);
+}
+
toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
-// In Windows there aren't any standard install locations, we search
-// for gcc on the PATH. In Linux the base is always /usr.
-#ifdef LLVM_ON_WIN32
if (getDriver().SysRoot.size())
Base = getDriver().SysRoot;
- else if (llvm::ErrorOr<std::string> GPPName =
- llvm::sys::findProgramByName("gcc"))
+ else if (llvm::ErrorOr<std::string> GPPName = findGcc())
Base = llvm::sys::path::parent_path(
llvm::sys::path::parent_path(GPPName.get()));
else
Base = llvm::sys::path::parent_path(getDriver().getInstalledDir());
-#else
- if (getDriver().SysRoot.size())
- Base = getDriver().SysRoot;
- else
- Base = "/usr";
-#endif
Base += llvm::sys::path::get_separator();
findGccLibDir();
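Both findGcc and the constructor lean on llvm::sys::findProgramByName, which reports misses through ErrorOr rather than an empty string. A usage sketch (the program name is illustrative):

#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

void probeCrossGcc() {
  llvm::ErrorOr<std::string> P =
      llvm::sys::findProgramByName("x86_64-w64-mingw32-gcc");
  if (P)
    llvm::outs() << "found: " << *P << "\n";  // absolute path to the binary
  else
    llvm::errs() << "no cross gcc: " << P.getError().message() << "\n";
}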
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.h
index 9d2468ffa234..cf1628a4ccdd 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.h
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/MinGW.h
@@ -93,6 +93,7 @@ private:
mutable std::unique_ptr<tools::gcc::Preprocessor> Preprocessor;
mutable std::unique_ptr<tools::gcc::Compiler> Compiler;
void findGccLibDir();
+ llvm::ErrorOr<std::string> findGcc();
};
} // end namespace toolchains
diff --git a/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp
index 4ee43d6937e0..1acc0c306512 100644
--- a/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp
@@ -467,6 +467,9 @@ FormatToken *FormatTokenLexer::getNextToken() {
if (pos >= 0 && Text[pos] == '\r')
--pos;
// See whether there is an odd number of '\' before this.
+ // FIXME: This is wrong. A '\' followed by a newline is always removed,
+ // regardless of whether there is another '\' before it.
+ // FIXME: Newlines can also be escaped by a '?' '?' '/' trigraph.
unsigned count = 0;
for (; pos >= 0; --pos, ++count)
if (Text[pos] != '\\')
diff --git a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp
index 004800fc2a4e..bbc2d1e52b63 100644
--- a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp
@@ -796,10 +796,11 @@ private:
while (CurrentToken) {
FormatToken *Tok = CurrentToken;
next();
- if (Tok->isOneOf(Keywords.kw___has_include,
- Keywords.kw___has_include_next)) {
+ if (Tok->is(tok::l_paren))
+ parseParens();
+ else if (Tok->isOneOf(Keywords.kw___has_include,
+ Keywords.kw___has_include_next))
parseHasInclude();
- }
}
return Type;
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
index ab9f20304c9c..0e0eb40eb334 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -1353,13 +1353,11 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Case("cl", IK_OpenCL)
.Case("cuda", IK_CUDA)
.Case("c++", IK_CXX)
- .Case("c++-module", IK_CXX)
.Case("objective-c", IK_ObjC)
.Case("objective-c++", IK_ObjCXX)
.Case("cpp-output", IK_PreprocessedC)
.Case("assembler-with-cpp", IK_Asm)
.Case("c++-cpp-output", IK_PreprocessedCXX)
- .Case("c++-module-cpp-output", IK_PreprocessedCXX)
.Case("cuda-cpp-output", IK_PreprocessedCuda)
.Case("objective-c-cpp-output", IK_PreprocessedObjC)
.Case("objc-cpp-output", IK_PreprocessedObjC)
@@ -2324,6 +2322,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Args.getAllArgValues(OPT_fxray_always_instrument);
Opts.XRayNeverInstrumentFiles =
Args.getAllArgValues(OPT_fxray_never_instrument);
+
+ // -fallow-editor-placeholders
+ Opts.AllowEditorPlaceholders = Args.hasArg(OPT_fallow_editor_placeholders);
}
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
index dccba4e40b2d..1c94aca69381 100644
--- a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
@@ -56,8 +56,7 @@
/// __m128 _mm_ceil_ps(__m128 X);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDPS / ROUNDPS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDPS / ROUNDPS </c> instruction.
///
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded up.
@@ -74,8 +73,7 @@
/// __m128d _mm_ceil_pd(__m128d X);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDPD / ROUNDPD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDPD / ROUNDPD </c> instruction.
///
/// \param X
/// A 128-bit vector of [2 x double] values to be rounded up.
@@ -94,8 +92,7 @@
/// __m128 _mm_ceil_ss(__m128 X, __m128 Y);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDSS / ROUNDSS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDSS / ROUNDSS </c> instruction.
///
/// \param X
/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are
@@ -120,8 +117,7 @@
/// __m128d _mm_ceil_sd(__m128d X, __m128d Y);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDSD / ROUNDSD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDSD / ROUNDSD </c> instruction.
///
/// \param X
/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is
@@ -144,8 +140,7 @@
/// __m128 _mm_floor_ps(__m128 X);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDPS / ROUNDPS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDPS / ROUNDPS </c> instruction.
///
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded down.
@@ -162,8 +157,7 @@
/// __m128d _mm_floor_pd(__m128d X);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDPD / ROUNDPD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDPD / ROUNDPD </c> instruction.
///
/// \param X
/// A 128-bit vector of [2 x double].
@@ -182,8 +176,7 @@
/// __m128 _mm_floor_ss(__m128 X, __m128 Y);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDSS / ROUNDSS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDSS / ROUNDSS </c> instruction.
///
/// \param X
/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are
@@ -208,8 +201,7 @@
/// __m128d _mm_floor_sd(__m128d X, __m128d Y);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDSD / ROUNDSD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDSD / ROUNDSD </c> instruction.
///
/// \param X
/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is
@@ -233,8 +225,7 @@
/// __m128 _mm_round_ps(__m128 X, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDPS / ROUNDPS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDPS / ROUNDPS </c> instruction.
///
/// \param X
/// A 128-bit vector of [4 x float].
@@ -269,8 +260,7 @@
/// __m128 _mm_round_ss(__m128 X, __m128 Y, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDSS / ROUNDSS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDSS / ROUNDSS </c> instruction.
///
/// \param X
/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are
@@ -310,8 +300,7 @@
/// __m128d _mm_round_pd(__m128d X, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDPD / ROUNDPD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDPD / ROUNDPD </c> instruction.
///
/// \param X
/// A 128-bit vector of [2 x double].
@@ -333,7 +322,6 @@
#define _mm_round_pd(X, M) __extension__ ({ \
(__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })
-
/// \brief Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
/// Rounds the lower element of the second 128-bit vector operand to an
@@ -347,8 +335,7 @@
/// __m128d _mm_round_sd(__m128d X, __m128d Y, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VROUNDSD / ROUNDSD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VROUNDSD / ROUNDSD </c> instruction.
///
/// \param X
/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is
@@ -388,8 +375,7 @@
/// __m128d _mm_blend_pd(__m128d V1, __m128d V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VBLENDPD / BLENDPD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VBLENDPD / BLENDPD </c> instruction.
///
/// \param V1
/// A 128-bit vector of [2 x double].
@@ -419,8 +405,7 @@
/// __m128 _mm_blend_ps(__m128 V1, __m128 V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VBLENDPS / BLENDPS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VBLENDPS / BLENDPS </c> instruction.
///
/// \param V1
/// A 128-bit vector of [4 x float].
@@ -447,8 +432,7 @@
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VBLENDVPD / BLENDVPD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VBLENDVPD / BLENDVPD </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [2 x double].
@@ -475,8 +459,7 @@ _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VBLENDVPS / BLENDVPS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VBLENDVPS / BLENDVPS </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [4 x float].
@@ -503,8 +486,7 @@ _mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPBLENDVB / PBLENDVB </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPBLENDVB / PBLENDVB </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [16 x i8].
@@ -535,8 +517,7 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// __m128i _mm_blend_epi16(__m128i V1, __m128i V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPBLENDW / PBLENDW </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPBLENDW / PBLENDW </c> instruction.
///
/// \param V1
/// A 128-bit vector of [8 x i16].
@@ -569,8 +550,7 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMULLD / PMULLD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMULLD / PMULLD </c> instruction.
///
/// \param __V1
/// A 128-bit integer vector.
@@ -589,8 +569,7 @@ _mm_mullo_epi32 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMULDQ / PMULDQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMULDQ / PMULDQ </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [4 x i32].
@@ -617,8 +596,7 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// __m128 _mm_dp_ps(__m128 X, __m128 Y, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VDPPS / DPPS </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VDPPS / DPPS </c> instruction.
///
/// \param X
/// A 128-bit vector of [4 x float].
@@ -652,8 +630,7 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// __m128d _mm_dp_pd(__m128d X, __m128d Y, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VDPPD / DPPD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VDPPD / DPPD </c> instruction.
///
/// \param X
/// A 128-bit vector of [2 x double].
@@ -680,8 +657,7 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VMOVNTDQA / MOVNTDQA </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VMOVNTDQA / MOVNTDQA </c> instruction.
///
/// \param __V
/// A pointer to a 128-bit aligned memory location that contains the integer
@@ -701,8 +677,7 @@ _mm_stream_load_si128 (__m128i const *__V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMINSB / PMINSB </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMINSB / PMINSB </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [16 x i8].
@@ -721,8 +696,7 @@ _mm_min_epi8 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMAXSB / PMAXSB </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMAXSB / PMAXSB </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [16 x i8].
@@ -741,8 +715,7 @@ _mm_max_epi8 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMINUW / PMINUW </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMINUW / PMINUW </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [8 x u16].
@@ -761,8 +734,7 @@ _mm_min_epu16 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMAXUW / PMAXUW </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMAXUW / PMAXUW </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [8 x u16].
@@ -781,8 +753,7 @@ _mm_max_epu16 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMINSD / PMINSD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMINSD / PMINSD </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [4 x i32].
@@ -801,8 +772,7 @@ _mm_min_epi32 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMAXSD / PMAXSD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMAXSD / PMAXSD </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [4 x i32].
@@ -821,8 +791,7 @@ _mm_max_epi32 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMINUD / PMINUD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMINUD / PMINUD </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [4 x u32].
@@ -841,8 +810,7 @@ _mm_min_epu32 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMAXUD / PMAXUD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMAXUD / PMAXUD </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [4 x u32].
@@ -867,7 +835,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// __m128 _mm_insert_ps(__m128 X, __m128 Y, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VINSERTPS </i> </c> instruction.
+/// This intrinsic corresponds to the <c> VINSERTPS </c> instruction.
///
/// \param X
/// A 128-bit vector source operand of [4 x float]. With the exception of
@@ -907,7 +875,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// int _mm_extract_ps(__m128 X, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VEXTRACTPS / EXTRACTPS </i> </c>
+/// This intrinsic corresponds to the <c> VEXTRACTPS / EXTRACTPS </c>
/// instruction.
///
/// \param X
@@ -951,8 +919,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// __m128i _mm_insert_epi8(__m128i X, int I, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPINSRB / PINSRB </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPINSRB / PINSRB </c> instruction.
///
/// \param X
/// A 128-bit integer vector of [16 x i8]. This vector is copied to the
@@ -997,8 +964,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// __m128i _mm_insert_epi32(__m128i X, int I, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPINSRD / PINSRD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPINSRD / PINSRD </c> instruction.
///
/// \param X
/// A 128-bit integer vector of [4 x i32]. This vector is copied to the
@@ -1009,7 +975,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// specified by \a N.
/// \param N
/// An immediate value. Bits [1:0] specify the bit offset in the result at
-/// which the integer \a I is written.
+/// which the integer \a I is written. \n
/// 00: Bits [31:0] of the result are used for insertion. \n
/// 01: Bits [63:32] of the result are used for insertion. \n
/// 10: Bits [95:64] of the result are used for insertion. \n
@@ -1019,6 +985,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
({ __v4si __a = (__v4si)(__m128i)(X); \
__a[(N) & 3] = (I); \
(__m128i)__a;}))
+
#ifdef __x86_64__
/// \brief Constructs a 128-bit vector of [2 x i64] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the 64-bit
@@ -1031,8 +998,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// __m128i _mm_insert_epi64(__m128i X, long long I, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPINSRQ / PINSRQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPINSRQ / PINSRQ </c> instruction.
///
/// \param X
/// A 128-bit integer vector of [2 x i64]. This vector is copied to the
@@ -1043,7 +1009,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// specified by \a N.
/// \param N
/// An immediate value. Bit [0] specifies the bit offset in the result at
-/// which the integer \a I is written.
+/// which the integer \a I is written. \n
/// 0: Bits [63:0] of the result are used for insertion. \n
/// 1: Bits [127:64] of the result are used for insertion. \n
/// \returns A 128-bit integer vector containing the constructed values.
@@ -1065,14 +1031,13 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// int _mm_extract_epi8(__m128i X, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPEXTRB / PEXTRB </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPEXTRB / PEXTRB </c> instruction.
///
/// \param X
/// A 128-bit integer vector.
/// \param N
-/// An immediate value. Bits [3:0] specify which 8-bit vector element
-/// from the argument \a X to extract and copy to the result. \n
+/// An immediate value. Bits [3:0] specify which 8-bit vector element from
+/// the argument \a X to extract and copy to the result. \n
/// 0000: Bits [7:0] of parameter \a X are extracted. \n
/// 0001: Bits [15:8] of the parameter \a X are extracted. \n
/// 0010: Bits [23:16] of the parameter \a X are extracted. \n
@@ -1105,14 +1070,13 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// int _mm_extract_epi32(__m128i X, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPEXTRD / PEXTRD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPEXTRD / PEXTRD </c> instruction.
///
/// \param X
/// A 128-bit integer vector.
/// \param N
-/// An immediate value. Bits [1:0] specify which 32-bit vector element
-/// from the argument \a X to extract and copy to the result. \n
+/// An immediate value. Bits [1:0] specify which 32-bit vector element from
+/// the argument \a X to extract and copy to the result. \n
/// 00: Bits [31:0] of the parameter \a X are extracted. \n
/// 01: Bits [63:32] of the parameter \a X are extracted. \n
/// 10: Bits [95:64] of the parameter \a X are extracted. \n
@@ -1122,6 +1086,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
#define _mm_extract_epi32(X, N) (__extension__ \
({ __v4si __a = (__v4si)(__m128i)(X); \
(int)__a[(N) & 3];}))
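/* Not part of the header: a usage sketch for the insert/extract macros
   documented above. Illustrative only; build with SSE4.1 enabled
   (e.g. -msse4.1). Lane numbers count up from bit 0. */
#include <smmintrin.h>

static int demo_insert_extract(void) {
  __m128i v = _mm_set_epi32(3, 2, 1, 0);  /* lanes 0..3 hold {0, 1, 2, 3} */
  __m128i w = _mm_insert_epi32(v, 42, 2); /* lanes now {0, 1, 42, 3} */
  return _mm_extract_epi32(w, 2);         /* returns 42 */
}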
+
#ifdef __x86_64__
/// \brief Extracts a 64-bit element from the 128-bit integer vector of
/// [2 x i64], using the immediate value parameter \a N as a selector.
@@ -1132,14 +1097,13 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// long long _mm_extract_epi64(__m128i X, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPEXTRQ / PEXTRQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
///
/// \param X
/// A 128-bit integer vector.
/// \param N
-/// An immediate value. Bit [0] specifies which 64-bit vector element
-/// from the argument \a X to return. \n
+/// An immediate value. Bit [0] specifies which 64-bit vector element from
+/// the argument \a X to return. \n
/// 0: Bits [63:0] are returned. \n
/// 1: Bits [127:64] are returned. \n
/// \returns A 64-bit integer.
@@ -1154,8 +1118,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPTEST / PTEST </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
///
/// \param __M
/// A 128-bit integer vector containing the bits to be tested.
@@ -1173,8 +1136,7 @@ _mm_testz_si128(__m128i __M, __m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPTEST / PTEST </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
///
/// \param __M
/// A 128-bit integer vector containing the bits to be tested.
@@ -1192,8 +1154,7 @@ _mm_testc_si128(__m128i __M, __m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPTEST / PTEST </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
///
/// \param __M
/// A 128-bit integer vector containing the bits to be tested.
@@ -1216,8 +1177,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// int _mm_test_all_ones(__m128i V);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPTEST / PTEST </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
///
/// \param V
/// A 128-bit integer vector containing the bits to be tested.
@@ -1234,8 +1194,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// int _mm_test_mix_ones_zeros(__m128i M, __m128i V);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPTEST / PTEST </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
///
/// \param M
/// A 128-bit integer vector containing the bits to be tested.
@@ -1254,8 +1213,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// int _mm_test_all_zeros(__m128i M, __m128i V);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPTEST / PTEST </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPTEST / PTEST </c> instruction.
///
/// \param M
/// A 128-bit integer vector containing the bits to be tested.
@@ -1270,8 +1228,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPCMPEQQ / PCMPEQQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPCMPEQQ / PCMPEQQ </c> instruction.
///
/// \param __V1
/// A 128-bit integer vector.
@@ -1292,8 +1249,7 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVSXBW / PMOVSXBW </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVSXBW / PMOVSXBW </c> instruction.
///
/// \param __V
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
@@ -1314,8 +1270,7 @@ _mm_cvtepi8_epi16(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVSXBD / PMOVSXBD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVSXBD / PMOVSXBD </c> instruction.
///
/// \param __V
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are sign-
@@ -1336,8 +1291,7 @@ _mm_cvtepi8_epi32(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVSXBQ / PMOVSXBQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVSXBQ / PMOVSXBQ </c> instruction.
///
/// \param __V
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are sign-
@@ -1358,8 +1312,7 @@ _mm_cvtepi8_epi64(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVSXWD / PMOVSXWD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVSXWD / PMOVSXWD </c> instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are sign-
@@ -1378,8 +1331,7 @@ _mm_cvtepi16_epi32(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVSXWQ / PMOVSXWQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVSXWQ / PMOVSXWQ </c> instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are sign-
@@ -1398,8 +1350,7 @@ _mm_cvtepi16_epi64(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVSXDQ / PMOVSXDQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVSXDQ / PMOVSXDQ </c> instruction.
///
/// \param __V
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are sign-
@@ -1419,8 +1370,7 @@ _mm_cvtepi32_epi64(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVZXBW / PMOVZXBW </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVZXBW / PMOVZXBW </c> instruction.
///
/// \param __V
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are zero-
@@ -1439,8 +1389,7 @@ _mm_cvtepu8_epi16(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVZXBD / PMOVZXBD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVZXBD / PMOVZXBD </c> instruction.
///
/// \param __V
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are zero-
@@ -1459,8 +1408,7 @@ _mm_cvtepu8_epi32(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVZXBQ / PMOVZXBQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVZXBQ / PMOVZXBQ </c> instruction.
///
/// \param __V
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are zero-
@@ -1479,8 +1427,7 @@ _mm_cvtepu8_epi64(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVZXWD / PMOVZXWD </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVZXWD / PMOVZXWD </c> instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are zero-
@@ -1499,8 +1446,7 @@ _mm_cvtepu16_epi32(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVZXWQ / PMOVZXWQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVZXWQ / PMOVZXWQ </c> instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are zero-
@@ -1519,8 +1465,7 @@ _mm_cvtepu16_epi64(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPMOVZXDQ / PMOVZXDQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPMOVZXDQ / PMOVZXDQ </c> instruction.
///
/// \param __V
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are zero-
@@ -1540,8 +1485,7 @@ _mm_cvtepu32_epi64(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPACKUSDW / PACKUSDW </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPACKUSDW / PACKUSDW </c> instruction.
///
/// \param __V1
/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a
@@ -1574,8 +1518,7 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// __m128i _mm_mpsadbw_epu8(__m128i X, __m128i Y, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VMPSADBW / MPSADBW </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VMPSADBW / MPSADBW </c> instruction.
///
/// \param X
/// A 128-bit vector of [16 x i8].
@@ -1608,7 +1551,7 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPHMINPOSUW / PHMINPOSUW </i> </c>
+/// This intrinsic corresponds to the <c> VPHMINPOSUW / PHMINPOSUW </c>
/// instruction.
///
/// \param __V
@@ -1668,7 +1611,7 @@ _mm_minpos_epu16(__m128i __V)
/// __m128i _mm_cmpistrm(__m128i A, __m128i B, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPISTRM / PCMPISTRM </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPISTRM / PCMPISTRM </c>
/// instruction.
///
/// \param A
@@ -1724,7 +1667,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpistri(__m128i A, __m128i B, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPISTRI / PCMPISTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
/// instruction.
///
/// \param A
@@ -1778,7 +1721,7 @@ _mm_minpos_epu16(__m128i __V)
/// __m128i _mm_cmpestrm(__m128i A, int LA, __m128i B, int LB, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPESTRM / PCMPESTRM </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPESTRM / PCMPESTRM </c>
/// instruction.
///
/// \param A
@@ -1839,7 +1782,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpestri(__m128i A, int LA, __m128i B, int LB, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPESTRI / PCMPESTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
/// instruction.
///
/// \param A
@@ -1899,7 +1842,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpistra(__m128i A, __m128i B, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPISTRI / PCMPISTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
/// instruction.
///
/// \param A
@@ -1949,7 +1892,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpistrc(__m128i A, __m128i B, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPISTRI / PCMPISTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
/// instruction.
///
/// \param A
@@ -1997,7 +1940,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpistro(__m128i A, __m128i B, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPISTRI / PCMPISTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
/// instruction.
///
/// \param A
@@ -2046,7 +1989,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpistrs(__m128i A, __m128i B, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPISTRI / PCMPISTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
/// instruction.
///
/// \param A
@@ -2096,7 +2039,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpistrz(__m128i A, __m128i B, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPISTRI / PCMPISTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPISTRI / PCMPISTRI </c>
/// instruction.
///
/// \param A
@@ -2146,7 +2089,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpestra(__m128i A, int LA, __m128i B, int LB, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPESTRI / PCMPESTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
/// instruction.
///
/// \param A
@@ -2201,7 +2144,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpestrc(__m128i A, int LA, __m128i B, int LB, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPESTRI / PCMPESTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
/// instruction.
///
/// \param A
@@ -2243,6 +2186,7 @@ _mm_minpos_epu16(__m128i __V)
(int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
(__v16qi)(__m128i)(B), (int)(LB), \
(int)(M))
+
/// \brief Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns bit 0 of the resulting bit mask.
@@ -2253,7 +2197,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpestro(__m128i A, int LA, __m128i B, int LB, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPESTRI / PCMPESTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
/// instruction.
///
/// \param A
@@ -2307,7 +2251,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpestrs(__m128i A, int LA, __m128i B, int LB, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPESTRI / PCMPESTRI </i> </c>
+/// This intrinsic corresponds to the <c> VPCMPESTRI / PCMPESTRI </c>
/// instruction.
///
/// \param A
@@ -2362,7 +2306,7 @@ _mm_minpos_epu16(__m128i __V)
/// int _mm_cmpestrz(__m128i A, int LA, __m128i B, int LB, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the <c> <i> VPCMPESTRI </i> </c> instruction.
+/// This intrinsic corresponds to the <c> VPCMPESTRI </c> instruction.
///
/// \param A
/// A 128-bit integer vector containing one of the source operands to be
@@ -2412,8 +2356,7 @@ _mm_minpos_epu16(__m128i __V)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> VPCMPGTQ / PCMPGTQ </i> </c>
-/// instruction.
+/// This intrinsic corresponds to the <c> VPCMPGTQ / PCMPGTQ </c> instruction.
///
/// \param __V1
/// A 128-bit integer vector.
@@ -2432,7 +2375,7 @@ _mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> CRC32B </i> </c> instruction.
+/// This intrinsic corresponds to the <c> CRC32B </c> instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
@@ -2452,7 +2395,7 @@ _mm_crc32_u8(unsigned int __C, unsigned char __D)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> CRC32W </i> </c> instruction.
+/// This intrinsic corresponds to the <c> CRC32W </c> instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
@@ -2472,7 +2415,7 @@ _mm_crc32_u16(unsigned int __C, unsigned short __D)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> CRC32L </i> </c> instruction.
+/// This intrinsic corresponds to the <c> CRC32L </c> instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
@@ -2493,7 +2436,7 @@ _mm_crc32_u32(unsigned int __C, unsigned int __D)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> <i> CRC32Q </i> </c> instruction.
+/// This intrinsic corresponds to the <c> CRC32Q </c> instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
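A quick aside on the CRC-32C intrinsics documented above: a minimal usage sketch (the helper name crc32c_bytes is hypothetical, not part of this patch), chaining the byte-wise form over a buffer:

    #include <nmmintrin.h> // SSE4.2 CRC-32C intrinsics
    #include <stddef.h>

    // Accumulate a CRC-32C checksum one byte at a time; the wider
    // _mm_crc32_u16/_u32/_u64 variants follow the same shape.
    static unsigned int crc32c_bytes(unsigned int crc,
                                     const unsigned char *p, size_t n) {
      for (size_t i = 0; i != n; ++i)
        crc = _mm_crc32_u8(crc, p[i]); // compiles to CRC32B
      return crc;
    }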
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
index bb8e29cd9052..5c312c08efb6 100644
--- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -2540,7 +2540,7 @@ void _mm_setcsr(unsigned int __i);
/// A 128-bit vector of [4 x float].
/// \param mask
/// An immediate value containing an 8-bit value specifying which elements to
-/// copy from \ a and \a b. \n
+/// copy from \a a and \a b. \n
/// Bits [3:0] specify the values copied from operand \a a. \n
/// Bits [7:4] specify the values copied from operand \a b. \n
/// The destinations within the 128-bit destination are assigned values as
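To make the mask layout above concrete, a hedged illustration (this hunk appears to sit in the _mm_shuffle_ps documentation; the function name below is illustrative):

    #include <xmmintrin.h>

    // Mask bits [1:0] and [3:2] pick two elements from a; bits [5:4] and
    // [7:6] pick two elements from b.
    static __m128 take_lo_a_hi_b(__m128 a, __m128 b) {
      // 0xE4 == _MM_SHUFFLE(3, 2, 1, 0): result = { a[0], a[1], b[2], b[3] }
      return _mm_shuffle_ps(a, b, 0xE4);
    }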
diff --git a/contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp b/contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp
index 08acc96c4efb..e568c838b7b0 100644
--- a/contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/CommentToXML.cpp
@@ -592,12 +592,10 @@ void CommentASTToXMLConverter::formatTextOfDeclaration(
unsigned Offset = 0;
unsigned Length = Declaration.size();
- bool IncompleteFormat = false;
format::FormatStyle Style = format::getLLVMStyle();
Style.FixNamespaceComments = false;
tooling::Replacements Replaces =
- reformat(Style, StringDecl, tooling::Range(Offset, Length), "xmldecl.xd",
- &IncompleteFormat);
+ reformat(Style, StringDecl, tooling::Range(Offset, Length), "xmldecl.xd");
auto FormattedStringDecl = applyAllReplacements(StringDecl, Replaces);
if (static_cast<bool>(FormattedStringDecl)) {
Declaration = *FormattedStringDecl;
diff --git a/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp b/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp
index dae0cdc0d9c9..0e893505516f 100644
--- a/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp
@@ -495,8 +495,18 @@ public:
ClassTemplateSpecializationDecl *D) {
// FIXME: Notify subsequent callbacks if info comes from implicit
// instantiation.
- if (D->isThisDeclarationADefinition())
- IndexCtx.indexTagDecl(D);
+ if (D->isThisDeclarationADefinition()) {
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ Template = D->getSpecializedTemplateOrPartial();
+ const Decl *SpecializationOf =
+ Template.is<ClassTemplateDecl *>()
+ ? (Decl *)Template.get<ClassTemplateDecl *>()
+ : Template.get<ClassTemplatePartialSpecializationDecl *>();
+ IndexCtx.indexTagDecl(
+ D, SymbolRelation(SymbolRoleSet(SymbolRole::RelationSpecializationOf),
+ SpecializationOf));
+ }
return true;
}
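For context, a hedged sketch of the declarations the new relation covers (illustrative names):

    template <class T> struct Vec {};
    // Indexing this definition now also records a RelationSpecializationOf
    // relation pointing back at the primary template Vec.
    template <> struct Vec<int> {};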
diff --git a/contrib/llvm/tools/clang/lib/Index/IndexSymbol.cpp b/contrib/llvm/tools/clang/lib/Index/IndexSymbol.cpp
index fe3c17845daa..ea66b7017951 100644
--- a/contrib/llvm/tools/clang/lib/Index/IndexSymbol.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/IndexSymbol.cpp
@@ -346,6 +346,7 @@ bool index::applyForEachSymbolRoleInterruptible(SymbolRoleSet Roles,
APPLY_FOR_ROLE(RelationAccessorOf);
APPLY_FOR_ROLE(RelationContainedBy);
APPLY_FOR_ROLE(RelationIBTypeOf);
+ APPLY_FOR_ROLE(RelationSpecializationOf);
#undef APPLY_FOR_ROLE
@@ -386,6 +387,7 @@ void index::printSymbolRoles(SymbolRoleSet Roles, raw_ostream &OS) {
case SymbolRole::RelationAccessorOf: OS << "RelAcc"; break;
case SymbolRole::RelationContainedBy: OS << "RelCont"; break;
case SymbolRole::RelationIBTypeOf: OS << "RelIBType"; break;
+ case SymbolRole::RelationSpecializationOf: OS << "RelSpecialization"; break;
}
});
}
diff --git a/contrib/llvm/tools/clang/lib/Index/IndexTypeSourceInfo.cpp b/contrib/llvm/tools/clang/lib/Index/IndexTypeSourceInfo.cpp
index 0645d5be5268..a3566a9f2ae8 100644
--- a/contrib/llvm/tools/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -208,11 +208,12 @@ void IndexingContext::indexNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
}
}
-void IndexingContext::indexTagDecl(const TagDecl *D) {
+void IndexingContext::indexTagDecl(const TagDecl *D,
+ ArrayRef<SymbolRelation> Relations) {
if (!shouldIndexFunctionLocalSymbols() && isFunctionLocalSymbol(D))
return;
- if (handleDecl(D)) {
+ if (handleDecl(D, /*Roles=*/SymbolRoleSet(), Relations)) {
if (D->isThisDeclarationADefinition()) {
indexNestedNameSpecifierLoc(D->getQualifierLoc(), D);
if (auto CXXRD = dyn_cast<CXXRecordDecl>(D)) {
diff --git a/contrib/llvm/tools/clang/lib/Index/IndexingContext.cpp b/contrib/llvm/tools/clang/lib/Index/IndexingContext.cpp
index f393b11ab884..85574d0a314d 100644
--- a/contrib/llvm/tools/clang/lib/Index/IndexingContext.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/IndexingContext.cpp
@@ -233,6 +233,7 @@ static bool shouldReportOccurrenceForSystemDeclOnlyMode(
case SymbolRole::RelationReceivedBy:
case SymbolRole::RelationCalledBy:
case SymbolRole::RelationContainedBy:
+ case SymbolRole::RelationSpecializationOf:
return true;
}
llvm_unreachable("Unsupported SymbolRole value!");
diff --git a/contrib/llvm/tools/clang/lib/Index/IndexingContext.h b/contrib/llvm/tools/clang/lib/Index/IndexingContext.h
index 933b0a2cda07..1ebf6f9ce67a 100644
--- a/contrib/llvm/tools/clang/lib/Index/IndexingContext.h
+++ b/contrib/llvm/tools/clang/lib/Index/IndexingContext.h
@@ -80,7 +80,8 @@ public:
bool indexDecl(const Decl *D);
- void indexTagDecl(const TagDecl *D);
+ void indexTagDecl(const TagDecl *D,
+ ArrayRef<SymbolRelation> Relations = None);
void indexTypeSourceInfo(TypeSourceInfo *TInfo, const NamedDecl *Parent,
const DeclContext *DC = nullptr,
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index 4c051939471c..003c9b5eed1b 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -1171,6 +1171,8 @@ const char *Lexer::SkipEscapedNewLines(const char *P) {
// If not a trigraph for escape, bail out.
if (P[1] != '?' || P[2] != '/')
return P;
+ // FIXME: Take LangOpts into account; the language might not
+ // support trigraphs.
AfterEscape = P+3;
} else {
return P;
@@ -1282,12 +1284,6 @@ Slash:
Size += EscapedNewLineSize;
Ptr += EscapedNewLineSize;
- // If the char that we finally got was a \n, then we must have had
- // something like \<newline><newline>. We don't want to consume the
- // second newline.
- if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
- return ' ';
-
// Use slow version to accumulate a correct size field.
return getCharAndSizeSlow(Ptr, Size, Tok);
}
@@ -1338,12 +1334,6 @@ Slash:
Size += EscapedNewLineSize;
Ptr += EscapedNewLineSize;
- // If the char that we finally got was a \n, then we must have had
- // something like \<newline><newline>. We don't want to consume the
- // second newline.
- if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
- return ' ';
-
// Use slow version to accumulate a correct size field.
return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
}
@@ -2070,8 +2060,11 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
// Scan over the body of the comment. The common case, when scanning, is that
// the comment contains normal ascii characters with nothing interesting in
// them. As such, optimize for this case with the inner loop.
+ //
+ // This loop terminates with CurPtr pointing at the newline (or end of buffer)
+ // character that ends the line comment.
char C;
- do {
+ while (true) {
C = *CurPtr;
// Skip over characters in the fast loop.
while (C != 0 && // Potentially EOF.
@@ -2088,10 +2081,12 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
HasSpace = true;
}
- if (*EscapePtr == '\\') // Escaped newline.
+ if (*EscapePtr == '\\')
+ // Escaped newline.
CurPtr = EscapePtr;
else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
- EscapePtr[-2] == '?') // Trigraph-escaped newline.
+ EscapePtr[-2] == '?' && LangOpts.Trigraphs)
+ // Trigraph-escaped newline.
CurPtr = EscapePtr-2;
else
break; // This is a newline, we're done.
@@ -2140,9 +2135,9 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
}
}
- if (CurPtr == BufferEnd+1) {
- --CurPtr;
- break;
+ if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
+ --CurPtr;
+ break;
}
if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
@@ -2150,8 +2145,7 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
cutOffLexing();
return false;
}
-
- } while (C != '\n' && C != '\r');
+ }
// Found but did not consume the newline. Notify comment handlers about the
// comment unless we're in a #if 0 block.
@@ -2722,6 +2716,37 @@ bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
return false;
}
+static const char *findPlaceholderEnd(const char *CurPtr,
+ const char *BufferEnd) {
+ if (CurPtr == BufferEnd)
+ return nullptr;
+ BufferEnd -= 1; // Scan only up to the second-to-last character.

+ for (; CurPtr != BufferEnd; ++CurPtr) {
+ if (CurPtr[0] == '#' && CurPtr[1] == '>')
+ return CurPtr + 2;
+ }
+ return nullptr;
+}
+
+bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
+ assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
+ if (!PP || LexingRawMode)
+ return false;
+ const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
+ if (!End)
+ return false;
+ const char *Start = CurPtr - 1;
+ if (!LangOpts.AllowEditorPlaceholders)
+ Diag(Start, diag::err_placeholder_in_source);
+ Result.startToken();
+ FormTokenWithChars(Result, End, tok::raw_identifier);
+ Result.setRawIdentifierData(Start);
+ PP->LookUpIdentifierInfo(Result);
+ Result.setFlag(Token::IsEditorPlaceholder);
+ BufferPtr = End;
+ return true;
+}
+
bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
if (PP && PP->isCodeCompletionEnabled()) {
SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
@@ -3479,6 +3504,8 @@ LexNextToken:
} else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{'
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::l_brace;
+ } else if (Char == '#' && lexEditorPlaceholder(Result, CurPtr)) {
+ return true;
} else {
Kind = tok::less;
}
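For context on the editor-placeholder lexing above, a hedged sketch of the input it targets (Xcode-style placeholders; illustrative):

    // Under -fallow-editor-placeholders, the placeholder below is lexed by
    // lexEditorPlaceholder() as a single raw_identifier token flagged
    // IsEditorPlaceholder; without the flag, err_placeholder_in_source fires.
    int x = <#initial value#>;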
diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
index 4f3db8dd6436..5b60ed3f812a 100644
--- a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
@@ -2002,9 +2002,8 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
if (!Dir) {
- Diags.Report(DirNameLoc, diag::err_mmap_umbrella_dir_not_found)
+ Diags.Report(DirNameLoc, diag::warn_mmap_umbrella_dir_not_found)
<< DirName;
- HadError = true;
return;
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
index 4db17c344b67..cf0c953b61f8 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -303,9 +303,8 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (const FileEntry *FE = CurPPLexer->getFileEntry()) {
HeaderInfo.SetFileControllingMacro(FE, ControllingMacro);
if (MacroInfo *MI =
- getMacroInfo(const_cast<IdentifierInfo*>(ControllingMacro))) {
- MI->UsedForHeaderGuard = true;
- }
+ getMacroInfo(const_cast<IdentifierInfo*>(ControllingMacro)))
+ MI->setUsedForHeaderGuard(true);
if (const IdentifierInfo *DefinedMacro =
CurPPLexer->MIOpt.GetDefinedMacro()) {
if (!isMacroDefined(ControllingMacro) &&
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
index c8de6b35f9ef..c34cd09a6238 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
@@ -183,6 +183,17 @@ private:
Sema &Actions;
};
+/// PragmaAttributeHandler - "\#pragma clang attribute ...".
+struct PragmaAttributeHandler : public PragmaHandler {
+ PragmaAttributeHandler(AttributeFactory &AttrFactory)
+ : PragmaHandler("attribute"), AttributesForPragmaAttribute(AttrFactory) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+
+ /// A pool of attributes that were parsed in \#pragma clang attribute.
+ ParsedAttributes AttributesForPragmaAttribute;
+};
+
} // end namespace
void Parser::initializePragmaHandlers() {
@@ -275,6 +286,9 @@ void Parser::initializePragmaHandlers() {
FPHandler.reset(new PragmaFPHandler());
PP.AddPragmaHandler("clang", FPHandler.get());
+
+ AttributePragmaHandler.reset(new PragmaAttributeHandler(AttrFactory));
+ PP.AddPragmaHandler("clang", AttributePragmaHandler.get());
}
void Parser::resetPragmaHandlers() {
@@ -356,6 +370,9 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("clang", FPHandler.get());
FPHandler.reset();
+
+ PP.RemovePragmaHandler("clang", AttributePragmaHandler.get());
+ AttributePragmaHandler.reset();
}
/// \brief Handle the annotation token produced for #pragma unused(...)
@@ -966,6 +983,422 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
return true;
}
+namespace {
+struct PragmaAttributeInfo {
+ enum ActionType { Push, Pop };
+ ParsedAttributes &Attributes;
+ ActionType Action;
+ ArrayRef<Token> Tokens;
+
+ PragmaAttributeInfo(ParsedAttributes &Attributes) : Attributes(Attributes) {}
+};
+
+#include "clang/Parse/AttrSubMatchRulesParserStringSwitches.inc"
+
+} // end anonymous namespace
+
+static StringRef getIdentifier(const Token &Tok) {
+ if (Tok.is(tok::identifier))
+ return Tok.getIdentifierInfo()->getName();
+ const char *S = tok::getKeywordSpelling(Tok.getKind());
+ if (!S)
+ return "";
+ return S;
+}
+
+static bool isAbstractAttrMatcherRule(attr::SubjectMatchRule Rule) {
+ using namespace attr;
+ switch (Rule) {
+#define ATTR_MATCH_RULE(Value, Spelling, IsAbstract) \
+ case Value: \
+ return IsAbstract;
+#include "clang/Basic/AttrSubMatchRulesList.inc"
+ }
+ llvm_unreachable("Invalid attribute subject match rule");
+ return false;
+}
+
+static void diagnoseExpectedAttributeSubjectSubRule(
+ Parser &PRef, attr::SubjectMatchRule PrimaryRule, StringRef PrimaryRuleName,
+ SourceLocation SubRuleLoc) {
+ auto Diagnostic =
+ PRef.Diag(SubRuleLoc,
+ diag::err_pragma_attribute_expected_subject_sub_identifier)
+ << PrimaryRuleName;
+ if (const char *SubRules = validAttributeSubjectMatchSubRules(PrimaryRule))
+ Diagnostic << /*SubRulesSupported=*/1 << SubRules;
+ else
+ Diagnostic << /*SubRulesSupported=*/0;
+}
+
+static void diagnoseUnknownAttributeSubjectSubRule(
+ Parser &PRef, attr::SubjectMatchRule PrimaryRule, StringRef PrimaryRuleName,
+ StringRef SubRuleName, SourceLocation SubRuleLoc) {
+
+ auto Diagnostic =
+ PRef.Diag(SubRuleLoc, diag::err_pragma_attribute_unknown_subject_sub_rule)
+ << SubRuleName << PrimaryRuleName;
+ if (const char *SubRules = validAttributeSubjectMatchSubRules(PrimaryRule))
+ Diagnostic << /*SubRulesSupported=*/1 << SubRules;
+ else
+ Diagnostic << /*SubRulesSupported=*/0;
+}
+
+bool Parser::ParsePragmaAttributeSubjectMatchRuleSet(
+ attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc,
+ SourceLocation &LastMatchRuleEndLoc) {
+ bool IsAny = false;
+ BalancedDelimiterTracker AnyParens(*this, tok::l_paren);
+ if (getIdentifier(Tok) == "any") {
+ AnyLoc = ConsumeToken();
+ IsAny = true;
+ if (AnyParens.expectAndConsume())
+ return true;
+ }
+
+ do {
+ // Parse the subject matcher rule.
+ StringRef Name = getIdentifier(Tok);
+ if (Name.empty()) {
+ Diag(Tok, diag::err_pragma_attribute_expected_subject_identifier);
+ return true;
+ }
+ std::pair<Optional<attr::SubjectMatchRule>,
+ Optional<attr::SubjectMatchRule> (*)(StringRef, bool)>
+ Rule = isAttributeSubjectMatchRule(Name);
+ if (!Rule.first) {
+ Diag(Tok, diag::err_pragma_attribute_unknown_subject_rule) << Name;
+ return true;
+ }
+ attr::SubjectMatchRule PrimaryRule = *Rule.first;
+ SourceLocation RuleLoc = ConsumeToken();
+
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ if (isAbstractAttrMatcherRule(PrimaryRule)) {
+ if (Parens.expectAndConsume())
+ return true;
+ } else if (Parens.consumeOpen()) {
+ if (!SubjectMatchRules
+ .insert(
+ std::make_pair(PrimaryRule, SourceRange(RuleLoc, RuleLoc)))
+ .second)
+ Diag(RuleLoc, diag::err_pragma_attribute_duplicate_subject)
+ << Name
+ << FixItHint::CreateRemoval(SourceRange(
+ RuleLoc, Tok.is(tok::comma) ? Tok.getLocation() : RuleLoc));
+ LastMatchRuleEndLoc = RuleLoc;
+ continue;
+ }
+
+ // Parse the sub-rules.
+ StringRef SubRuleName = getIdentifier(Tok);
+ if (SubRuleName.empty()) {
+ diagnoseExpectedAttributeSubjectSubRule(*this, PrimaryRule, Name,
+ Tok.getLocation());
+ return true;
+ }
+ attr::SubjectMatchRule SubRule;
+ if (SubRuleName == "unless") {
+ SourceLocation SubRuleLoc = ConsumeToken();
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ if (Parens.expectAndConsume())
+ return true;
+ SubRuleName = getIdentifier(Tok);
+ if (SubRuleName.empty()) {
+ diagnoseExpectedAttributeSubjectSubRule(*this, PrimaryRule, Name,
+ SubRuleLoc);
+ return true;
+ }
+ auto SubRuleOrNone = Rule.second(SubRuleName, /*IsUnless=*/true);
+ if (!SubRuleOrNone) {
+ std::string SubRuleUnlessName = "unless(" + SubRuleName.str() + ")";
+ diagnoseUnknownAttributeSubjectSubRule(*this, PrimaryRule, Name,
+ SubRuleUnlessName, SubRuleLoc);
+ return true;
+ }
+ SubRule = *SubRuleOrNone;
+ ConsumeToken();
+ if (Parens.consumeClose())
+ return true;
+ } else {
+ auto SubRuleOrNone = Rule.second(SubRuleName, /*IsUnless=*/false);
+ if (!SubRuleOrNone) {
+ diagnoseUnknownAttributeSubjectSubRule(*this, PrimaryRule, Name,
+ SubRuleName, Tok.getLocation());
+ return true;
+ }
+ SubRule = *SubRuleOrNone;
+ ConsumeToken();
+ }
+ SourceLocation RuleEndLoc = Tok.getLocation();
+ LastMatchRuleEndLoc = RuleEndLoc;
+ if (Parens.consumeClose())
+ return true;
+ if (!SubjectMatchRules
+ .insert(std::make_pair(SubRule, SourceRange(RuleLoc, RuleEndLoc)))
+ .second) {
+ Diag(RuleLoc, diag::err_pragma_attribute_duplicate_subject)
+ << attr::getSubjectMatchRuleSpelling(SubRule)
+ << FixItHint::CreateRemoval(SourceRange(
+ RuleLoc, Tok.is(tok::comma) ? Tok.getLocation() : RuleEndLoc));
+ continue;
+ }
+ } while (IsAny && TryConsumeToken(tok::comma));
+
+ if (IsAny)
+ if (AnyParens.consumeClose())
+ return true;
+
+ return false;
+}
+
+namespace {
+
+/// Describes the stage at which attribute subject rule parsing was
+/// interrupted.
+enum class MissingAttributeSubjectRulesRecoveryPoint {
+ Comma,
+ ApplyTo,
+ Equals,
+ Any,
+ None,
+};
+
+MissingAttributeSubjectRulesRecoveryPoint
+getAttributeSubjectRulesRecoveryPointForToken(const Token &Tok) {
+ if (const auto *II = Tok.getIdentifierInfo()) {
+ if (II->isStr("apply_to"))
+ return MissingAttributeSubjectRulesRecoveryPoint::ApplyTo;
+ if (II->isStr("any"))
+ return MissingAttributeSubjectRulesRecoveryPoint::Any;
+ }
+ if (Tok.is(tok::equal))
+ return MissingAttributeSubjectRulesRecoveryPoint::Equals;
+ return MissingAttributeSubjectRulesRecoveryPoint::None;
+}
+
+/// Creates a diagnostic for an attribute subject rule parsing error that
+/// suggests the possible attribute subject rules in a fix-it, together with
+/// any other missing tokens.
+DiagnosticBuilder createExpectedAttributeSubjectRulesTokenDiagnostic(
+ unsigned DiagID, AttributeList &Attribute,
+ MissingAttributeSubjectRulesRecoveryPoint Point, Parser &PRef) {
+ SourceLocation Loc = PRef.getEndOfPreviousToken();
+ if (Loc.isInvalid())
+ Loc = PRef.getCurToken().getLocation();
+ auto Diagnostic = PRef.Diag(Loc, DiagID);
+ std::string FixIt;
+ MissingAttributeSubjectRulesRecoveryPoint EndPoint =
+ getAttributeSubjectRulesRecoveryPointForToken(PRef.getCurToken());
+ if (Point == MissingAttributeSubjectRulesRecoveryPoint::Comma)
+ FixIt = ", ";
+ if (Point <= MissingAttributeSubjectRulesRecoveryPoint::ApplyTo &&
+ EndPoint > MissingAttributeSubjectRulesRecoveryPoint::ApplyTo)
+ FixIt += "apply_to";
+ if (Point <= MissingAttributeSubjectRulesRecoveryPoint::Equals &&
+ EndPoint > MissingAttributeSubjectRulesRecoveryPoint::Equals)
+ FixIt += " = ";
+ SourceRange FixItRange(Loc);
+ if (EndPoint == MissingAttributeSubjectRulesRecoveryPoint::None) {
+ // Gather the subject match rules that are supported by the attribute.
+ SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4> SubjectMatchRuleSet;
+ Attribute.getMatchRules(PRef.getLangOpts(), SubjectMatchRuleSet);
+ if (SubjectMatchRuleSet.empty()) {
+ // FIXME: We can emit a "fix-it" with a subject list placeholder once
+ // placeholders are supported by fix-its.
+ return Diagnostic;
+ }
+ FixIt += "any(";
+ bool NeedsComma = false;
+ for (const auto &I : SubjectMatchRuleSet) {
+ // Ensure that the missing rule is reported in the fix-it only when it's
+ // supported in the current language mode.
+ if (!I.second)
+ continue;
+ if (NeedsComma)
+ FixIt += ", ";
+ else
+ NeedsComma = true;
+ FixIt += attr::getSubjectMatchRuleSpelling(I.first);
+ }
+ FixIt += ")";
+ // Extend the fix-it range over any remaining tokens so they get replaced.
+ PRef.SkipUntil(tok::eof, Parser::StopBeforeMatch);
+ FixItRange.setEnd(PRef.getCurToken().getLocation());
+ }
+ if (FixItRange.getBegin() == FixItRange.getEnd())
+ Diagnostic << FixItHint::CreateInsertion(FixItRange.getBegin(), FixIt);
+ else
+ Diagnostic << FixItHint::CreateReplacement(
+ CharSourceRange::getCharRange(FixItRange), FixIt);
+ return Diagnostic;
+}
+
+} // end anonymous namespace
+
+void Parser::HandlePragmaAttribute() {
+ assert(Tok.is(tok::annot_pragma_attribute) &&
+ "Expected #pragma attribute annotation token");
+ SourceLocation PragmaLoc = Tok.getLocation();
+ auto *Info = static_cast<PragmaAttributeInfo *>(Tok.getAnnotationValue());
+ if (Info->Action == PragmaAttributeInfo::Pop) {
+ ConsumeToken();
+ Actions.ActOnPragmaAttributePop(PragmaLoc);
+ return;
+ }
+ // Parse the actual attribute with its arguments.
+ assert(Info->Action == PragmaAttributeInfo::Push &&
+ "Unexpected #pragma attribute command");
+ PP.EnterTokenStream(Info->Tokens, /*DisableMacroExpansion=*/false);
+ ConsumeToken();
+
+ ParsedAttributes &Attrs = Info->Attributes;
+ Attrs.clearListOnly();
+
+ auto SkipToEnd = [this]() {
+ SkipUntil(tok::eof, StopBeforeMatch);
+ ConsumeToken();
+ };
+
+ if (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) {
+ // Parse the CXX11 style attribute.
+ ParseCXX11AttributeSpecifier(Attrs);
+ } else if (Tok.is(tok::kw___attribute)) {
+ ConsumeToken();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
+ "attribute"))
+ return SkipToEnd();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after, "("))
+ return SkipToEnd();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_pragma_attribute_expected_attribute_name);
+ SkipToEnd();
+ return;
+ }
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ AttributeList::AS_GNU);
+ else
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, /*EndLoc=*/nullptr,
+ /*ScopeName=*/nullptr,
+ /*ScopeLoc=*/SourceLocation(),
+ AttributeList::AS_GNU,
+ /*Declarator=*/nullptr);
+
+ if (ExpectAndConsume(tok::r_paren))
+ return SkipToEnd();
+ if (ExpectAndConsume(tok::r_paren))
+ return SkipToEnd();
+ } else if (Tok.is(tok::kw___declspec)) {
+ ParseMicrosoftDeclSpecs(Attrs);
+ } else {
+ Diag(Tok, diag::err_pragma_attribute_expected_attribute_syntax);
+ if (Tok.getIdentifierInfo()) {
+ // If we suspect that this is an attribute suggest the use of
+ // '__attribute__'.
+ if (AttributeList::getKind(Tok.getIdentifierInfo(), /*ScopeName=*/nullptr,
+ AttributeList::AS_GNU) !=
+ AttributeList::UnknownAttribute) {
+ SourceLocation InsertStartLoc = Tok.getLocation();
+ ConsumeToken();
+ if (Tok.is(tok::l_paren)) {
+ ConsumeAnyToken();
+ SkipUntil(tok::r_paren, StopBeforeMatch);
+ if (Tok.isNot(tok::r_paren))
+ return SkipToEnd();
+ }
+ Diag(Tok, diag::note_pragma_attribute_use_attribute_kw)
+ << FixItHint::CreateInsertion(InsertStartLoc, "__attribute__((")
+ << FixItHint::CreateInsertion(Tok.getEndLoc(), "))");
+ }
+ }
+ SkipToEnd();
+ return;
+ }
+
+ if (!Attrs.getList() || Attrs.getList()->isInvalid()) {
+ SkipToEnd();
+ return;
+ }
+
+ // Ensure that we don't have more than one attribute.
+ if (Attrs.getList()->getNext()) {
+ SourceLocation Loc = Attrs.getList()->getNext()->getLoc();
+ Diag(Loc, diag::err_pragma_attribute_multiple_attributes);
+ SkipToEnd();
+ return;
+ }
+
+ if (!Attrs.getList()->isSupportedByPragmaAttribute()) {
+ Diag(PragmaLoc, diag::err_pragma_attribute_unsupported_attribute)
+ << Attrs.getList()->getName();
+ SkipToEnd();
+ return;
+ }
+ AttributeList &Attribute = *Attrs.getList();
+
+ // Parse the subject-list.
+ if (!TryConsumeToken(tok::comma)) {
+ createExpectedAttributeSubjectRulesTokenDiagnostic(
+ diag::err_expected, Attribute,
+ MissingAttributeSubjectRulesRecoveryPoint::Comma, *this)
+ << tok::comma;
+ SkipToEnd();
+ return;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ createExpectedAttributeSubjectRulesTokenDiagnostic(
+ diag::err_pragma_attribute_invalid_subject_set_specifier, Attribute,
+ MissingAttributeSubjectRulesRecoveryPoint::ApplyTo, *this);
+ SkipToEnd();
+ return;
+ }
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II->isStr("apply_to")) {
+ createExpectedAttributeSubjectRulesTokenDiagnostic(
+ diag::err_pragma_attribute_invalid_subject_set_specifier, Attribute,
+ MissingAttributeSubjectRulesRecoveryPoint::ApplyTo, *this);
+ SkipToEnd();
+ return;
+ }
+ ConsumeToken();
+
+ if (!TryConsumeToken(tok::equal)) {
+ createExpectedAttributeSubjectRulesTokenDiagnostic(
+ diag::err_expected, Attribute,
+ MissingAttributeSubjectRulesRecoveryPoint::Equals, *this)
+ << tok::equal;
+ SkipToEnd();
+ return;
+ }
+
+ attr::ParsedSubjectMatchRuleSet SubjectMatchRules;
+ SourceLocation AnyLoc, LastMatchRuleEndLoc;
+ if (ParsePragmaAttributeSubjectMatchRuleSet(SubjectMatchRules, AnyLoc,
+ LastMatchRuleEndLoc)) {
+ SkipToEnd();
+ return;
+ }
+
+ // Tokens following an ill-formed attribute will remain in the token stream
+ // and must be removed.
+ if (Tok.isNot(tok::eof)) {
+ Diag(Tok, diag::err_pragma_attribute_extra_tokens_after_attribute);
+ SkipToEnd();
+ return;
+ }
+
+ // Consume the eof terminator token.
+ ConsumeToken();
+
+ Actions.ActOnPragmaAttributePush(Attribute, PragmaLoc,
+ std::move(SubjectMatchRules));
+}
+
// #pragma GCC visibility comes in two variants:
// 'push' '(' [visibility] ')'
// 'pop'
@@ -2395,3 +2828,104 @@ void PragmaForceCUDAHostDeviceHandler::HandlePragma(
PP.Diag(FirstTok.getLocation(),
diag::warn_pragma_force_cuda_host_device_bad_arg);
}
+
+/// \brief Handle the #pragma clang attribute directive.
+///
+/// The syntax is:
+/// \code
+/// #pragma clang attribute push(attribute, subject-set)
+/// #pragma clang attribute pop
+/// \endcode
+///
+/// The subject-set clause defines the set of declarations which receive the
+/// attribute. Its exact syntax is described in the LanguageExtensions document
+/// in Clang's documentation.
+///
+/// This directive instructs the compiler to begin/finish applying the
+/// specified attribute to the declarations matching the subject set within
+/// the active range of the pragma.
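+///
+/// For example (an illustrative sketch, not taken from this patch):
+/// \code
+/// #pragma clang attribute push (__attribute__((annotate("custom"))), \
+///                               apply_to = any(function, record(unless(is_union))))
+/// void f();    // receives annotate("custom")
+/// union U { }; // excluded by unless(is_union)
+/// #pragma clang attribute pop
+/// \endcode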
+void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &FirstToken) {
+ Token Tok;
+ PP.Lex(Tok);
+ auto *Info = new (PP.getPreprocessorAllocator())
+ PragmaAttributeInfo(AttributesForPragmaAttribute);
+
+ // Parse the 'push' or 'pop'.
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_expected_push_pop);
+ return;
+ }
+ const auto *II = Tok.getIdentifierInfo();
+ if (II->isStr("push"))
+ Info->Action = PragmaAttributeInfo::Push;
+ else if (II->isStr("pop"))
+ Info->Action = PragmaAttributeInfo::Pop;
+ else {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_invalid_argument)
+ << PP.getSpelling(Tok);
+ return;
+ }
+ PP.Lex(Tok);
+
+ // Parse the actual attribute.
+ if (Info->Action == PragmaAttributeInfo::Push) {
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected) << tok::l_paren;
+ return;
+ }
+ PP.Lex(Tok);
+
+ // Lex the attribute tokens.
+ SmallVector<Token, 16> AttributeTokens;
+ int OpenParens = 1;
+ while (Tok.isNot(tok::eod)) {
+ if (Tok.is(tok::l_paren))
+ OpenParens++;
+ else if (Tok.is(tok::r_paren)) {
+ OpenParens--;
+ if (OpenParens == 0)
+ break;
+ }
+
+ AttributeTokens.push_back(Tok);
+ PP.Lex(Tok);
+ }
+
+ if (AttributeTokens.empty()) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_expected_attribute);
+ return;
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_expected) << tok::r_paren;
+ return;
+ }
+ SourceLocation EndLoc = Tok.getLocation();
+ PP.Lex(Tok);
+
+ // Terminate the attribute for parsing.
+ Token EOFTok;
+ EOFTok.startToken();
+ EOFTok.setKind(tok::eof);
+ EOFTok.setLocation(EndLoc);
+ AttributeTokens.push_back(EOFTok);
+
+ Info->Tokens =
+ llvm::makeArrayRef(AttributeTokens).copy(PP.getPreprocessorAllocator());
+ }
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang attribute";
+
+ // Generate the annotated pragma token.
+ auto TokenArray = llvm::make_unique<Token[]>(1);
+ TokenArray[0].startToken();
+ TokenArray[0].setKind(tok::annot_pragma_attribute);
+ TokenArray[0].setLocation(FirstToken.getLocation());
+ TokenArray[0].setAnnotationEndLoc(FirstToken.getLocation());
+ TokenArray[0].setAnnotationValue(static_cast<void *>(Info));
+ PP.EnterTokenStream(std::move(TokenArray), 1,
+ /*DisableMacroExpansion=*/false);
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
index eaff9fe8eedf..7d78046d0684 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -382,6 +382,10 @@ Retry:
case tok::annot_pragma_dump:
HandlePragmaDump();
return StmtEmpty();
+
+ case tok::annot_pragma_attribute:
+ HandlePragmaAttribute();
+ return StmtEmpty();
}
// If we reached this code, the statement must end in a semicolon.
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
index aa8ed91d382f..edbfc636bc46 100644
--- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -602,6 +602,10 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
ConsumeToken();
return false;
+ case tok::annot_pragma_attribute:
+ HandlePragmaAttribute();
+ return false;
+
case tok::eof:
// Late template parsing can begin.
if (getLangOpts().DelayedTemplateParsing)
@@ -847,6 +851,10 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
default:
dont_know:
+ if (Tok.isEditorPlaceholder()) {
+ ConsumeToken();
+ return nullptr;
+ }
// We can't tell whether this is a function-definition or declaration yet.
return ParseDeclarationOrFunctionDefinition(attrs, DS);
}
@@ -1675,6 +1683,8 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
return false;
}
}
+ if (Tok.isEditorPlaceholder())
+ return true;
Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
return true;
diff --git a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
index 55e9601bf5e5..724db456785f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/SemaInternal.h"
@@ -160,12 +161,16 @@ struct ParsedAttrInfo {
unsigned IsType : 1;
unsigned IsStmt : 1;
unsigned IsKnownToGCC : 1;
+ unsigned IsSupportedByPragmaAttribute : 1;
bool (*DiagAppertainsToDecl)(Sema &S, const AttributeList &Attr,
const Decl *);
bool (*DiagLangOpts)(Sema &S, const AttributeList &Attr);
bool (*ExistsInTarget)(const TargetInfo &Target);
unsigned (*SpellingIndexToSemanticSpelling)(const AttributeList &Attr);
+ void (*GetPragmaAttributeMatchRules)(
+ llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
+ const LangOptions &LangOpts);
};
namespace {
@@ -192,6 +197,18 @@ bool AttributeList::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
return getInfo(*this).DiagAppertainsToDecl(S, *this, D);
}
+bool AttributeList::appliesToDecl(const Decl *D,
+ attr::SubjectMatchRule MatchRule) const {
+ return checkAttributeMatchRuleAppliesTo(D, MatchRule);
+}
+
+void AttributeList::getMatchRules(
+ const LangOptions &LangOpts,
+ SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &MatchRules)
+ const {
+ return getInfo(*this).GetPragmaAttributeMatchRules(MatchRules, LangOpts);
+}
+
bool AttributeList::diagnoseLangOpts(Sema &S) const {
return getInfo(*this).DiagLangOpts(S, *this);
}
@@ -216,6 +233,10 @@ bool AttributeList::isKnownToGCC() const {
return getInfo(*this).IsKnownToGCC;
}
+bool AttributeList::isSupportedByPragmaAttribute() const {
+ return getInfo(*this).IsSupportedByPragmaAttribute;
+}
+
unsigned AttributeList::getSemanticSpelling() const {
return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
index 899d3fa83cc3..865aea9e2284 100644
--- a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
@@ -287,6 +287,15 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
IndirectJumpTargets.push_back(cast<AddrLabelExpr>(S)->getLabel());
break;
+ case Stmt::ObjCForCollectionStmtClass: {
+ auto *CS = cast<ObjCForCollectionStmt>(S);
+ unsigned Diag = diag::note_protected_by_objc_fast_enumeration;
+ unsigned NewParentScope = Scopes.size();
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, S->getLocStart()));
+ BuildScopeInformation(CS->getBody(), NewParentScope);
+ return;
+ }
+
case Stmt::IndirectGotoStmtClass:
// "goto *&&lbl;" is a special case which we treat as equivalent
// to a normal goto. In addition, we don't calculate scope in the
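A hedged Objective-C illustration of the jump the new scope information diagnoses (names illustrative):

    void walk(id items) {
      goto inside; // error: jump into the protected scope, now accompanied by
                   // the note_protected_by_objc_fast_enumeration note
      for (id x in items) {
      inside:
        (void)x;
      }
    }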
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
index 294b56059b33..950f04088822 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -71,42 +71,35 @@ void Sema::ActOnTranslationUnitScope(Scope *S) {
}
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
- TranslationUnitKind TUKind,
- CodeCompleteConsumer *CodeCompleter)
- : ExternalSource(nullptr),
- isMultiplexExternalSource(false), FPFeatures(pp.getLangOpts()),
- LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
- Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
- CollectStats(false), CodeCompleter(CodeCompleter),
- CurContext(nullptr), OriginalLexicalContext(nullptr),
- MSStructPragmaOn(false),
- MSPointerToMemberRepresentationMethod(
- LangOpts.getMSPointerToMemberRepresentationMethod()),
- VtorDispStack(MSVtorDispAttr::Mode(LangOpts.VtorDispMode)),
- PackStack(0), DataSegStack(nullptr), BSSSegStack(nullptr),
- ConstSegStack(nullptr), CodeSegStack(nullptr), CurInitSeg(nullptr),
- VisContext(nullptr),
- IsBuildingRecoveryCallExpr(false),
- Cleanup{}, LateTemplateParser(nullptr),
- LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
- StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
- CXXTypeInfoDecl(nullptr), MSVCGuidDecl(nullptr),
- NSNumberDecl(nullptr), NSValueDecl(nullptr),
- NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
- ValueWithBytesObjCTypeMethod(nullptr),
- NSArrayDecl(nullptr), ArrayWithObjectsMethod(nullptr),
- NSDictionaryDecl(nullptr), DictionaryWithObjectsMethod(nullptr),
- GlobalNewDeleteDeclared(false),
- TUKind(TUKind),
- NumSFINAEErrors(0),
- CachedFakeTopLevelModule(nullptr),
- AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
- NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
- CurrentInstantiationScope(nullptr), DisableTypoCorrection(false),
- TyposCorrected(0), AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
- VarDataSharingAttributesStack(nullptr), CurScope(nullptr),
- Ident_super(nullptr), Ident___float128(nullptr)
-{
+ TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
+ : ExternalSource(nullptr), isMultiplexExternalSource(false),
+ FPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
+ Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
+ SourceMgr(PP.getSourceManager()), CollectStats(false),
+ CodeCompleter(CodeCompleter), CurContext(nullptr),
+ OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
+ MSPointerToMemberRepresentationMethod(
+ LangOpts.getMSPointerToMemberRepresentationMethod()),
+ VtorDispStack(MSVtorDispAttr::Mode(LangOpts.VtorDispMode)), PackStack(0),
+ DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
+ CodeSegStack(nullptr), CurInitSeg(nullptr), VisContext(nullptr),
+ PragmaAttributeCurrentTargetDecl(nullptr),
+ IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
+ LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
+ StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
+ CXXTypeInfoDecl(nullptr), MSVCGuidDecl(nullptr), NSNumberDecl(nullptr),
+ NSValueDecl(nullptr), NSStringDecl(nullptr),
+ StringWithUTF8StringMethod(nullptr),
+ ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
+ ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
+ DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
+ TUKind(TUKind), NumSFINAEErrors(0), CachedFakeTopLevelModule(nullptr),
+ AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
+ NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
+ CurrentInstantiationScope(nullptr), DisableTypoCorrection(false),
+ TyposCorrected(0), AnalysisWarnings(*this),
+ ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
+ CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
TUScope = nullptr;
LoadedExternalKnownNamespaces = false;
@@ -731,6 +724,8 @@ void Sema::ActOnEndOfTranslationUnit() {
CheckDelayedMemberExceptionSpecs();
}
+ DiagnoseUnterminatedPragmaAttribute();
+
// All delayed member exception specs should be checked or we end up accepting
// incompatible declarations.
// FIXME: This is wrong for TUKind == TU_Prefix. In that case, we need to
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
index c6e3cc886316..76ca65373dda 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
@@ -368,6 +368,219 @@ void Sema::AddCFAuditedAttribute(Decl *D) {
D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Loc));
}
+namespace {
+
+Optional<attr::SubjectMatchRule>
+getParentAttrMatcherRule(attr::SubjectMatchRule Rule) {
+ using namespace attr;
+ switch (Rule) {
+ default:
+ return None;
+#define ATTR_MATCH_RULE(Value, Spelling, IsAbstract)
+#define ATTR_MATCH_SUB_RULE(Value, Spelling, IsAbstract, Parent, IsNegated) \
+ case Value: \
+ return Parent;
+#include "clang/Basic/AttrSubMatchRulesList.inc"
+ }
+}
+
+bool isNegatedAttrMatcherSubRule(attr::SubjectMatchRule Rule) {
+ using namespace attr;
+ switch (Rule) {
+ default:
+ return false;
+#define ATTR_MATCH_RULE(Value, Spelling, IsAbstract)
+#define ATTR_MATCH_SUB_RULE(Value, Spelling, IsAbstract, Parent, IsNegated) \
+ case Value: \
+ return IsNegated;
+#include "clang/Basic/AttrSubMatchRulesList.inc"
+ }
+}
+
+CharSourceRange replacementRangeForListElement(const Sema &S,
+ SourceRange Range) {
+ // Make sure that the ',' is removed as well.
+ SourceLocation AfterCommaLoc = Lexer::findLocationAfterToken(
+ Range.getEnd(), tok::comma, S.getSourceManager(), S.getLangOpts(),
+ /*SkipTrailingWhitespaceAndNewLine=*/false);
+ if (AfterCommaLoc.isValid())
+ return CharSourceRange::getCharRange(Range.getBegin(), AfterCommaLoc);
+ else
+ return CharSourceRange::getTokenRange(Range);
+}
+
+std::string
+attrMatcherRuleListToString(ArrayRef<attr::SubjectMatchRule> Rules) {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ for (const auto &I : llvm::enumerate(Rules)) {
+ if (I.index())
+ OS << (I.index() == Rules.size() - 1 ? ", and " : ", ");
+ OS << "'" << attr::getSubjectMatchRuleSpelling(I.value()) << "'";
+ }
+ return OS.str();
+}
+
+} // end anonymous namespace
+
+void Sema::ActOnPragmaAttributePush(AttributeList &Attribute,
+ SourceLocation PragmaLoc,
+ attr::ParsedSubjectMatchRuleSet Rules) {
+ SmallVector<attr::SubjectMatchRule, 4> SubjectMatchRules;
+ // Gather the subject match rules that are supported by the attribute.
+ SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4>
+ StrictSubjectMatchRuleSet;
+ Attribute.getMatchRules(LangOpts, StrictSubjectMatchRuleSet);
+
+ // Figure out which subject matching rules are valid.
+ if (StrictSubjectMatchRuleSet.empty()) {
+ // Check for contradicting match rules. Contradicting match rules are
+ // either:
+ // - a top-level rule and one of its sub-rules. E.g. variable and
+ // variable(is_parameter).
+ // - a sub-rule and a sibling that's negated. E.g.
+ // variable(is_thread_local) and variable(unless(is_parameter))
+ llvm::SmallDenseMap<int, std::pair<int, SourceRange>, 2>
+ RulesToFirstSpecifiedNegatedSubRule;
+ for (const auto &Rule : Rules) {
+ attr::SubjectMatchRule MatchRule = attr::SubjectMatchRule(Rule.first);
+ Optional<attr::SubjectMatchRule> ParentRule =
+ getParentAttrMatcherRule(MatchRule);
+ if (!ParentRule)
+ continue;
+ auto It = Rules.find(*ParentRule);
+ if (It != Rules.end()) {
+ // A sub-rule contradicts a parent rule.
+ Diag(Rule.second.getBegin(),
+ diag::err_pragma_attribute_matcher_subrule_contradicts_rule)
+ << attr::getSubjectMatchRuleSpelling(MatchRule)
+ << attr::getSubjectMatchRuleSpelling(*ParentRule) << It->second
+ << FixItHint::CreateRemoval(
+ replacementRangeForListElement(*this, Rule.second));
+ // Keep going without removing this rule as it won't change the set of
+ // declarations that receive the attribute.
+ continue;
+ }
+ if (isNegatedAttrMatcherSubRule(MatchRule))
+ RulesToFirstSpecifiedNegatedSubRule.insert(
+ std::make_pair(*ParentRule, Rule));
+ }
+ bool IgnoreNegatedSubRules = false;
+ for (const auto &Rule : Rules) {
+ attr::SubjectMatchRule MatchRule = attr::SubjectMatchRule(Rule.first);
+ Optional<attr::SubjectMatchRule> ParentRule =
+ getParentAttrMatcherRule(MatchRule);
+ if (!ParentRule)
+ continue;
+ auto It = RulesToFirstSpecifiedNegatedSubRule.find(*ParentRule);
+ if (It != RulesToFirstSpecifiedNegatedSubRule.end() &&
+ It->second != Rule) {
+ // Negated sub-rule contradicts another sub-rule.
+ Diag(
+ It->second.second.getBegin(),
+ diag::
+ err_pragma_attribute_matcher_negated_subrule_contradicts_subrule)
+ << attr::getSubjectMatchRuleSpelling(
+ attr::SubjectMatchRule(It->second.first))
+ << attr::getSubjectMatchRuleSpelling(MatchRule) << Rule.second
+ << FixItHint::CreateRemoval(
+ replacementRangeForListElement(*this, It->second.second));
+ // Keep going but ignore all of the negated sub-rules.
+ IgnoreNegatedSubRules = true;
+ RulesToFirstSpecifiedNegatedSubRule.erase(It);
+ }
+ }
+
+ if (!IgnoreNegatedSubRules) {
+ for (const auto &Rule : Rules)
+ SubjectMatchRules.push_back(attr::SubjectMatchRule(Rule.first));
+ } else {
+ for (const auto &Rule : Rules) {
+ if (!isNegatedAttrMatcherSubRule(attr::SubjectMatchRule(Rule.first)))
+ SubjectMatchRules.push_back(attr::SubjectMatchRule(Rule.first));
+ }
+ }
+ Rules.clear();
+ } else {
+ for (const auto &Rule : StrictSubjectMatchRuleSet) {
+ if (Rules.erase(Rule.first)) {
+ // Add the rule to the set of attribute receivers only if it's supported
+ // in the current language mode.
+ if (Rule.second)
+ SubjectMatchRules.push_back(Rule.first);
+ }
+ }
+ }
+
+ if (!Rules.empty()) {
+ auto Diagnostic =
+ Diag(PragmaLoc, diag::err_pragma_attribute_invalid_matchers)
+ << Attribute.getName();
+ SmallVector<attr::SubjectMatchRule, 2> ExtraRules;
+ for (const auto &Rule : Rules) {
+ ExtraRules.push_back(attr::SubjectMatchRule(Rule.first));
+ Diagnostic << FixItHint::CreateRemoval(
+ replacementRangeForListElement(*this, Rule.second));
+ }
+ Diagnostic << attrMatcherRuleListToString(ExtraRules);
+ }
+
+ PragmaAttributeStack.push_back(
+ {PragmaLoc, &Attribute, std::move(SubjectMatchRules), /*IsUsed=*/false});
+}
+
+void Sema::ActOnPragmaAttributePop(SourceLocation PragmaLoc) {
+ if (PragmaAttributeStack.empty()) {
+ Diag(PragmaLoc, diag::err_pragma_attribute_stack_mismatch);
+ return;
+ }
+ const PragmaAttributeEntry &Entry = PragmaAttributeStack.back();
+ if (!Entry.IsUsed) {
+ assert(Entry.Attribute && "Expected an attribute");
+ Diag(Entry.Attribute->getLoc(), diag::warn_pragma_attribute_unused)
+ << Entry.Attribute->getName();
+ Diag(PragmaLoc, diag::note_pragma_attribute_region_ends_here);
+ }
+ PragmaAttributeStack.pop_back();
+}
+
+void Sema::AddPragmaAttributes(Scope *S, Decl *D) {
+ if (PragmaAttributeStack.empty())
+ return;
+ for (auto &Entry : PragmaAttributeStack) {
+ const AttributeList *Attribute = Entry.Attribute;
+ assert(Attribute && "Expected an attribute");
+
+ // Ensure that the attribute can be applied to the given declaration.
+ bool Applies = false;
+ for (const auto &Rule : Entry.MatchRules) {
+ if (Attribute->appliesToDecl(D, Rule)) {
+ Applies = true;
+ break;
+ }
+ }
+ if (!Applies)
+ continue;
+ Entry.IsUsed = true;
+ assert(!Attribute->getNext() && "Expected just one attribute");
+ PragmaAttributeCurrentTargetDecl = D;
+ ProcessDeclAttributeList(S, D, Attribute);
+ PragmaAttributeCurrentTargetDecl = nullptr;
+ }
+}
+
+void Sema::PrintPragmaAttributeInstantiationPoint() {
+ assert(PragmaAttributeCurrentTargetDecl && "Expected an active declaration");
+ Diags.Report(PragmaAttributeCurrentTargetDecl->getLocStart(),
+ diag::note_pragma_attribute_applied_decl_here);
+}
+
+void Sema::DiagnoseUnterminatedPragmaAttribute() {
+ if (PragmaAttributeStack.empty())
+ return;
+ Diag(PragmaAttributeStack.back().Loc, diag::err_pragma_attribute_no_pop_eof);
+}
+
void Sema::ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc) {
if(On)
OptimizeOffPragmaLocation = SourceLocation();
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 57471de78d3e..6da4d2a26191 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -480,6 +480,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon,
bool OnlyNamespace) {
+ if (IdInfo.Identifier->isEditorPlaceholder())
+ return true;
LookupResult Found(*this, IdInfo.Identifier, IdInfo.IdentifierLoc,
OnlyNamespace ? LookupNamespaceName
: LookupNestedNameSpecifierName);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index 81db0d3d00a7..45523b30ef22 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -1391,8 +1391,6 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
}
bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- llvm::APSInt Result;
-
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
@@ -1439,8 +1437,6 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
- llvm::APSInt Result;
-
if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_strex ||
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp
index 4a55e51495a8..c709a1a723d0 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCoroutine.cpp
@@ -454,7 +454,7 @@ static bool actOnCoroutineBodyStart(Sema &S, Scope *SC, SourceLocation KWLoc,
/*IsImplicit*/ true);
Suspend = S.ActOnFinishFullExpr(Suspend.get());
if (Suspend.isInvalid()) {
- S.Diag(Loc, diag::note_coroutine_promise_call_implicitly_required)
+ S.Diag(Loc, diag::note_coroutine_promise_suspend_implicitly_required)
<< ((Name == "initial_suspend") ? 0 : 1);
S.Diag(KWLoc, diag::note_declared_coroutine_here) << Keyword;
return StmtError();
@@ -660,6 +660,39 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
return Res;
}
+/// Look up the std::nothrow object.
+static Expr *buildStdNoThrowDeclRef(Sema &S, SourceLocation Loc) {
+ NamespaceDecl *Std = S.getStdNamespace();
+ assert(Std && "Should already be diagnosed");
+
+ LookupResult Result(S, &S.PP.getIdentifierTable().get("nothrow"), Loc,
+ Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std)) {
+ // FIXME: <experimental/coroutine> should have been included already.
+ // If we require it to include <new> then this diagnostic is no longer
+ // needed.
+ S.Diag(Loc, diag::err_implicit_coroutine_std_nothrow_type_not_found);
+ return nullptr;
+ }
+
+  // FIXME: Mark the variable as ODR used. This currently does not work,
+  // likely due to the scope in which this function is called.
+ auto *VD = Result.getAsSingle<VarDecl>();
+ if (!VD) {
+ Result.suppressDiagnostics();
+ // We found something weird. Complain about the first thing we found.
+ NamedDecl *Found = *Result.begin();
+ S.Diag(Found->getLocation(), diag::err_malformed_std_nothrow);
+ return nullptr;
+ }
+
+ ExprResult DR = S.BuildDeclRefExpr(VD, VD->getType(), VK_LValue, Loc);
+ if (DR.isInvalid())
+ return nullptr;
+
+ return DR.get();
+}
+
// Find an appropriate delete for the promise.
static FunctionDecl *findDeleteForPromise(Sema &S, SourceLocation Loc,
QualType PromiseType) {
@@ -847,23 +880,53 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
if (S.RequireCompleteType(Loc, PromiseType, diag::err_incomplete_type))
return false;
- // FIXME: Add nothrow_t placement arg for global alloc
- // if ReturnStmtOnAllocFailure != nullptr.
+ const bool RequiresNoThrowAlloc = ReturnStmtOnAllocFailure != nullptr;
+
// FIXME: Add support for stateful allocators.
FunctionDecl *OperatorNew = nullptr;
FunctionDecl *OperatorDelete = nullptr;
FunctionDecl *UnusedResult = nullptr;
bool PassAlignment = false;
+ SmallVector<Expr *, 1> PlacementArgs;
S.FindAllocationFunctions(Loc, SourceRange(),
/*UseGlobal*/ false, PromiseType,
- /*isArray*/ false, PassAlignment,
- /*PlacementArgs*/ None, OperatorNew, UnusedResult);
+ /*isArray*/ false, PassAlignment, PlacementArgs,
+ OperatorNew, UnusedResult);
+
+ bool IsGlobalOverload =
+ OperatorNew && !isa<CXXRecordDecl>(OperatorNew->getDeclContext());
+  // If we didn't find a class-local operator new declaration and a
+  // non-throwing new is required, then we need to look up the non-throwing
+  // global operator instead.
+ if (RequiresNoThrowAlloc && (!OperatorNew || IsGlobalOverload)) {
+ auto *StdNoThrow = buildStdNoThrowDeclRef(S, Loc);
+ if (!StdNoThrow)
+ return false;
+ PlacementArgs = {StdNoThrow};
+ OperatorNew = nullptr;
+ S.FindAllocationFunctions(Loc, SourceRange(),
+ /*UseGlobal*/ true, PromiseType,
+ /*isArray*/ false, PassAlignment, PlacementArgs,
+ OperatorNew, UnusedResult);
+ }
- OperatorDelete = findDeleteForPromise(S, Loc, PromiseType);
+ assert(OperatorNew && "expected definition of operator new to be found");
- if (!OperatorDelete || !OperatorNew)
+ if (RequiresNoThrowAlloc) {
+ const auto *FT = OperatorNew->getType()->getAs<FunctionProtoType>();
+ if (!FT->isNothrow(S.Context, /*ResultIfDependent*/ false)) {
+ S.Diag(OperatorNew->getLocation(),
+ diag::err_coroutine_promise_new_requires_nothrow)
+ << OperatorNew;
+ S.Diag(Loc, diag::note_coroutine_promise_call_implicitly_required)
+ << OperatorNew;
+ return false;
+ }
+ }
+
+ if ((OperatorDelete = findDeleteForPromise(S, Loc, PromiseType)) == nullptr)
return false;
Expr *FramePtr =
@@ -879,8 +942,13 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
if (NewRef.isInvalid())
return false;
+ SmallVector<Expr *, 2> NewArgs(1, FrameSize);
+ for (auto Arg : PlacementArgs)
+ NewArgs.push_back(Arg);
+
ExprResult NewExpr =
- S.ActOnCallExpr(S.getCurScope(), NewRef.get(), Loc, FrameSize, Loc);
+ S.ActOnCallExpr(S.getCurScope(), NewRef.get(), Loc, NewArgs, Loc);
+ NewExpr = S.ActOnFinishFullExpr(NewExpr.get());
if (NewExpr.isInvalid())
return false;
@@ -906,6 +974,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
ExprResult DeleteExpr =
S.ActOnCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc);
+ DeleteExpr = S.ActOnFinishFullExpr(DeleteExpr.get());
if (DeleteExpr.isInvalid())
return false;
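
A hedged sketch of the case the new path handles (member set per the Coroutines
TS drafts of this era; details may differ slightly at this revision): when the
promise type declares get_return_object_on_allocation_failure(), frame
allocation must not throw, so Sema forms 'operator new(size, std::nothrow)' and
rejects a selected operator new that is not noexcept:

    #include <experimental/coroutine>
    #include <new>

    struct task {
      struct promise_type {
        static task get_return_object_on_allocation_failure() { return {}; }
        task get_return_object() { return {}; }
        std::experimental::suspend_never initial_suspend() { return {}; }
        std::experimental::suspend_never final_suspend() { return {}; }
        void return_void() {}
        void unhandled_exception() {}
      };
    };

    task f() { co_return; }  // frame allocated via operator new(size, std::nothrow)
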
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
index c6a0b0101d37..f3ffcf5d696c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -628,6 +628,9 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates) {
+ // Don't report typename errors for editor placeholders.
+ if (II->isEditorPlaceholder())
+ return;
// We don't have anything to suggest (yet).
SuggestedType = nullptr;
@@ -13674,6 +13677,7 @@ CreateNewDecl:
if (Attr)
ProcessDeclAttributeList(S, New, Attr);
+ AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
if (TUK == TUK_Friend) {
@@ -15185,6 +15189,7 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
// Process attributes.
if (Attr) ProcessDeclAttributeList(S, New, Attr);
+ AddPragmaAttributes(S, New);
// Register this decl in the current scope stack.
New->setAccess(TheEnumDecl->getAccess());
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index a1ba9de368db..027b3fe0e782 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -2398,10 +2398,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
<< Platform->Ident;
NamedDecl *ND = dyn_cast<NamedDecl>(D);
- if (!ND) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ if (!ND) // We warned about this already, so just return.
return;
- }
AvailabilityChange Introduced = Attr.getAvailabilityIntroduced();
AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated();
@@ -2511,12 +2509,6 @@ static void handleExternalSourceSymbolAttr(Sema &S, Decl *D,
assert(checkAttributeAtMostNumArgs(S, Attr, 3) &&
"Invalid number of arguments in an external_source_symbol attribute");
- if (!isa<NamedDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedNamedDecl;
- return;
- }
-
StringRef Language;
if (const auto *SE = dyn_cast_or_null<StringLiteral>(Attr.getArgAsExpr(0)))
Language = SE->getString();
@@ -5765,18 +5757,21 @@ static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const AttributeList &Attr) {
static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D,
const AttributeList &Attr) {
// Several attributes carry different semantics than the parsing requires, so
- // those are opted out of the common handling.
+ // those are opted out of the common argument checks.
//
// We also bail on unknown and ignored attributes because those are handled
// as part of the target-specific handling logic.
- if (Attr.hasCustomParsing() ||
- Attr.getKind() == AttributeList::UnknownAttribute)
+ if (Attr.getKind() == AttributeList::UnknownAttribute)
return false;
-
// Check whether the attribute requires specific language extensions to be
// enabled.
if (!Attr.diagnoseLangOpts(S))
return true;
+ // Check whether the attribute appertains to the given subject.
+ if (!Attr.diagnoseAppertainsTo(S, D))
+ return true;
+ if (Attr.hasCustomParsing())
+ return false;
if (Attr.getMinArgs() == Attr.getMaxArgs()) {
// If there are no optional arguments, then checking for the argument count
@@ -5793,10 +5788,6 @@ static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D,
return true;
}
- // Check whether the attribute appertains to the given subject.
- if (!Attr.diagnoseAppertainsTo(S, D))
- return true;
-
return false;
}
@@ -6676,6 +6667,9 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
// Finally, apply any attributes on the decl itself.
if (const AttributeList *Attrs = PD.getAttributes())
ProcessDeclAttributeList(S, D, Attrs);
+
+ // Apply additional attributes specified by '#pragma clang attribute'.
+ AddPragmaAttributes(S, D);
}
/// Is the given declaration allowed to use a forbidden type?
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
index fd3f266c9a08..b543a731641f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -8445,6 +8445,7 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
Namespc->setInvalidDecl();
ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList);
+ AddPragmaAttributes(DeclRegionScope, Namespc);
// FIXME: Should we be merging attributes?
if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>())
@@ -9931,6 +9932,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
NewTD->setInvalidDecl();
ProcessDeclAttributeList(S, NewTD, AttrList);
+ AddPragmaAttributes(S, NewTD);
CheckTypedefForVariablyModifiedType(S, NewTD);
Invalid |= NewTD->isInvalidDecl();
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
index e50f8b206779..4f51cd399c0c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -993,6 +993,7 @@ ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
if (AttrList)
ProcessDeclAttributeList(TUScope, IDecl, AttrList);
+ AddPragmaAttributes(TUScope, IDecl);
PushOnScopeChains(IDecl, TUScope);
// Start the definition of this class. If we're in a redefinition case, there
@@ -1176,7 +1177,8 @@ Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
if (AttrList)
ProcessDeclAttributeList(TUScope, PDecl, AttrList);
-
+ AddPragmaAttributes(TUScope, PDecl);
+
// Merge attributes from previous declarations.
if (PrevDecl)
mergeDeclAttributes(PDecl, PrevDecl);
@@ -1706,7 +1708,8 @@ Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
if (attrList)
ProcessDeclAttributeList(TUScope, PDecl, attrList);
-
+ AddPragmaAttributes(TUScope, PDecl);
+
if (PrevDecl)
mergeDeclAttributes(PDecl, PrevDecl);
@@ -1805,6 +1808,7 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
if (AttrList)
ProcessDeclAttributeList(TUScope, CDecl, AttrList);
+ AddPragmaAttributes(TUScope, CDecl);
CheckObjCDeclScope(CDecl);
return ActOnObjCContainerStartDefinition(CDecl);
@@ -1954,6 +1958,7 @@ Decl *Sema::ActOnStartClassImplementation(
ClassName, /*typeParamList=*/nullptr,
/*PrevDecl=*/nullptr, ClassLoc,
true);
+ AddPragmaAttributes(TUScope, IDecl);
IDecl->startDefinition();
if (SDecl) {
IDecl->setSuperClass(Context.getTrivialTypeSourceInfo(
@@ -3043,7 +3048,7 @@ Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
ClassName, TypeParams, PrevIDecl,
IdentLocs[i]);
IDecl->setAtEndRange(IdentLocs[i]);
-
+
PushOnScopeChains(IDecl, TUScope);
CheckObjCDeclScope(IDecl);
DeclsInGroup.push_back(IDecl);
@@ -4399,6 +4404,7 @@ Decl *Sema::ActOnMethodDeclaration(
// Apply the attributes to the parameter.
ProcessDeclAttributeList(TUScope, Param, ArgInfo[i].ArgAttrs);
+ AddPragmaAttributes(TUScope, Param);
if (Param->hasAttr<BlocksAttr>()) {
Diag(Param->getLocation(), diag::err_block_on_nonlocal);
@@ -4429,6 +4435,7 @@ Decl *Sema::ActOnMethodDeclaration(
if (AttrList)
ProcessDeclAttributeList(TUScope, ObjCMethod, AttrList);
+ AddPragmaAttributes(TUScope, ObjCMethod);
// Add the method now.
const ObjCMethodDecl *PrevMethod = nullptr;
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
index bb174521c72c..5a56f7093777 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -2129,6 +2129,12 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *II = Name.getAsIdentifierInfo();
SourceLocation NameLoc = NameInfo.getLoc();
+ if (II && II->isEditorPlaceholder()) {
+ // FIXME: When typed placeholders are supported we can create a typed
+ // placeholder expression node.
+ return ExprError();
+ }
+
// C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
// -- an identifier that was declared with a dependent type,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp
index 616ab05eaec8..d7a6d2e5885e 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp
@@ -4068,7 +4068,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
// Build variables passed into runtime, necessary for worksharing directives.
- ExprResult LB, UB, IL, ST, EUB, PrevLB, PrevUB;
+ ExprResult LB, UB, IL, ST, EUB, CombLB, CombUB, PrevLB, PrevUB, CombEUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind)) {
// Lower bound variable, initialized with zero.
@@ -4116,8 +4116,32 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// enclosing region. E.g. in 'distribute parallel for' the bounds obtained
// by scheduling 'distribute' have to be passed to the schedule of 'for'.
if (isOpenMPLoopBoundSharingDirective(DKind)) {
- auto *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
+ // Lower bound variable, initialized with zero.
+ VarDecl *CombLBDecl =
+ buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.lb");
+ CombLB = buildDeclRefExpr(SemaRef, CombLBDecl, VType, InitLoc);
+ SemaRef.AddInitializerToDecl(
+ CombLBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(),
+ /*DirectInit*/ false);
+
+ // Upper bound variable, initialized with last iteration number.
+ VarDecl *CombUBDecl =
+ buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.ub");
+ CombUB = buildDeclRefExpr(SemaRef, CombUBDecl, VType, InitLoc);
+ SemaRef.AddInitializerToDecl(CombUBDecl, LastIteration.get(),
+ /*DirectInit*/ false);
+
+ ExprResult CombIsUBGreater = SemaRef.BuildBinOp(
+ CurScope, InitLoc, BO_GT, CombUB.get(), LastIteration.get());
+ ExprResult CombCondOp =
+ SemaRef.ActOnConditionalOp(InitLoc, InitLoc, CombIsUBGreater.get(),
+ LastIteration.get(), CombUB.get());
+ CombEUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, CombUB.get(),
+ CombCondOp.get());
+ CombEUB = SemaRef.ActOnFinishFullExpr(CombEUB.get());
+
+ auto *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
// We expect to have at least 2 more parameters than the 'parallel'
// directive does - the lower and upper bounds of the previous schedule.
assert(CD->getNumParams() >= 4 &&
@@ -4139,7 +4163,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Build the iteration variable and its initialization before loop.
ExprResult IV;
- ExprResult Init;
+ ExprResult Init, CombInit;
{
VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
@@ -4150,6 +4174,18 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
: SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
Init = SemaRef.ActOnFinishFullExpr(Init.get());
+
+ if (isOpenMPLoopBoundSharingDirective(DKind)) {
+ Expr *CombRHS =
+ (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind))
+ ? CombLB.get()
+ : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
+ CombInit =
+ SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), CombRHS);
+ CombInit = SemaRef.ActOnFinishFullExpr(CombInit.get());
+ }
}
// Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
@@ -4160,7 +4196,11 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
: SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
NumIterations.get());
-
+ ExprResult CombCond;
+ if (isOpenMPLoopBoundSharingDirective(DKind)) {
+ CombCond =
+ SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), CombUB.get());
+ }
// Loop increment (IV = IV + 1)
SourceLocation IncLoc;
ExprResult Inc =
@@ -4175,7 +4215,9 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
// Used for directives with static scheduling.
- ExprResult NextLB, NextUB;
+  // In combined constructs, add combined versions that use the CombLB and
+  // CombUB base variables for the update.
+ ExprResult NextLB, NextUB, CombNextLB, CombNextUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind)) {
// LB + ST
@@ -4198,9 +4240,32 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get());
if (!NextUB.isUsable())
return 0;
- }
-
- // Create: increment expression for distribute loop when combined in a same
+ if (isOpenMPLoopBoundSharingDirective(DKind)) {
+ CombNextLB =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombLB.get(), ST.get());
+      if (!CombNextLB.isUsable())
+ return 0;
+ // LB = LB + ST
+ CombNextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombLB.get(),
+ CombNextLB.get());
+ CombNextLB = SemaRef.ActOnFinishFullExpr(CombNextLB.get());
+ if (!CombNextLB.isUsable())
+ return 0;
+ // UB + ST
+ CombNextUB =
+ SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, CombUB.get(), ST.get());
+ if (!CombNextUB.isUsable())
+ return 0;
+ // UB = UB + ST
+ CombNextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombUB.get(),
+ CombNextUB.get());
+ CombNextUB = SemaRef.ActOnFinishFullExpr(CombNextUB.get());
+ if (!CombNextUB.isUsable())
+ return 0;
+ }
+ }
+
+  // Create increment expression for distribute loop when combined in the same
// directive with for as IV = IV + ST; ensure upper bound expression based
// on PrevUB instead of NumIterations - used to implement 'for' when found
// in combination with 'distribute', like in 'distribute parallel for'
@@ -4346,6 +4411,13 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.PrevUB = PrevUB.get();
Built.DistInc = DistInc.get();
Built.PrevEUB = PrevEUB.get();
+ Built.DistCombinedFields.LB = CombLB.get();
+ Built.DistCombinedFields.UB = CombUB.get();
+ Built.DistCombinedFields.EUB = CombEUB.get();
+ Built.DistCombinedFields.Init = CombInit.get();
+ Built.DistCombinedFields.Cond = CombCond.get();
+ Built.DistCombinedFields.NLB = CombNextLB.get();
+ Built.DistCombinedFields.NUB = CombNextUB.get();
Expr *CounterVal = SemaRef.DefaultLvalueConversion(IV.get()).get();
// Fill data for doacross depend clauses.
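
For orientation, a minimal loop that exercises the new combined fields (a
hypothetical kernel, not taken from this change): in a loop-bound-sharing
directive such as 'distribute parallel for', 'distribute' computes chunk bounds
into the .omp.comb.lb/.omp.comb.ub variables built above and the inner
'parallel for' schedules iterations within them.

    void saxpy(int n, float a, const float *x, float *y) {
    #pragma omp target teams distribute parallel for
      for (int i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];
    }
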
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
index 9be1c56f0622..9ffc23b5adba 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -1783,6 +1783,7 @@ StmtResult
Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc) {
+ getCurFunction()->setHasBranchProtectedScope();
ExprResult CollectionExprResult =
CheckObjCForCollectionOperand(ForLoc, collection);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
index f522e76b0673..61b4df40964c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -1636,11 +1636,22 @@ private:
transformFunctionTypeParam(ParmVarDecl *OldParam,
MultiLevelTemplateArgumentList &Args) {
TypeSourceInfo *OldDI = OldParam->getTypeSourceInfo();
- TypeSourceInfo *NewDI =
- Args.getNumLevels()
- ? SemaRef.SubstType(OldDI, Args, OldParam->getLocation(),
- OldParam->getDeclName())
- : OldDI;
+ TypeSourceInfo *NewDI;
+ if (!Args.getNumLevels())
+ NewDI = OldDI;
+ else if (auto PackTL = OldDI->getTypeLoc().getAs<PackExpansionTypeLoc>()) {
+ // Expand out the one and only element in each inner pack.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 0);
+ NewDI =
+ SemaRef.SubstType(PackTL.getPatternLoc(), Args,
+ OldParam->getLocation(), OldParam->getDeclName());
+ if (!NewDI) return nullptr;
+ NewDI =
+ SemaRef.CheckPackExpansion(NewDI, PackTL.getEllipsisLoc(),
+ PackTL.getTypePtr()->getNumExpansions());
+ } else
+ NewDI = SemaRef.SubstType(OldDI, Args, OldParam->getLocation(),
+ OldParam->getDeclName());
if (!NewDI)
return nullptr;
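
A sketch of the construct this branch handles (an assumption about intent,
since the surrounding context is outside this hunk: transforming constructor
parameters into implicit C++17 deduction-guide parameters): when a parameter's
type is a pack expansion, the pattern is substituted at pack index 0 and then
re-wrapped via CheckPackExpansion.

    template <typename... Ts> struct Tuple {
      Tuple(Ts... elems);   // parameter type 'Ts...' is a PackExpansionType
    };
    Tuple t(1, 2.0, 'c');   // deduces Tuple<int, double, char>
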
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
index edd6edfce9dc..2d44489023ef 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -2605,10 +2605,11 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
== TSK_ExplicitSpecialization)
continue;
- if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ if ((Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment()) &&
TSK == TSK_ExplicitInstantiationDeclaration) {
- // In MSVC mode, explicit instantiation decl of the outer class doesn't
- // affect the inner class.
+ // In MSVC and Windows Itanium mode, explicit instantiation decl of the
+ // outer class doesn't affect the inner class.
continue;
}
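
An illustrative case for the relaxed check (hypothetical types, not taken from
this change):

    template <typename T> struct Outer {
      struct Inner { void f() {} };
    };
    extern template struct Outer<int>;  // explicit instantiation declaration

Under both MSVC and Windows Itanium environments, the declaration above must
not suppress instantiation of Outer<int>::Inner's members, so the loop skips
propagating the explicit-instantiation-declaration kind to the nested class.
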
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
index a12fb8cf95a0..3d314a85ff17 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2578,6 +2578,13 @@ void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
D->setPrevUpperBoundVariable(Record.readSubExpr());
D->setDistInc(Record.readSubExpr());
D->setPrevEnsureUpperBound(Record.readSubExpr());
+ D->setCombinedLowerBoundVariable(Record.readSubExpr());
+ D->setCombinedUpperBoundVariable(Record.readSubExpr());
+ D->setCombinedEnsureUpperBound(Record.readSubExpr());
+ D->setCombinedInit(Record.readSubExpr());
+ D->setCombinedCond(Record.readSubExpr());
+ D->setCombinedNextLowerBound(Record.readSubExpr());
+ D->setCombinedNextUpperBound(Record.readSubExpr());
}
SmallVector<Expr *, 4> Sub;
unsigned CollapsedNum = D->getCollapsedNumber();
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
index 1a2edac65886..90a732e575e2 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2248,6 +2248,13 @@ void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
Record.AddStmt(D->getPrevUpperBoundVariable());
Record.AddStmt(D->getDistInc());
Record.AddStmt(D->getPrevEnsureUpperBound());
+ Record.AddStmt(D->getCombinedLowerBoundVariable());
+ Record.AddStmt(D->getCombinedUpperBoundVariable());
+ Record.AddStmt(D->getCombinedEnsureUpperBound());
+ Record.AddStmt(D->getCombinedInit());
+ Record.AddStmt(D->getCombinedCond());
+ Record.AddStmt(D->getCombinedNextLowerBound());
+ Record.AddStmt(D->getCombinedNextUpperBound());
}
for (auto I : D->counters()) {
Record.AddStmt(I);
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
index 2fa8edb81ae4..33d957658cf0 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
@@ -506,12 +506,12 @@ int cc1as_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
// FIXME: Remove this, one day.
if (!Asm.LLVMArgs.empty()) {
unsigned NumArgs = Asm.LLVMArgs.size();
- const char **Args = new const char*[NumArgs + 2];
+ auto Args = llvm::make_unique<const char*[]>(NumArgs + 2);
Args[0] = "clang (LLVM option parsing)";
for (unsigned i = 0; i != NumArgs; ++i)
Args[i + 1] = Asm.LLVMArgs[i].c_str();
Args[NumArgs + 1] = nullptr;
- llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args);
+ llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args.get());
}
// Execute the invocation, unless there were parsing errors.
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
index 8aaa28beaac2..981445675343 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -12,13 +12,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
@@ -1522,6 +1524,408 @@ static void emitClangAttrLateParsedList(RecordKeeper &Records, raw_ostream &OS)
OS << "#endif // CLANG_ATTR_LATE_PARSED_LIST\n\n";
}
+static bool hasGNUorCXX11Spelling(const Record &Attribute) {
+ std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(Attribute);
+ for (const auto &I : Spellings) {
+ if (I.variety() == "GNU" || I.variety() == "CXX11")
+ return true;
+ }
+ return false;
+}
+
+namespace {
+
+struct AttributeSubjectMatchRule {
+ const Record *MetaSubject;
+ const Record *Constraint;
+
+ AttributeSubjectMatchRule(const Record *MetaSubject, const Record *Constraint)
+ : MetaSubject(MetaSubject), Constraint(Constraint) {
+ assert(MetaSubject && "Missing subject");
+ }
+
+ bool isSubRule() const { return Constraint != nullptr; }
+
+ std::vector<Record *> getSubjects() const {
+ return (Constraint ? Constraint : MetaSubject)
+ ->getValueAsListOfDefs("Subjects");
+ }
+
+ std::vector<Record *> getLangOpts() const {
+ if (Constraint) {
+      // Look up the options in the sub-rule first, in case the sub-rule
+      // overrides the rule's options.
+ std::vector<Record *> Opts = Constraint->getValueAsListOfDefs("LangOpts");
+ if (!Opts.empty())
+ return Opts;
+ }
+ return MetaSubject->getValueAsListOfDefs("LangOpts");
+ }
+
+ // Abstract rules are used only for sub-rules
+ bool isAbstractRule() const { return getSubjects().empty(); }
+
+ std::string getName() const {
+ return (Constraint ? Constraint : MetaSubject)->getValueAsString("Name");
+ }
+
+ bool isNegatedSubRule() const {
+ assert(isSubRule() && "Not a sub-rule");
+ return Constraint->getValueAsBit("Negated");
+ }
+
+ std::string getSpelling() const {
+ std::string Result = MetaSubject->getValueAsString("Name");
+ if (isSubRule()) {
+ Result += '(';
+ if (isNegatedSubRule())
+ Result += "unless(";
+ Result += getName();
+ if (isNegatedSubRule())
+ Result += ')';
+ Result += ')';
+ }
+ return Result;
+ }
+
+ std::string getEnumValueName() const {
+ std::string Result =
+ "SubjectMatchRule_" + MetaSubject->getValueAsString("Name");
+ if (isSubRule()) {
+ Result += "_";
+ if (isNegatedSubRule())
+ Result += "not_";
+ Result += Constraint->getValueAsString("Name");
+ }
+ if (isAbstractRule())
+ Result += "_abstract";
+ return Result;
+ }
+
+ std::string getEnumValue() const { return "attr::" + getEnumValueName(); }
+
+ static const char *EnumName;
+};
+
+const char *AttributeSubjectMatchRule::EnumName = "attr::SubjectMatchRule";
+
+struct PragmaClangAttributeSupport {
+ std::vector<AttributeSubjectMatchRule> Rules;
+
+ class RuleOrAggregateRuleSet {
+ std::vector<AttributeSubjectMatchRule> Rules;
+ bool IsRule;
+ RuleOrAggregateRuleSet(ArrayRef<AttributeSubjectMatchRule> Rules,
+ bool IsRule)
+ : Rules(Rules), IsRule(IsRule) {}
+
+ public:
+ bool isRule() const { return IsRule; }
+
+ const AttributeSubjectMatchRule &getRule() const {
+ assert(IsRule && "not a rule!");
+ return Rules[0];
+ }
+
+ ArrayRef<AttributeSubjectMatchRule> getAggregateRuleSet() const {
+ return Rules;
+ }
+
+ static RuleOrAggregateRuleSet
+ getRule(const AttributeSubjectMatchRule &Rule) {
+ return RuleOrAggregateRuleSet(Rule, /*IsRule=*/true);
+ }
+ static RuleOrAggregateRuleSet
+ getAggregateRuleSet(ArrayRef<AttributeSubjectMatchRule> Rules) {
+ return RuleOrAggregateRuleSet(Rules, /*IsRule=*/false);
+ }
+ };
+ llvm::DenseMap<const Record *, RuleOrAggregateRuleSet> SubjectsToRules;
+
+ PragmaClangAttributeSupport(RecordKeeper &Records);
+
+ bool isAttributedSupported(const Record &Attribute);
+
+ void emitMatchRuleList(raw_ostream &OS);
+
+ std::string generateStrictConformsTo(const Record &Attr, raw_ostream &OS);
+
+ void generateParsingHelpers(raw_ostream &OS);
+};
+
+} // end anonymous namespace
+
+static bool doesDeclDeriveFrom(const Record *D, const Record *Base) {
+ const Record *CurrentBase = D->getValueAsDef("Base");
+ if (!CurrentBase)
+ return false;
+ if (CurrentBase == Base)
+ return true;
+ return doesDeclDeriveFrom(CurrentBase, Base);
+}
+
+PragmaClangAttributeSupport::PragmaClangAttributeSupport(
+ RecordKeeper &Records) {
+ std::vector<Record *> MetaSubjects =
+ Records.getAllDerivedDefinitions("AttrSubjectMatcherRule");
+ auto MapFromSubjectsToRules = [this](const Record *SubjectContainer,
+ const Record *MetaSubject,
+ const Record *Constraint = nullptr) {
+ Rules.emplace_back(MetaSubject, Constraint);
+ std::vector<Record *> ApplicableSubjects =
+ SubjectContainer->getValueAsListOfDefs("Subjects");
+ for (const auto *Subject : ApplicableSubjects) {
+ bool Inserted =
+ SubjectsToRules
+ .try_emplace(Subject, RuleOrAggregateRuleSet::getRule(
+ AttributeSubjectMatchRule(MetaSubject,
+ Constraint)))
+ .second;
+ if (!Inserted) {
+        PrintFatalError("Attribute subject match rules should not represent "
+                        "the same attribute subjects.");
+ }
+ }
+ };
+ for (const auto *MetaSubject : MetaSubjects) {
+ MapFromSubjectsToRules(MetaSubject, MetaSubject);
+ std::vector<Record *> Constraints =
+ MetaSubject->getValueAsListOfDefs("Constraints");
+ for (const auto *Constraint : Constraints)
+ MapFromSubjectsToRules(Constraint, MetaSubject, Constraint);
+ }
+
+ std::vector<Record *> Aggregates =
+ Records.getAllDerivedDefinitions("AttrSubjectMatcherAggregateRule");
+ std::vector<Record *> DeclNodes = Records.getAllDerivedDefinitions("DDecl");
+ for (const auto *Aggregate : Aggregates) {
+ Record *SubjectDecl = Aggregate->getValueAsDef("Subject");
+
+ // Gather sub-classes of the aggregate subject that act as attribute
+ // subject rules.
+ std::vector<AttributeSubjectMatchRule> Rules;
+ for (const auto *D : DeclNodes) {
+ if (doesDeclDeriveFrom(D, SubjectDecl)) {
+ auto It = SubjectsToRules.find(D);
+ if (It == SubjectsToRules.end())
+ continue;
+ if (!It->second.isRule() || It->second.getRule().isSubRule())
+ continue; // Assume that the rule will be included as well.
+ Rules.push_back(It->second.getRule());
+ }
+ }
+
+ bool Inserted =
+ SubjectsToRules
+ .try_emplace(SubjectDecl,
+ RuleOrAggregateRuleSet::getAggregateRuleSet(Rules))
+ .second;
+ if (!Inserted) {
+      PrintFatalError("Attribute subject match rules should not represent "
+                      "the same attribute subjects.");
+ }
+ }
+}
+
+static PragmaClangAttributeSupport &
+getPragmaAttributeSupport(RecordKeeper &Records) {
+ static PragmaClangAttributeSupport Instance(Records);
+ return Instance;
+}
+
+void PragmaClangAttributeSupport::emitMatchRuleList(raw_ostream &OS) {
+ OS << "#ifndef ATTR_MATCH_SUB_RULE\n";
+ OS << "#define ATTR_MATCH_SUB_RULE(Value, Spelling, IsAbstract, Parent, "
+ "IsNegated) "
+ << "ATTR_MATCH_RULE(Value, Spelling, IsAbstract)\n";
+ OS << "#endif\n";
+ for (const auto &Rule : Rules) {
+ OS << (Rule.isSubRule() ? "ATTR_MATCH_SUB_RULE" : "ATTR_MATCH_RULE") << '(';
+ OS << Rule.getEnumValueName() << ", \"" << Rule.getSpelling() << "\", "
+ << Rule.isAbstractRule();
+ if (Rule.isSubRule())
+ OS << ", "
+ << AttributeSubjectMatchRule(Rule.MetaSubject, nullptr).getEnumValue()
+ << ", " << Rule.isNegatedSubRule();
+ OS << ")\n";
+ }
+ OS << "#undef ATTR_MATCH_SUB_RULE\n";
+}
+
+bool PragmaClangAttributeSupport::isAttributedSupported(
+ const Record &Attribute) {
+ if (Attribute.getValueAsBit("ForcePragmaAttributeSupport"))
+ return true;
+ // Opt-out rules:
+  // FIXME: The documentation check should be moved before the
+  // ForcePragmaAttributeSupport check once annotate is documented.
+ // No documentation present.
+ if (Attribute.isValueUnset("Documentation"))
+ return false;
+ std::vector<Record *> Docs = Attribute.getValueAsListOfDefs("Documentation");
+ if (Docs.empty())
+ return false;
+ if (Docs.size() == 1 && Docs[0]->getName() == "Undocumented")
+ return false;
+ // An attribute requires delayed parsing (LateParsed is on)
+ if (Attribute.getValueAsBit("LateParsed"))
+ return false;
+ // An attribute has no GNU/CXX11 spelling
+ if (!hasGNUorCXX11Spelling(Attribute))
+ return false;
+ // An attribute subject list has a subject that isn't covered by one of the
+ // subject match rules or has no subjects at all.
+ if (Attribute.isValueUnset("Subjects"))
+ return false;
+ const Record *SubjectObj = Attribute.getValueAsDef("Subjects");
+ std::vector<Record *> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
+ if (Subjects.empty())
+ return false;
+ for (const auto *Subject : Subjects) {
+ if (SubjectsToRules.find(Subject) == SubjectsToRules.end())
+ return false;
+ }
+ return true;
+}
+
+std::string
+PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr,
+ raw_ostream &OS) {
+ if (!isAttributedSupported(Attr))
+ return "nullptr";
+  // Generate a function that constructs a set of matching rules describing
+  // the declarations to which the attribute should apply.
+ std::string FnName = "matchRulesFor" + Attr.getName().str();
+ std::stringstream SS;
+ SS << "static void " << FnName << "(llvm::SmallVectorImpl<std::pair<"
+ << AttributeSubjectMatchRule::EnumName
+ << ", bool>> &MatchRules, const LangOptions &LangOpts) {\n";
+ if (Attr.isValueUnset("Subjects")) {
+ SS << "}\n\n";
+ OS << SS.str();
+ return FnName;
+ }
+ const Record *SubjectObj = Attr.getValueAsDef("Subjects");
+ std::vector<Record *> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
+ for (const auto *Subject : Subjects) {
+ auto It = SubjectsToRules.find(Subject);
+ assert(It != SubjectsToRules.end() &&
+ "This attribute is unsupported by #pragma clang attribute");
+ for (const auto &Rule : It->getSecond().getAggregateRuleSet()) {
+      // The rule might be language specific, so mark it as supported only
+      // when its required language options are enabled.
+ std::vector<Record *> LangOpts = Rule.getLangOpts();
+ SS << " MatchRules.push_back(std::make_pair(" << Rule.getEnumValue()
+ << ", /*IsSupported=*/";
+ if (!LangOpts.empty()) {
+ for (auto I = LangOpts.begin(), E = LangOpts.end(); I != E; ++I) {
+ std::string Part = (*I)->getValueAsString("Name");
+ if ((*I)->getValueAsBit("Negated"))
+ SS << "!";
+ SS << "LangOpts." + Part;
+ if (I + 1 != E)
+ SS << " || ";
+ }
+ } else
+ SS << "true";
+ SS << "));\n";
+ }
+ }
+ SS << "}\n\n";
+ OS << SS.str();
+ return FnName;
+}
+
+void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) {
+ // Generate routines that check the names of sub-rules.
+ OS << "Optional<attr::SubjectMatchRule> "
+ "defaultIsAttributeSubjectMatchSubRuleFor(StringRef, bool) {\n";
+ OS << " return None;\n";
+ OS << "}\n\n";
+
+ std::map<const Record *, std::vector<AttributeSubjectMatchRule>>
+ SubMatchRules;
+ for (const auto &Rule : Rules) {
+ if (!Rule.isSubRule())
+ continue;
+ SubMatchRules[Rule.MetaSubject].push_back(Rule);
+ }
+
+ for (const auto &SubMatchRule : SubMatchRules) {
+ OS << "Optional<attr::SubjectMatchRule> isAttributeSubjectMatchSubRuleFor_"
+ << SubMatchRule.first->getValueAsString("Name")
+ << "(StringRef Name, bool IsUnless) {\n";
+ OS << " if (IsUnless)\n";
+ OS << " return "
+ "llvm::StringSwitch<Optional<attr::SubjectMatchRule>>(Name).\n";
+ for (const auto &Rule : SubMatchRule.second) {
+ if (Rule.isNegatedSubRule())
+ OS << " Case(\"" << Rule.getName() << "\", " << Rule.getEnumValue()
+ << ").\n";
+ }
+ OS << " Default(None);\n";
+ OS << " return "
+ "llvm::StringSwitch<Optional<attr::SubjectMatchRule>>(Name).\n";
+ for (const auto &Rule : SubMatchRule.second) {
+ if (!Rule.isNegatedSubRule())
+ OS << " Case(\"" << Rule.getName() << "\", " << Rule.getEnumValue()
+ << ").\n";
+ }
+ OS << " Default(None);\n";
+ OS << "}\n\n";
+ }
+
+ // Generate the function that checks for the top-level rules.
+ OS << "std::pair<Optional<attr::SubjectMatchRule>, "
+ "Optional<attr::SubjectMatchRule> (*)(StringRef, "
+ "bool)> isAttributeSubjectMatchRule(StringRef Name) {\n";
+ OS << " return "
+ "llvm::StringSwitch<std::pair<Optional<attr::SubjectMatchRule>, "
+ "Optional<attr::SubjectMatchRule> (*) (StringRef, "
+ "bool)>>(Name).\n";
+ for (const auto &Rule : Rules) {
+ if (Rule.isSubRule())
+ continue;
+ std::string SubRuleFunction;
+ if (SubMatchRules.count(Rule.MetaSubject))
+ SubRuleFunction = "isAttributeSubjectMatchSubRuleFor_" + Rule.getName();
+ else
+ SubRuleFunction = "defaultIsAttributeSubjectMatchSubRuleFor";
+ OS << " Case(\"" << Rule.getName() << "\", std::make_pair("
+ << Rule.getEnumValue() << ", " << SubRuleFunction << ")).\n";
+ }
+ OS << " Default(std::make_pair(None, "
+ "defaultIsAttributeSubjectMatchSubRuleFor));\n";
+ OS << "}\n\n";
+
+ // Generate the function that checks for the submatch rules.
+ OS << "const char *validAttributeSubjectMatchSubRules("
+ << AttributeSubjectMatchRule::EnumName << " Rule) {\n";
+ OS << " switch (Rule) {\n";
+ for (const auto &SubMatchRule : SubMatchRules) {
+ OS << " case "
+ << AttributeSubjectMatchRule(SubMatchRule.first, nullptr).getEnumValue()
+ << ":\n";
+ OS << " return \"'";
+ bool IsFirst = true;
+ for (const auto &Rule : SubMatchRule.second) {
+ if (!IsFirst)
+ OS << ", '";
+ IsFirst = false;
+ if (Rule.isNegatedSubRule())
+ OS << "unless(";
+ OS << Rule.getName();
+ if (Rule.isNegatedSubRule())
+ OS << ')';
+ OS << "'";
+ }
+ OS << "\";\n";
+ }
+ OS << " default: return nullptr;\n";
+ OS << " }\n";
+ OS << "}\n\n";
+}
+
template <typename Fn>
static void forEachUniqueSpelling(const Record &Attr, Fn &&F) {
std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(Attr);
@@ -2109,6 +2513,17 @@ void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS) {
OS << "#undef PRAGMA_SPELLING_ATTR\n";
}
+// Emits the list of attribute subject match rules.
+void EmitClangAttrSubjectMatchRuleList(RecordKeeper &Records, raw_ostream &OS) {
+ emitSourceFileHeader(
+ "List of all attribute subject matching rules that Clang recognizes", OS);
+ PragmaClangAttributeSupport &PragmaAttributeSupport =
+ getPragmaAttributeSupport(Records);
+ emitDefaultDefine(OS, "ATTR_MATCH_RULE", nullptr);
+ PragmaAttributeSupport.emitMatchRuleList(OS);
+ OS << "#undef ATTR_MATCH_RULE\n";
+}
+
// Emits the code to read an attribute from a precompiled header.
void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
emitSourceFileHeader("Attribute deserialization code", OS);
@@ -2596,7 +3011,8 @@ static std::string CalculateDiagnostic(const Record &S) {
Field = 1U << 12,
CXXMethod = 1U << 13,
ObjCProtocol = 1U << 14,
- Enum = 1U << 15
+ Enum = 1U << 15,
+ Named = 1U << 16,
};
uint32_t SubMask = 0;
@@ -2631,6 +3047,7 @@ static std::string CalculateDiagnostic(const Record &S) {
.Case("Field", Field)
.Case("CXXMethod", CXXMethod)
.Case("Enum", Enum)
+ .Case("Named", Named)
.Default(0);
if (!V) {
// Something wasn't in our mapping, so be helpful and let the developer
@@ -2689,6 +3106,9 @@ static std::string CalculateDiagnostic(const Record &S) {
case ObjCProtocol | ObjCInterface:
return "ExpectedObjectiveCInterfaceOrProtocol";
case Field | Var: return "ExpectedFieldOrGlobalVar";
+
+ case Named:
+ return "ExpectedNamedDecl";
}
PrintFatalError(S.getLoc(),
@@ -2704,9 +3124,13 @@ static std::string GetSubjectWithSuffix(const Record *R) {
return B + "Decl";
}
+static std::string functionNameForCustomAppertainsTo(const Record &Subject) {
+ return "is" + Subject.getName().str();
+}
+
static std::string GenerateCustomAppertainsTo(const Record &Subject,
raw_ostream &OS) {
- std::string FnName = "is" + Subject.getName().str();
+ std::string FnName = functionNameForCustomAppertainsTo(Subject);
// If this code has already been generated, simply return the previous
// instance of it.
@@ -2791,6 +3215,42 @@ static std::string GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
return FnName;
}
+static void
+emitAttributeMatchRules(PragmaClangAttributeSupport &PragmaAttributeSupport,
+ raw_ostream &OS) {
+ OS << "static bool checkAttributeMatchRuleAppliesTo(const Decl *D, "
+ << AttributeSubjectMatchRule::EnumName << " rule) {\n";
+ OS << " switch (rule) {\n";
+ for (const auto &Rule : PragmaAttributeSupport.Rules) {
+ if (Rule.isAbstractRule()) {
+ OS << " case " << Rule.getEnumValue() << ":\n";
+ OS << " assert(false && \"Abstract matcher rule isn't allowed\");\n";
+ OS << " return false;\n";
+ continue;
+ }
+ std::vector<Record *> Subjects = Rule.getSubjects();
+ assert(!Subjects.empty() && "Missing subjects");
+ OS << " case " << Rule.getEnumValue() << ":\n";
+ OS << " return ";
+ for (auto I = Subjects.begin(), E = Subjects.end(); I != E; ++I) {
+ // If the subject has custom code associated with it, use the function
+ // that was generated for GenerateAppertainsTo to check if the declaration
+ // is valid.
+ if ((*I)->isSubClassOf("SubsetSubject"))
+ OS << functionNameForCustomAppertainsTo(**I) << "(D)";
+ else
+ OS << "isa<" << GetSubjectWithSuffix(*I) << ">(D)";
+
+ if (I + 1 != E)
+ OS << " || ";
+ }
+ OS << ";\n";
+ }
+ OS << " }\n";
+ OS << " llvm_unreachable(\"Invalid match rule\");\nreturn false;\n";
+ OS << "}\n\n";
+}
+
static void GenerateDefaultLangOptRequirements(raw_ostream &OS) {
OS << "static bool defaultDiagnoseLangOpts(Sema &, ";
OS << "const AttributeList &) {\n";
@@ -2949,6 +3409,9 @@ static bool IsKnownToGCC(const Record &Attr) {
void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
emitSourceFileHeader("Parsed attribute helpers", OS);
+ PragmaClangAttributeSupport &PragmaAttributeSupport =
+ getPragmaAttributeSupport(Records);
+
// Get the list of parsed attributes, and accept the optional list of
// duplicates due to the ParseKind.
ParsedAttrMap Dupes;
@@ -2982,10 +3445,13 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
SS << ", " << I->second->isSubClassOf("TypeAttr");
SS << ", " << I->second->isSubClassOf("StmtAttr");
SS << ", " << IsKnownToGCC(*I->second);
+ SS << ", " << PragmaAttributeSupport.isAttributedSupported(*I->second);
SS << ", " << GenerateAppertainsTo(*I->second, OS);
SS << ", " << GenerateLangOptRequirements(*I->second, OS);
SS << ", " << GenerateTargetRequirements(*I->second, Dupes, OS);
SS << ", " << GenerateSpellingIndexToSemanticSpelling(*I->second, OS);
+ SS << ", "
+ << PragmaAttributeSupport.generateStrictConformsTo(*I->second, OS);
SS << " }";
if (I + 1 != E)
@@ -2997,6 +3463,9 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
OS << "static const ParsedAttrInfo AttrInfoMap[AttributeList::UnknownAttribute + 1] = {\n";
OS << SS.str();
OS << "};\n\n";
+
+ // Generate the attribute match rules.
+ emitAttributeMatchRules(PragmaAttributeSupport, OS);
}
// Emits the kind list of parsed attributes
@@ -3136,6 +3605,11 @@ void EmitClangAttrParserStringSwitches(RecordKeeper &Records,
emitClangAttrLateParsedList(Records, OS);
}
+void EmitClangAttrSubjectMatchRulesParserStringSwitches(RecordKeeper &Records,
+ raw_ostream &OS) {
+ getPragmaAttributeSupport(Records).generateParsingHelpers(OS);
+}
+
class DocumentationData {
public:
const Record *Documentation;
@@ -3167,8 +3641,8 @@ enum SpellingKind {
Pragma = 1 << 5
};
-static void WriteDocumentation(const DocumentationData &Doc,
- raw_ostream &OS) {
+static void WriteDocumentation(RecordKeeper &Records,
+ const DocumentationData &Doc, raw_ostream &OS) {
// FIXME: there is no way to have a per-spelling category for the attribute
// documentation. This may not be a limiting factor since the spellings
// should generally be consistently applied across the category.
@@ -3250,7 +3724,7 @@ static void WriteDocumentation(const DocumentationData &Doc,
// List what spelling syntaxes the attribute supports.
OS << ".. csv-table:: Supported Syntaxes\n";
OS << " :header: \"GNU\", \"C++11\", \"__declspec\", \"Keyword\",";
- OS << " \"Pragma\"\n\n";
+ OS << " \"Pragma\", \"Pragma clang attribute\"\n\n";
OS << " \"";
if (SupportedSpellings & GNU) OS << "X";
OS << "\",\"";
@@ -3261,6 +3735,9 @@ static void WriteDocumentation(const DocumentationData &Doc,
if (SupportedSpellings & Keyword) OS << "X";
OS << "\", \"";
if (SupportedSpellings & Pragma) OS << "X";
+ OS << "\", \"";
+ if (getPragmaAttributeSupport(Records).isAttributedSupported(*Doc.Attribute))
+ OS << "X";
OS << "\"\n\n";
// If the attribute is deprecated, print a message about it, and possibly
@@ -3327,7 +3804,50 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS) {
// Walk over each of the attributes in the category and write out their
// documentation.
for (const auto &Doc : I.second)
- WriteDocumentation(Doc, OS);
+ WriteDocumentation(Records, Doc, OS);
+ }
+}
+
+void EmitTestPragmaAttributeSupportedAttributes(RecordKeeper &Records,
+ raw_ostream &OS) {
+  PragmaClangAttributeSupport &Support = getPragmaAttributeSupport(Records);
+ ParsedAttrMap Attrs = getParsedAttrList(Records);
+ unsigned NumAttrs = 0;
+ for (const auto &I : Attrs) {
+ if (Support.isAttributedSupported(*I.second))
+ ++NumAttrs;
+ }
+ OS << "#pragma clang attribute supports " << NumAttrs << " attributes:\n";
+ for (const auto &I : Attrs) {
+ if (!Support.isAttributedSupported(*I.second))
+ continue;
+ OS << I.first;
+ if (I.second->isValueUnset("Subjects")) {
+ OS << " ()\n";
+ continue;
+ }
+ const Record *SubjectObj = I.second->getValueAsDef("Subjects");
+ std::vector<Record *> Subjects =
+ SubjectObj->getValueAsListOfDefs("Subjects");
+ OS << " (";
+ for (const auto &Subject : llvm::enumerate(Subjects)) {
+ if (Subject.index())
+ OS << ", ";
+ PragmaClangAttributeSupport::RuleOrAggregateRuleSet &RuleSet =
+ Support.SubjectsToRules.find(Subject.value())->getSecond();
+ if (RuleSet.isRule()) {
+ OS << RuleSet.getRule().getEnumValueName();
+ continue;
+ }
+ OS << "(";
+ for (const auto &Rule : llvm::enumerate(RuleSet.getAggregateRuleSet())) {
+ if (Rule.index())
+ OS << ", ";
+ OS << Rule.value().getEnumValueName();
+ }
+ OS << ")";
+ }
+ OS << ")\n";
}
}
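
A hedged sketch of consuming the ATTR_MATCH_RULE list emitted above (the
generated header's include path is an assumption): clients define the X-macro
before including the file, and per the #ifndef default written by
emitMatchRuleList, ATTR_MATCH_SUB_RULE falls back to ATTR_MATCH_RULE when left
undefined.

    static const char *MatchRuleSpellings[] = {
    #define ATTR_MATCH_RULE(Value, Spelling, IsAbstract) Spelling,
    #include "clang/Basic/AttrSubMatchRulesList.inc"
    };
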
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
index fd7999be3877..781518ddbc31 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
@@ -25,8 +25,10 @@ using namespace clang;
enum ActionType {
GenClangAttrClasses,
GenClangAttrParserStringSwitches,
+ GenClangAttrSubjectMatchRulesParserStringSwitches,
GenClangAttrImpl,
GenClangAttrList,
+ GenClangAttrSubjectMatchRuleList,
GenClangAttrPCHRead,
GenClangAttrPCHWrite,
GenClangAttrHasAttributeImpl,
@@ -54,7 +56,8 @@ enum ActionType {
GenArmNeonTest,
GenAttrDocs,
GenDiagDocs,
- GenOptDocs
+ GenOptDocs,
+ GenTestPragmaAttributeSupportedAttributes
};
namespace {
@@ -66,10 +69,17 @@ cl::opt<ActionType> Action(
clEnumValN(GenClangAttrParserStringSwitches,
"gen-clang-attr-parser-string-switches",
"Generate all parser-related attribute string switches"),
+ clEnumValN(GenClangAttrSubjectMatchRulesParserStringSwitches,
+ "gen-clang-attr-subject-match-rules-parser-string-switches",
+ "Generate all parser-related attribute subject match rule"
+ "string switches"),
clEnumValN(GenClangAttrImpl, "gen-clang-attr-impl",
"Generate clang attribute implementations"),
clEnumValN(GenClangAttrList, "gen-clang-attr-list",
"Generate a clang attribute list"),
+ clEnumValN(GenClangAttrSubjectMatchRuleList,
+ "gen-clang-attr-subject-match-rule-list",
+ "Generate a clang attribute subject match rule list"),
clEnumValN(GenClangAttrPCHRead, "gen-clang-attr-pch-read",
"Generate clang PCH attribute reader"),
clEnumValN(GenClangAttrPCHWrite, "gen-clang-attr-pch-write",
@@ -80,8 +90,7 @@ cl::opt<ActionType> Action(
clEnumValN(GenClangAttrSpellingListIndex,
"gen-clang-attr-spelling-index",
"Generate a clang attribute spelling index"),
- clEnumValN(GenClangAttrASTVisitor,
- "gen-clang-attr-ast-visitor",
+ clEnumValN(GenClangAttrASTVisitor, "gen-clang-attr-ast-visitor",
"Generate a recursive AST visitor for clang attributes"),
clEnumValN(GenClangAttrTemplateInstantiate,
"gen-clang-attr-template-instantiate",
@@ -137,8 +146,11 @@ cl::opt<ActionType> Action(
"Generate attribute documentation"),
clEnumValN(GenDiagDocs, "gen-diag-docs",
"Generate diagnostic documentation"),
- clEnumValN(GenOptDocs, "gen-opt-docs",
- "Generate option documentation")));
+ clEnumValN(GenOptDocs, "gen-opt-docs", "Generate option documentation"),
+ clEnumValN(GenTestPragmaAttributeSupportedAttributes,
+ "gen-clang-test-pragma-attribute-supported-attributes",
+ "Generate a list of attributes supported by #pragma clang "
+ "attribute for testing purposes")));
cl::opt<std::string>
ClangComponent("clang-component",
@@ -153,12 +165,18 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenClangAttrParserStringSwitches:
EmitClangAttrParserStringSwitches(Records, OS);
break;
+ case GenClangAttrSubjectMatchRulesParserStringSwitches:
+ EmitClangAttrSubjectMatchRulesParserStringSwitches(Records, OS);
+ break;
case GenClangAttrImpl:
EmitClangAttrImpl(Records, OS);
break;
case GenClangAttrList:
EmitClangAttrList(Records, OS);
break;
+ case GenClangAttrSubjectMatchRuleList:
+ EmitClangAttrSubjectMatchRuleList(Records, OS);
+ break;
case GenClangAttrPCHRead:
EmitClangAttrPCHRead(Records, OS);
break;
@@ -244,6 +262,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenOptDocs:
EmitClangOptDocs(Records, OS);
break;
+ case GenTestPragmaAttributeSupportedAttributes:
+ EmitTestPragmaAttributeSupportedAttributes(Records, OS);
+ break;
}
return false;
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h b/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
index 033cb78f36f3..e1b7d0ec63be 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGenBackends.h
@@ -33,9 +33,12 @@ void EmitClangASTNodes(RecordKeeper &RK, raw_ostream &OS,
const std::string &N, const std::string &S);
void EmitClangAttrParserStringSwitches(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrSubjectMatchRulesParserStringSwitches(RecordKeeper &Records,
+ raw_ostream &OS);
void EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS);
void EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS);
void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS);
+void EmitClangAttrSubjectMatchRuleList(RecordKeeper &Records, raw_ostream &OS);
void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS);
void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS);
void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS);
@@ -72,6 +75,9 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS);
void EmitClangDiagDocs(RecordKeeper &Records, raw_ostream &OS);
void EmitClangOptDocs(RecordKeeper &Records, raw_ostream &OS);
+void EmitTestPragmaAttributeSupportedAttributes(RecordKeeper &Records,
+ raw_ostream &OS);
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/lld/COFF/Driver.cpp b/contrib/llvm/tools/lld/COFF/Driver.cpp
index 3e7f10bf8d11..daddfb86d4cf 100644
--- a/contrib/llvm/tools/lld/COFF/Driver.cpp
+++ b/contrib/llvm/tools/lld/COFF/Driver.cpp
@@ -509,7 +509,7 @@ filterBitcodeFiles(StringRef Path, std::vector<std::string> &TemporaryFiles) {
// Create response file contents and invoke the MSVC linker.
void LinkerDriver::invokeMSVC(opt::InputArgList &Args) {
- std::string Rsp = "/nologo ";
+ std::string Rsp = "/nologo\n";
std::vector<std::string> Temps;
for (auto *Arg : Args) {
@@ -528,14 +528,14 @@ void LinkerDriver::invokeMSVC(opt::InputArgList &Args) {
case OPT_INPUT: {
if (Optional<StringRef> Path = doFindFile(Arg->getValue())) {
if (Optional<std::string> S = filterBitcodeFiles(*Path, Temps))
- Rsp += quote(*S) + " ";
+ Rsp += quote(*S) + "\n";
continue;
}
- Rsp += quote(Arg->getValue()) + " ";
+ Rsp += quote(Arg->getValue()) + "\n";
break;
}
default:
- Rsp += toString(Arg) + " ";
+ Rsp += toString(Arg) + "\n";
}
}
diff --git a/contrib/llvm/tools/lld/COFF/DriverUtils.cpp b/contrib/llvm/tools/lld/COFF/DriverUtils.cpp
index a9c1c9d5593e..2c9ba797f73b 100644
--- a/contrib/llvm/tools/lld/COFF/DriverUtils.cpp
+++ b/contrib/llvm/tools/lld/COFF/DriverUtils.cpp
@@ -634,7 +634,7 @@ void runMSVCLinker(std::string Rsp, ArrayRef<StringRef> Objects) {
std::vector<TemporaryFile> Temps;
for (StringRef S : Objects) {
Temps.emplace_back("lto", "obj", S);
- Rsp += quote(Temps.back().Path) + " ";
+ Rsp += quote(Temps.back().Path) + "\n";
}
log("link.exe " + Rsp);
diff --git a/contrib/llvm/tools/lld/ELF/Config.h b/contrib/llvm/tools/lld/ELF/Config.h
index c8eecec7439c..d25c63c3c0d2 100644
--- a/contrib/llvm/tools/lld/ELF/Config.h
+++ b/contrib/llvm/tools/lld/ELF/Config.h
@@ -104,6 +104,7 @@ struct Configuration {
bool Bsymbolic;
bool BsymbolicFunctions;
bool ColorDiagnostics = false;
+ bool CompressDebugSections;
bool DefineCommon;
bool Demangle = true;
bool DisableVerify;
diff --git a/contrib/llvm/tools/lld/ELF/Driver.cpp b/contrib/llvm/tools/lld/ELF/Driver.cpp
index 47ecd607a48f..93924e4554c9 100644
--- a/contrib/llvm/tools/lld/ELF/Driver.cpp
+++ b/contrib/llvm/tools/lld/ELF/Driver.cpp
@@ -45,6 +45,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Object/Decompressor.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compression.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TarWriter.h"
#include "llvm/Support/TargetSelect.h"
@@ -564,12 +565,24 @@ static std::vector<StringRef> getLines(MemoryBufferRef MB) {
return Ret;
}
+static bool getCompressDebugSections(opt::InputArgList &Args) {
+ if (auto *Arg = Args.getLastArg(OPT_compress_debug_sections)) {
+ StringRef S = Arg->getValue();
+ if (S == "zlib")
+ return zlib::isAvailable();
+ if (S != "none")
+ error("unknown --compress-debug-sections value: " + S);
+ }
+ return false;
+}
+
// Initializes Config members by the command line options.
void LinkerDriver::readConfigs(opt::InputArgList &Args) {
Config->AllowMultipleDefinition = Args.hasArg(OPT_allow_multiple_definition);
Config->AuxiliaryList = getArgs(Args, OPT_auxiliary);
Config->Bsymbolic = Args.hasArg(OPT_Bsymbolic);
Config->BsymbolicFunctions = Args.hasArg(OPT_Bsymbolic_functions);
+ Config->CompressDebugSections = getCompressDebugSections(Args);
Config->DefineCommon = getArg(Args, OPT_define_common, OPT_no_define_common,
!Args.hasArg(OPT_relocatable));
Config->Demangle = getArg(Args, OPT_demangle, OPT_no_demangle, true);
diff --git a/contrib/llvm/tools/lld/ELF/LinkerScript.cpp b/contrib/llvm/tools/lld/ELF/LinkerScript.cpp
index ab2ca22e9e17..63eb90456e17 100644
--- a/contrib/llvm/tools/lld/ELF/LinkerScript.cpp
+++ b/contrib/llvm/tools/lld/ELF/LinkerScript.cpp
@@ -413,6 +413,56 @@ void LinkerScript::processCommands(OutputSectionFactory &Factory) {
CurOutSec = nullptr;
}
+void LinkerScript::fabricateDefaultCommands(bool AllocateHeader) {
+ std::vector<BaseCommand *> Commands;
+
+ // Define start address
+ uint64_t StartAddr = Config->ImageBase;
+ if (AllocateHeader)
+ StartAddr += elf::getHeaderSize();
+
+  // The sections given with -T<section> are sorted in order of ascending
+  // address; we must start from the lowest such address if it is below
+  // StartAddr, as calls to setDot() must be monotonically increasing.
+ if (!Config->SectionStartMap.empty()) {
+ uint64_t LowestSecStart = Config->SectionStartMap.begin()->second;
+ StartAddr = std::min(StartAddr, LowestSecStart);
+ }
+ Commands.push_back(
+ make<SymbolAssignment>(".", [=] { return StartAddr; }, ""));
+
+  // For each OutputSection that needs a VA, fabricate an OutputSectionCommand
+  // with an InputSectionDescription describing its InputSections.
+ for (OutputSection *Sec : *OutputSections) {
+ if (!(Sec->Flags & SHF_ALLOC))
+ continue;
+
+ auto I = Config->SectionStartMap.find(Sec->Name);
+ if (I != Config->SectionStartMap.end())
+ Commands.push_back(
+ make<SymbolAssignment>(".", [=] { return I->second; }, ""));
+
+ auto *OSCmd = make<OutputSectionCommand>(Sec->Name);
+ OSCmd->Sec = Sec;
+ if (Sec->PageAlign)
+ OSCmd->AddrExpr = [=] {
+ return alignTo(Script->getDot(), Config->MaxPageSize);
+ };
+ Commands.push_back(OSCmd);
+ if (Sec->Sections.size()) {
+ auto *ISD = make<InputSectionDescription>("");
+ OSCmd->Commands.push_back(ISD);
+ for (InputSection *ISec : Sec->Sections) {
+ ISD->Sections.push_back(ISec);
+ ISec->Assigned = true;
+ }
+ }
+ }
+  // SECTIONS commands run before other non-SECTIONS commands.
+ Commands.insert(Commands.end(), Opt.Commands.begin(), Opt.Commands.end());
+ Opt.Commands = std::move(Commands);
+}
+
// Add sections that didn't match any sections command.
void LinkerScript::addOrphanSections(OutputSectionFactory &Factory) {
for (InputSectionBase *S : InputSections)
diff --git a/contrib/llvm/tools/lld/ELF/LinkerScript.h b/contrib/llvm/tools/lld/ELF/LinkerScript.h
index 04a388efb4e9..61942b2db357 100644
--- a/contrib/llvm/tools/lld/ELF/LinkerScript.h
+++ b/contrib/llvm/tools/lld/ELF/LinkerScript.h
@@ -256,6 +256,7 @@ public:
bool isDefined(StringRef S);
std::vector<OutputSection *> *OutputSections;
+ void fabricateDefaultCommands(bool AllocateHeader);
void addOrphanSections(OutputSectionFactory &Factory);
void removeEmptyCommands();
void adjustSectionsBeforeSorting();
diff --git a/contrib/llvm/tools/lld/ELF/Options.td b/contrib/llvm/tools/lld/ELF/Options.td
index 7ed8dfb090bd..4cf14c9011c3 100644
--- a/contrib/llvm/tools/lld/ELF/Options.td
+++ b/contrib/llvm/tools/lld/ELF/Options.td
@@ -22,6 +22,9 @@ def build_id: F<"build-id">, HelpText<"Generate build ID note">;
def build_id_eq: J<"build-id=">, HelpText<"Generate build ID note">;
+def compress_debug_sections : J<"compress-debug-sections=">,
+ HelpText<"Compress DWARF debug sections">;
+
def L: JoinedOrSeparate<["-"], "L">, MetaVarName<"<dir>">,
HelpText<"Add a directory to the library search path">;
diff --git a/contrib/llvm/tools/lld/ELF/OutputSections.cpp b/contrib/llvm/tools/lld/ELF/OutputSections.cpp
index 93f83100a745..a40818d2d301 100644
--- a/contrib/llvm/tools/lld/ELF/OutputSections.cpp
+++ b/contrib/llvm/tools/lld/ELF/OutputSections.cpp
@@ -16,6 +16,7 @@
#include "SyntheticSections.h"
#include "Target.h"
#include "Threads.h"
+#include "llvm/Support/Compression.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
@@ -83,6 +84,33 @@ static bool compareByFilePosition(InputSection *A, InputSection *B) {
return LA->OutSecOff < LB->OutSecOff;
}
+// Compress section contents if this section contains debug info.
+template <class ELFT> void OutputSection::maybeCompress() {
+ typedef typename ELFT::Chdr Elf_Chdr;
+
+ // Compress only DWARF debug sections.
+ if (!Config->CompressDebugSections || (Flags & SHF_ALLOC) ||
+ !Name.startswith(".debug_"))
+ return;
+
+ // Create a section header.
+ ZDebugHeader.resize(sizeof(Elf_Chdr));
+ auto *Hdr = reinterpret_cast<Elf_Chdr *>(ZDebugHeader.data());
+ Hdr->ch_type = ELFCOMPRESS_ZLIB;
+ Hdr->ch_size = Size;
+ Hdr->ch_addralign = Alignment;
+
+ // Write section contents to a temporary buffer and compress it.
+ std::vector<uint8_t> Buf(Size);
+ writeTo<ELFT>(Buf.data());
+ if (Error E = zlib::compress(toStringRef(Buf), CompressedData))
+ fatal("compress failed: " + llvm::toString(std::move(E)));
+
+ // Update section headers.
+ Size = sizeof(Elf_Chdr) + CompressedData.size();
+ Flags |= SHF_COMPRESSED;
+}
+
template <class ELFT> void OutputSection::finalize() {
if ((this->Flags & SHF_LINK_ORDER) && !this->Sections.empty()) {
std::sort(Sections.begin(), Sections.end(), compareByFilePosition);
@@ -245,6 +273,15 @@ uint32_t OutputSection::getFiller() {
template <class ELFT> void OutputSection::writeTo(uint8_t *Buf) {
Loc = Buf;
+  // We may have already created compressed content if the
+  // --compress-debug-sections option was given. Write it together with
+  // its header.
+ if (!CompressedData.empty()) {
+ memcpy(Buf, ZDebugHeader.data(), ZDebugHeader.size());
+ memcpy(Buf + ZDebugHeader.size(), CompressedData.data(),
+ CompressedData.size());
+ return;
+ }
+
// Write leading padding.
uint32_t Filler = getFiller();
if (Filler)
@@ -422,6 +459,11 @@ template void OutputSection::finalize<ELF32BE>();
template void OutputSection::finalize<ELF64LE>();
template void OutputSection::finalize<ELF64BE>();
+template void OutputSection::maybeCompress<ELF32LE>();
+template void OutputSection::maybeCompress<ELF32BE>();
+template void OutputSection::maybeCompress<ELF64LE>();
+template void OutputSection::maybeCompress<ELF64BE>();
+
template void OutputSection::writeTo<ELF32LE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF32BE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF64LE>(uint8_t *Buf);
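Editor's note: for context, a zlib-compressed ELF section starts with a compression header followed by the deflated payload, which is exactly how maybeCompress() above sizes the section. A sketch of the 64-bit header layout per the ELF gABI (LLVM defines the real type in llvm/Object/ELFTypes.h, reached here through ELFT::Chdr):

    // Elf64_Chdr as defined by the ELF gABI.
    struct Elf64_Chdr {
      uint32_t ch_type;      // ELFCOMPRESS_ZLIB == 1
      uint32_t ch_reserved;
      uint64_t ch_size;      // uncompressed section size
      uint64_t ch_addralign; // uncompressed section alignment
    };
    // Final section size = sizeof(Elf64_Chdr) + compressed payload size,
    // and SHF_COMPRESSED is set on the output section header.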
diff --git a/contrib/llvm/tools/lld/ELF/OutputSections.h b/contrib/llvm/tools/lld/ELF/OutputSections.h
index 0ae3df5f7859..bcda77d1a26d 100644
--- a/contrib/llvm/tools/lld/ELF/OutputSections.h
+++ b/contrib/llvm/tools/lld/ELF/OutputSections.h
@@ -84,9 +84,14 @@ public:
uint32_t getFiller();
template <class ELFT> void writeTo(uint8_t *Buf);
template <class ELFT> void finalize();
+ template <class ELFT> void maybeCompress();
void assignOffsets();
std::vector<InputSection *> Sections;
+ // Used for implementation of --compress-debug-sections option.
+ std::vector<uint8_t> ZDebugHeader;
+ llvm::SmallVector<char, 1> CompressedData;
+
// Location in the output buffer.
uint8_t *Loc = nullptr;
};
diff --git a/contrib/llvm/tools/lld/ELF/Writer.cpp b/contrib/llvm/tools/lld/ELF/Writer.cpp
index 8eed3b13bc65..098bab24a492 100644
--- a/contrib/llvm/tools/lld/ELF/Writer.cpp
+++ b/contrib/llvm/tools/lld/ELF/Writer.cpp
@@ -19,6 +19,7 @@
#include "SymbolTable.h"
#include "SyntheticSections.h"
#include "Target.h"
+#include "Threads.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileOutputBuffer.h"
@@ -58,7 +59,6 @@ private:
std::vector<PhdrEntry> createPhdrs();
void removeEmptyPTLoad();
void addPtArmExid(std::vector<PhdrEntry> &Phdrs);
- void assignAddresses();
void assignFileOffsets();
void assignFileOffsetsBinary();
void setPhdrs();
@@ -250,13 +250,11 @@ template <class ELFT> void Writer<ELFT>::run() {
if (Config->Relocatable) {
assignFileOffsets();
} else {
- if (Script->Opt.HasSections) {
- Script->assignAddresses(Phdrs);
- } else {
+ if (!Script->Opt.HasSections) {
fixSectionAlignments();
- assignAddresses();
- Script->processNonSectionCommands();
+ Script->fabricateDefaultCommands(Config->MaxPageSize);
}
+ Script->assignAddresses(Phdrs);
// Remove empty PT_LOAD to avoid causing the dynamic linker to try to mmap a
// 0 sized region. This has to be done late since only after assignAddresses
@@ -1216,6 +1214,12 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
for (OutputSection *Sec : OutputSections)
Sec->finalize<ELFT>();
+  // If --compress-debug-sections is specified, compress .debug_* sections.
+  // Do it right now because it changes the size of output sections.
+ parallelForEach(OutputSections.begin(), OutputSections.end(),
+ [](OutputSection *S) { S->maybeCompress<ELFT>(); });
+
// createThunks may have added local symbols to the static symbol table
applySynthetic({In<ELFT>::SymTab, In<ELFT>::ShStrTab, In<ELFT>::StrTab},
[](SyntheticSection *SS) { SS->postThunkContents(); });
@@ -1502,37 +1506,6 @@ template <class ELFT> void Writer<ELFT>::fixHeaders() {
AllocateHeader = allocateHeaders(Phdrs, OutputSections, Min);
}
-// Assign VAs (addresses at run-time) to output sections.
-template <class ELFT> void Writer<ELFT>::assignAddresses() {
- uint64_t VA = Config->ImageBase;
- uint64_t ThreadBssOffset = 0;
-
- if (AllocateHeader)
- VA += getHeaderSize();
-
- for (OutputSection *Sec : OutputSections) {
- uint32_t Alignment = Sec->Alignment;
- if (Sec->PageAlign)
- Alignment = std::max<uint32_t>(Alignment, Config->MaxPageSize);
-
- auto I = Config->SectionStartMap.find(Sec->Name);
- if (I != Config->SectionStartMap.end())
- VA = I->second;
-
- // We only assign VAs to allocated sections.
- if (needsPtLoad(Sec)) {
- VA = alignTo(VA, Alignment);
- Sec->Addr = VA;
- VA += Sec->Size;
- } else if (Sec->Flags & SHF_TLS && Sec->Type == SHT_NOBITS) {
- uint64_t TVA = VA + ThreadBssOffset;
- TVA = alignTo(TVA, Alignment);
- Sec->Addr = TVA;
- ThreadBssOffset = TVA - VA + Sec->Size;
- }
- }
-}
-
// Adjusts the file alignment for a given output section and returns
// its new file offset. The file offset must be the same with its
// virtual address (modulo the page size) so that the loader can load
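Editor's note: the AddrExpr fabricated for page-aligned sections in fabricateDefaultCommands() relies on llvm::alignTo. A one-line sketch of that rounding, for reference (the bitwise form assumes Align is a power of two, which MaxPageSize is):

    // Round Value up to the next multiple of Align (Align a power of two).
    uint64_t alignToSketch(uint64_t Value, uint64_t Align) {
      return (Value + Align - 1) & ~(Align - 1);
    }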
diff --git a/contrib/llvm/tools/lldb/include/lldb/Core/ArchSpec.h b/contrib/llvm/tools/lldb/include/lldb/Core/ArchSpec.h
index 648815c21371..75c7079be08d 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Core/ArchSpec.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Core/ArchSpec.h
@@ -625,6 +625,7 @@ public:
protected:
bool IsEqualTo(const ArchSpec &rhs, bool exact_match) const;
+ void UpdateCore();
llvm::Triple m_triple;
Core m_core = kCore_invalid;
diff --git a/contrib/llvm/tools/lldb/include/lldb/Expression/DiagnosticManager.h b/contrib/llvm/tools/lldb/include/lldb/Expression/DiagnosticManager.h
index d9024e649b80..83e67df2649a 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Expression/DiagnosticManager.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Expression/DiagnosticManager.h
@@ -128,6 +128,8 @@ public:
m_diagnostics.push_back(diagnostic);
}
+ void CopyDiagnostics(DiagnosticManager &otherDiagnostics);
+
size_t Printf(DiagnosticSeverity severity, const char *format, ...)
__attribute__((format(printf, 3, 4)));
size_t PutString(DiagnosticSeverity severity, llvm::StringRef str);
diff --git a/contrib/llvm/tools/lldb/include/lldb/Utility/StringLexer.h b/contrib/llvm/tools/lldb/include/lldb/Utility/StringLexer.h
index e2c31db329cc..e4fc81a85e0d 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Utility/StringLexer.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Utility/StringLexer.h
@@ -41,8 +41,6 @@ public:
bool HasAtLeast(Size s);
- bool HasAny(Character c);
-
std::string GetUnlexed();
// This will assert if there are less than s characters preceding the cursor.
diff --git a/contrib/llvm/tools/lldb/source/Commands/CommandObjectCommands.cpp b/contrib/llvm/tools/lldb/source/Commands/CommandObjectCommands.cpp
index 102010e8e6f6..e39c0330b653 100644
--- a/contrib/llvm/tools/lldb/source/Commands/CommandObjectCommands.cpp
+++ b/contrib/llvm/tools/lldb/source/Commands/CommandObjectCommands.cpp
@@ -50,7 +50,11 @@ class CommandObjectCommandsHistory : public CommandObjectParsed {
public:
CommandObjectCommandsHistory(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "command history",
- "Dump the history of commands in this session.",
+ "Dump the history of commands in this session.\n"
+ "Commands in the history list can be run again "
+ "using \"!<INDEX>\". \"!-<OFFSET>\" will re-run "
+ "the command that is <OFFSET> commands from the end"
+ " of the list (counting the current command).",
nullptr),
m_options() {}
diff --git a/contrib/llvm/tools/lldb/source/Commands/CommandObjectFrame.cpp b/contrib/llvm/tools/lldb/source/Commands/CommandObjectFrame.cpp
index 8be9b6f9b7a6..7e81f5f94140 100644
--- a/contrib/llvm/tools/lldb/source/Commands/CommandObjectFrame.cpp
+++ b/contrib/llvm/tools/lldb/source/Commands/CommandObjectFrame.cpp
@@ -655,42 +655,62 @@ protected:
if (num_variables > 0) {
for (size_t i = 0; i < num_variables; i++) {
var_sp = variable_list->GetVariableAtIndex(i);
- bool dump_variable = true;
- std::string scope_string;
- if (dump_variable && m_option_variable.show_scope)
- scope_string = GetScopeString(var_sp).str();
-
- if (dump_variable) {
- // Use the variable object code to make sure we are
- // using the same APIs as the public API will be
- // using...
- valobj_sp = frame->GetValueObjectForFrameVariable(
- var_sp, m_varobj_options.use_dynamic);
- if (valobj_sp) {
- // When dumping all variables, don't print any variables
- // that are not in scope to avoid extra unneeded output
- if (valobj_sp->IsInScope()) {
- if (!valobj_sp->GetTargetSP()
- ->GetDisplayRuntimeSupportValues() &&
- valobj_sp->IsRuntimeSupportValue())
- continue;
-
- if (!scope_string.empty())
- s.PutCString(scope_string);
-
- if (m_option_variable.show_decl &&
- var_sp->GetDeclaration().GetFile()) {
- var_sp->GetDeclaration().DumpStopContext(&s, false);
- s.PutCString(": ");
- }
-
- options.SetFormat(format);
- options.SetVariableFormatDisplayLanguage(
- valobj_sp->GetPreferredDisplayLanguage());
- options.SetRootValueObjectName(
- var_sp ? var_sp->GetName().AsCString() : nullptr);
- valobj_sp->Dump(result.GetOutputStream(), options);
+          switch (var_sp->GetScope()) {
+          case eValueTypeVariableGlobal:
+          case eValueTypeVariableStatic:
+            if (!m_option_variable.show_globals)
+              continue;
+            break;
+          case eValueTypeVariableArgument:
+            if (!m_option_variable.show_args)
+              continue;
+            break;
+          case eValueTypeVariableLocal:
+            if (!m_option_variable.show_locals)
+              continue;
+            break;
+          default:
+            continue;
+          }
+ std::string scope_string;
+ if (m_option_variable.show_scope)
+ scope_string = GetScopeString(var_sp).str();
+
+ // Use the variable object code to make sure we are
+ // using the same APIs as the public API will be
+ // using...
+ valobj_sp = frame->GetValueObjectForFrameVariable(
+ var_sp, m_varobj_options.use_dynamic);
+ if (valobj_sp) {
+ // When dumping all variables, don't print any variables
+ // that are not in scope to avoid extra unneeded output
+ if (valobj_sp->IsInScope()) {
+ if (!valobj_sp->GetTargetSP()
+ ->GetDisplayRuntimeSupportValues() &&
+ valobj_sp->IsRuntimeSupportValue())
+ continue;
+
+ if (!scope_string.empty())
+ s.PutCString(scope_string);
+
+ if (m_option_variable.show_decl &&
+ var_sp->GetDeclaration().GetFile()) {
+ var_sp->GetDeclaration().DumpStopContext(&s, false);
+ s.PutCString(": ");
}
+
+ options.SetFormat(format);
+ options.SetVariableFormatDisplayLanguage(
+ valobj_sp->GetPreferredDisplayLanguage());
+ options.SetRootValueObjectName(
+ var_sp ? var_sp->GetName().AsCString() : nullptr);
+ valobj_sp->Dump(result.GetOutputStream(), options);
}
}
}
diff --git a/contrib/llvm/tools/lldb/source/Core/ArchSpec.cpp b/contrib/llvm/tools/lldb/source/Core/ArchSpec.cpp
index 60ee237aa0f5..7c1b399177fd 100644
--- a/contrib/llvm/tools/lldb/source/Core/ArchSpec.cpp
+++ b/contrib/llvm/tools/lldb/source/Core/ArchSpec.cpp
@@ -834,19 +834,7 @@ lldb::ByteOrder ArchSpec::GetByteOrder() const {
bool ArchSpec::SetTriple(const llvm::Triple &triple) {
m_triple = triple;
-
- llvm::StringRef arch_name(m_triple.getArchName());
- const CoreDefinition *core_def = FindCoreDefinition(arch_name);
- if (core_def) {
- m_core = core_def->core;
- // Set the byte order to the default byte order for an architecture.
- // This can be modified if needed for cases when cores handle both
- // big and little endian
- m_byte_order = core_def->default_byte_order;
- } else {
- Clear();
- }
-
+ UpdateCore();
return IsValid();
}
@@ -994,8 +982,10 @@ void ArchSpec::MergeFrom(const ArchSpec &other) {
GetTriple().setVendor(other.GetTriple().getVendor());
if (TripleOSIsUnspecifiedUnknown() && !other.TripleOSIsUnspecifiedUnknown())
GetTriple().setOS(other.GetTriple().getOS());
- if (GetTriple().getArch() == llvm::Triple::UnknownArch)
+ if (GetTriple().getArch() == llvm::Triple::UnknownArch) {
GetTriple().setArch(other.GetTriple().getArch());
+ UpdateCore();
+ }
if (GetTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
!TripleVendorWasSpecified()) {
if (other.TripleVendorWasSpecified())
@@ -1190,6 +1180,20 @@ bool ArchSpec::IsEqualTo(const ArchSpec &rhs, bool exact_match) const {
return false;
}
+void ArchSpec::UpdateCore() {
+ llvm::StringRef arch_name(m_triple.getArchName());
+ const CoreDefinition *core_def = FindCoreDefinition(arch_name);
+ if (core_def) {
+ m_core = core_def->core;
+ // Set the byte order to the default byte order for an architecture.
+ // This can be modified if needed for cases when cores handle both
+ // big and little endian
+ m_byte_order = core_def->default_byte_order;
+ } else {
+ Clear();
+ }
+}
+
//===----------------------------------------------------------------------===//
// Helper methods.
diff --git a/contrib/llvm/tools/lldb/source/Core/Scalar.cpp b/contrib/llvm/tools/lldb/source/Core/Scalar.cpp
index 88ad430ddbae..3adf85098648 100644
--- a/contrib/llvm/tools/lldb/source/Core/Scalar.cpp
+++ b/contrib/llvm/tools/lldb/source/Core/Scalar.cpp
@@ -2745,7 +2745,7 @@ bool Scalar::SignExtend(uint32_t sign_bit_pos) {
if (max_bit_pos == sign_bit_pos)
return true;
else if (sign_bit_pos < (max_bit_pos - 1)) {
- llvm::APInt sign_bit = llvm::APInt::getSignBit(sign_bit_pos + 1);
+ llvm::APInt sign_bit = llvm::APInt::getSignMask(sign_bit_pos + 1);
llvm::APInt bitwize_and = m_integer & sign_bit;
if (bitwize_and.getBoolValue()) {
const llvm::APInt mask =
diff --git a/contrib/llvm/tools/lldb/source/Expression/DiagnosticManager.cpp b/contrib/llvm/tools/lldb/source/Expression/DiagnosticManager.cpp
index 5ade0817b1e2..ae20feb910dd 100644
--- a/contrib/llvm/tools/lldb/source/Expression/DiagnosticManager.cpp
+++ b/contrib/llvm/tools/lldb/source/Expression/DiagnosticManager.cpp
@@ -79,3 +79,12 @@ size_t DiagnosticManager::PutString(DiagnosticSeverity severity,
AddDiagnostic(str, severity, eDiagnosticOriginLLDB);
return str.size();
}
+
+void DiagnosticManager::CopyDiagnostics(DiagnosticManager &otherDiagnostics) {
+ for (const DiagnosticList::value_type &other_diagnostic:
+ otherDiagnostics.Diagnostics()) {
+ AddDiagnostic(
+ other_diagnostic->GetMessage(), other_diagnostic->GetSeverity(),
+ other_diagnostic->getKind(), other_diagnostic->GetCompilerID());
+ }
+}
diff --git a/contrib/llvm/tools/lldb/source/Interpreter/CommandHistory.cpp b/contrib/llvm/tools/lldb/source/Interpreter/CommandHistory.cpp
index 0fa25ed806ff..ca5c90692b6a 100644
--- a/contrib/llvm/tools/lldb/source/Interpreter/CommandHistory.cpp
+++ b/contrib/llvm/tools/lldb/source/Interpreter/CommandHistory.cpp
@@ -47,13 +47,13 @@ CommandHistory::FindString(llvm::StringRef input_str) const {
size_t idx = 0;
if (input_str.front() == '-') {
- if (input_str.drop_front(2).getAsInteger(0, idx))
+ if (input_str.drop_front(1).getAsInteger(0, idx))
return llvm::None;
if (idx >= m_history.size())
return llvm::None;
idx = m_history.size() - idx;
} else {
- if (input_str.drop_front().getAsInteger(0, idx))
+ if (input_str.getAsInteger(0, idx))
return llvm::None;
if (idx >= m_history.size())
return llvm::None;
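Editor's note: the fix above corrects an off-by-one left over from when input_str still carried the leading '!'; the caller now strips it, so only the '-' needs dropping before parsing the offset. A small worked example (StringRef::getAsInteger returns true on failure):

    // input_str == "-3" (the user typed "!-3"; '!' was stripped earlier).
    size_t idx = 0;
    llvm::StringRef input_str("-3");
    bool failed = input_str.drop_front(1).getAsInteger(0, idx); // idx == 3
    // Old code: drop_front(2) left "" here, so parsing always failed;
    // likewise the non-negative branch dropped the first digit of "3".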
diff --git a/contrib/llvm/tools/lldb/source/Plugins/LanguageRuntime/RenderScript/RenderScriptRuntime/RenderScriptRuntime.cpp b/contrib/llvm/tools/lldb/source/Plugins/LanguageRuntime/RenderScript/RenderScriptRuntime/RenderScriptRuntime.cpp
index 638112b9ebde..7a9e66cf5481 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/LanguageRuntime/RenderScript/RenderScriptRuntime/RenderScriptRuntime.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/LanguageRuntime/RenderScript/RenderScriptRuntime/RenderScriptRuntime.cpp
@@ -1810,6 +1810,7 @@ enum ExpressionStrings {
const int jit_max_expr_size = 512;
// Retrieve the string to JIT for the given expression
+#define JIT_TEMPLATE_CONTEXT "void* ctxt = (void*)rsDebugGetContextWrapper(0x%" PRIx64 "); "
const char *JITTemplate(ExpressionStrings e) {
// Format strings containing the expressions we may need to evaluate.
static std::array<const char *, _eExprLast> runtime_expressions = {
@@ -1817,57 +1818,65 @@ const char *JITTemplate(ExpressionStrings e) {
"(int*)_"
"Z12GetOffsetPtrPKN7android12renderscript10AllocationEjjjj23RsAllocation"
"CubemapFace"
- "(0x%" PRIx64 ", %" PRIu32 ", %" PRIu32 ", %" PRIu32 ", 0, 0)",
+ "(0x%" PRIx64 ", %" PRIu32 ", %" PRIu32 ", %" PRIu32 ", 0, 0)", // eExprGetOffsetPtr
// Type* rsaAllocationGetType(Context*, Allocation*)
- "(void*)rsaAllocationGetType(0x%" PRIx64 ", 0x%" PRIx64 ")",
+ JIT_TEMPLATE_CONTEXT "(void*)rsaAllocationGetType(ctxt, 0x%" PRIx64 ")", // eExprAllocGetType
// rsaTypeGetNativeData(Context*, Type*, void* typeData, size) Pack the
// data in the following way mHal.state.dimX; mHal.state.dimY;
// mHal.state.dimZ; mHal.state.lodCount; mHal.state.faces; mElement; into
// typeData Need to specify 32 or 64 bit for uint_t since this differs
// between devices
- "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 6); data[0]", // X dim
- "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 6); data[1]", // Y dim
- "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 6); data[2]", // Z dim
- "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 6); data[5]", // Element ptr
+ JIT_TEMPLATE_CONTEXT
+ "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 6); data[0]", // eExprTypeDimX
+ JIT_TEMPLATE_CONTEXT
+ "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 6); data[1]", // eExprTypeDimY
+ JIT_TEMPLATE_CONTEXT
+ "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 6); data[2]", // eExprTypeDimZ
+ JIT_TEMPLATE_CONTEXT
+ "uint%" PRIu32 "_t data[6]; (void*)rsaTypeGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 6); data[5]", // eExprTypeElemPtr
// rsaElementGetNativeData(Context*, Element*, uint32_t* elemData,size)
// Pack mType; mKind; mNormalized; mVectorSize; NumSubElements into
// elemData
- "uint32_t data[5]; (void*)rsaElementGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 5); data[0]", // Type
- "uint32_t data[5]; (void*)rsaElementGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 5); data[1]", // Kind
- "uint32_t data[5]; (void*)rsaElementGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 5); data[3]", // Vector Size
- "uint32_t data[5]; (void*)rsaElementGetNativeData(0x%" PRIx64
- ", 0x%" PRIx64 ", data, 5); data[4]", // Field Count
+ JIT_TEMPLATE_CONTEXT
+ "uint32_t data[5]; (void*)rsaElementGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 5); data[0]", // eExprElementType
+ JIT_TEMPLATE_CONTEXT
+ "uint32_t data[5]; (void*)rsaElementGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 5); data[1]", // eExprElementKind
+ JIT_TEMPLATE_CONTEXT
+ "uint32_t data[5]; (void*)rsaElementGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 5); data[3]", // eExprElementVec
+ JIT_TEMPLATE_CONTEXT
+ "uint32_t data[5]; (void*)rsaElementGetNativeData(ctxt"
+ ", 0x%" PRIx64 ", data, 5); data[4]", // eExprElementFieldCount
// rsaElementGetSubElements(RsContext con, RsElement elem, uintptr_t
// *ids, const char **names, size_t *arraySizes, uint32_t dataSize)
// Needed for Allocations of structs to gather details about
// fields/Subelements Element* of field
- "void* ids[%" PRIu32 "]; const char* names[%" PRIu32
+ JIT_TEMPLATE_CONTEXT "void* ids[%" PRIu32 "]; const char* names[%" PRIu32
"]; size_t arr_size[%" PRIu32 "];"
- "(void*)rsaElementGetSubElements(0x%" PRIx64 ", 0x%" PRIx64
- ", ids, names, arr_size, %" PRIu32 "); ids[%" PRIu32 "]",
+ "(void*)rsaElementGetSubElements(ctxt, 0x%" PRIx64
+ ", ids, names, arr_size, %" PRIu32 "); ids[%" PRIu32 "]", // eExprSubelementsId
// Name of field
- "void* ids[%" PRIu32 "]; const char* names[%" PRIu32
+ JIT_TEMPLATE_CONTEXT "void* ids[%" PRIu32 "]; const char* names[%" PRIu32
"]; size_t arr_size[%" PRIu32 "];"
- "(void*)rsaElementGetSubElements(0x%" PRIx64 ", 0x%" PRIx64
- ", ids, names, arr_size, %" PRIu32 "); names[%" PRIu32 "]",
+ "(void*)rsaElementGetSubElements(ctxt, 0x%" PRIx64
+ ", ids, names, arr_size, %" PRIu32 "); names[%" PRIu32 "]", // eExprSubelementsName
// Array size of field
- "void* ids[%" PRIu32 "]; const char* names[%" PRIu32
+ JIT_TEMPLATE_CONTEXT "void* ids[%" PRIu32 "]; const char* names[%" PRIu32
"]; size_t arr_size[%" PRIu32 "];"
- "(void*)rsaElementGetSubElements(0x%" PRIx64 ", 0x%" PRIx64
- ", ids, names, arr_size, %" PRIu32 "); arr_size[%" PRIu32 "]"}};
+ "(void*)rsaElementGetSubElements(ctxt, 0x%" PRIx64
+ ", ids, names, arr_size, %" PRIu32 "); arr_size[%" PRIu32 "]"}}; // eExprSubelementsArrSize
return runtime_expressions[e];
}
@@ -1979,8 +1988,8 @@ bool RenderScriptRuntime::JITTypePacked(AllocationDetails *alloc,
for (uint32_t i = 0; i < num_exprs; ++i) {
const char *fmt_str = JITTemplate(ExpressionStrings(eExprTypeDimX + i));
- int written = snprintf(expr_bufs[i], jit_max_expr_size, fmt_str, bits,
- *alloc->context.get(), *alloc->type_ptr.get());
+ int written = snprintf(expr_bufs[i], jit_max_expr_size, fmt_str,
+ *alloc->context.get(), bits, *alloc->type_ptr.get());
if (written < 0) {
if (log)
log->Printf("%s - encoding error in snprintf().", __FUNCTION__);
@@ -2105,7 +2114,7 @@ bool RenderScriptRuntime::JITSubelements(Element &elem,
const char *fmt_str =
JITTemplate(ExpressionStrings(eExprSubelementsId + expr_index));
int written = snprintf(expr_buffer, jit_max_expr_size, fmt_str,
- field_count, field_count, field_count, context,
+ context, field_count, field_count, field_count,
*elem.element_ptr.get(), field_count, field_index);
if (written < 0) {
if (log)
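Editor's note: because every template now begins with JIT_TEMPLATE_CONTEXT, the context pointer is consumed by the first %PRIx64 conversion, which is why the snprintf call sites above move the context argument to the front. A hedged sketch of the shape (buf, context, bits, and type_ptr are placeholder names):

    // Template: "void* ctxt = (void*)rsDebugGetContextWrapper(0x%" PRIx64 "); "
    //           "uint%" PRIu32 "_t data[6]; ...(ctxt, 0x%" PRIx64 ", ...); data[0]"
    // Arguments must follow the conversions in order: context, bits, type_ptr.
    int written = snprintf(buf, sizeof(buf), fmt_str, context, bits, type_ptr);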
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp
index 298faa48e1c3..347c12943bd5 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp
@@ -224,36 +224,83 @@ void NativeProcessNetBSD::MonitorSIGTRAP(lldb::pid_t pid) {
PtraceWrapper(PT_GET_SIGINFO, pid, &info, sizeof(info));
// Get details on the signal raised.
- if (siginfo_err.Success()) {
- switch (info.psi_siginfo.si_code) {
- case TRAP_BRKPT:
+ if (siginfo_err.Fail()) {
+ return;
+ }
+
+ switch (info.psi_siginfo.si_code) {
+ case TRAP_BRKPT:
+ for (const auto &thread_sp : m_threads) {
+ static_pointer_cast<NativeThreadNetBSD>(thread_sp)
+ ->SetStoppedByBreakpoint();
+ FixupBreakpointPCAsNeeded(
+ *static_pointer_cast<NativeThreadNetBSD>(thread_sp));
+ }
+ SetState(StateType::eStateStopped, true);
+ break;
+ case TRAP_TRACE:
+ for (const auto &thread_sp : m_threads) {
+ static_pointer_cast<NativeThreadNetBSD>(thread_sp)->SetStoppedByTrace();
+ }
+ SetState(StateType::eStateStopped, true);
+ break;
+ case TRAP_EXEC: {
+ Error error = ReinitializeThreads();
+ if (error.Fail()) {
+ SetState(StateType::eStateInvalid);
+ return;
+ }
+
+ // Let our delegate know we have just exec'd.
+ NotifyDidExec();
+
+ for (const auto &thread_sp : m_threads) {
+ static_pointer_cast<NativeThreadNetBSD>(thread_sp)->SetStoppedByExec();
+ }
+ SetState(StateType::eStateStopped, true);
+ } break;
+ case TRAP_DBREG: {
+ // If a watchpoint was hit, report it
+ uint32_t wp_index;
+ Error error =
+ static_pointer_cast<NativeThreadNetBSD>(m_threads[info.psi_lwpid])
+ ->GetRegisterContext()
+ ->GetWatchpointHitIndex(wp_index,
+ (uintptr_t)info.psi_siginfo.si_addr);
+ if (error.Fail())
+ LLDB_LOG(log,
+ "received error while checking for watchpoint hits, pid = "
+ "{0}, LWP = {1}, error = {2}",
+ GetID(), info.psi_lwpid, error);
+ if (wp_index != LLDB_INVALID_INDEX32) {
for (const auto &thread_sp : m_threads) {
static_pointer_cast<NativeThreadNetBSD>(thread_sp)
- ->SetStoppedByBreakpoint();
- FixupBreakpointPCAsNeeded(
- *static_pointer_cast<NativeThreadNetBSD>(thread_sp));
+ ->SetStoppedByWatchpoint(wp_index);
}
SetState(StateType::eStateStopped, true);
break;
- case TRAP_TRACE:
+ }
+
+ // If a breakpoint was hit, report it
+ uint32_t bp_index;
+ error = static_pointer_cast<NativeThreadNetBSD>(m_threads[info.psi_lwpid])
+ ->GetRegisterContext()
+ ->GetHardwareBreakHitIndex(bp_index,
+ (uintptr_t)info.psi_siginfo.si_addr);
+ if (error.Fail())
+ LLDB_LOG(log,
+ "received error while checking for hardware "
+ "breakpoint hits, pid = {0}, LWP = {1}, error = {2}",
+ GetID(), info.psi_lwpid, error);
+ if (bp_index != LLDB_INVALID_INDEX32) {
for (const auto &thread_sp : m_threads) {
- static_pointer_cast<NativeThreadNetBSD>(thread_sp)->SetStoppedByTrace();
+ static_pointer_cast<NativeThreadNetBSD>(thread_sp)
+ ->SetStoppedByBreakpoint();
}
SetState(StateType::eStateStopped, true);
break;
- case TRAP_EXEC: {
- Error error = ReinitializeThreads();
- if (error.Fail()) {
- SetState(StateType::eStateInvalid);
- return;
- }
-
- // Let our delegate know we have just exec'd.
- NotifyDidExec();
-
- SetState(StateType::eStateStopped, true);
- } break;
}
+ } break;
}
}
@@ -328,8 +375,8 @@ Error NativeProcessNetBSD::FixupBreakpointPCAsNeeded(
return error;
} else
LLDB_LOG(log, "breakpoint size: {0}", breakpoint_size);
- // First try probing for a breakpoint at a software breakpoint location: PC -
- // breakpoint size.
+ // First try probing for a breakpoint at a software breakpoint location: PC
+ // - breakpoint size.
const lldb::addr_t initial_pc_addr =
context_sp->GetPCfromBreakpointLocation();
lldb::addr_t breakpoint_addr = initial_pc_addr;
@@ -439,7 +486,7 @@ Error NativeProcessNetBSD::Resume(const ResumeActionList &resume_actions) {
llvm_unreachable("Unexpected state");
default:
- return Error("NativeProcessLinux::%s (): unexpected state %s specified "
+ return Error("NativeProcessNetBSD::%s (): unexpected state %s specified "
"for pid %" PRIu64 ", tid %" PRIu64,
__FUNCTION__, StateAsCString(action->state), GetID(),
thread_sp->GetID());
@@ -540,8 +587,8 @@ Error NativeProcessNetBSD::GetMemoryRegionInfo(lldb::addr_t load_addr,
"descending memory map entries detected, unexpected");
prev_base_address = proc_entry_info.GetRange().GetRangeBase();
UNUSED_IF_ASSERT_DISABLED(prev_base_address);
- // If the target address comes before this entry, indicate distance to next
- // region.
+ // If the target address comes before this entry, indicate distance to
+ // next region.
if (load_addr < proc_entry_info.GetRange().GetRangeBase()) {
range_info.GetRange().SetRangeBase(load_addr);
range_info.GetRange().SetByteSize(
@@ -561,9 +608,8 @@ Error NativeProcessNetBSD::GetMemoryRegionInfo(lldb::addr_t load_addr,
}
// If we made it here, we didn't find an entry that contained the given
// address. Return the
- // load_addr as start and the amount of bytes betwwen load address and the end
- // of the memory as
- // size.
+  // load_addr as start and the number of bytes between the load address and
+  // the end of the memory as size.
range_info.GetRange().SetRangeBase(load_addr);
range_info.GetRange().SetRangeEnd(LLDB_INVALID_ADDRESS);
range_info.SetReadable(MemoryRegionInfo::OptionalBool::eNo);
@@ -722,8 +768,8 @@ Error NativeProcessNetBSD::LaunchInferior(MainLoop &mainloop,
LLDB_LOG(log, "waitpid for inferior failed with %s", error);
// Mark the inferior as invalid.
- // FIXME this could really use a new state - eStateLaunchFailure. For now,
- // using eStateInvalid.
+ // FIXME this could really use a new state - eStateLaunchFailure. For
+ // now, using eStateInvalid.
SetState(StateType::eStateInvalid);
return error;
@@ -766,6 +812,11 @@ Error NativeProcessNetBSD::LaunchInferior(MainLoop &mainloop,
return error;
}
+ for (const auto &thread_sp : m_threads) {
+ static_pointer_cast<NativeThreadNetBSD>(thread_sp)->SetStoppedBySignal(
+ SIGSTOP);
+ }
+
/* Set process stopped */
SetState(StateType::eStateStopped);
@@ -894,6 +945,11 @@ NativeThreadNetBSDSP NativeProcessNetBSD::AddThread(lldb::tid_t thread_id) {
return -1;
}
+ for (const auto &thread_sp : m_threads) {
+ static_pointer_cast<NativeThreadNetBSD>(thread_sp)->SetStoppedBySignal(
+ SIGSTOP);
+ }
+
// Let our process instance know the thread has stopped.
SetState(StateType::eStateStopped);
@@ -1007,7 +1063,6 @@ Error NativeProcessNetBSD::ReinitializeThreads() {
// Reinitialize from scratch threads and register them in process
while (info.pl_lwpid != 0) {
NativeThreadNetBSDSP thread_sp = AddThread(info.pl_lwpid);
- thread_sp->SetStoppedByExec();
error = PtraceWrapper(PT_LWPINFO, GetID(), &info, sizeof(info));
if (error.Fail()) {
return error;
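Editor's note: NetBSD enumerates the LWPs of a traced process by calling ptrace(PT_LWPINFO) repeatedly until pl_lwpid comes back as 0, which is the protocol the ReinitializeThreads() loop above follows. A minimal standalone sketch of that walk, with error handling elided:

    // Sketch: walk all LWPs of a traced NetBSD process.
    #include <sys/types.h>
    #include <sys/ptrace.h>

    void walkLwps(pid_t pid) {
      struct ptrace_lwpinfo info = {};
      info.pl_lwpid = 0; // start of iteration
      while (ptrace(PT_LWPINFO, pid, &info, sizeof(info)) != -1 &&
             info.pl_lwpid != 0) {
        // info.pl_lwpid is the next LWP id; register a thread for it here.
      }
    }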
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.cpp b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.cpp
index 1bb6324c97fe..cd47deac73ad 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.cpp
@@ -57,6 +57,22 @@ Error NativeRegisterContextNetBSD::WriteFPR() {
return DoWriteFPR(buf);
}
+Error NativeRegisterContextNetBSD::ReadDBR() {
+ void *buf = GetDBRBuffer();
+ if (!buf)
+ return Error("DBR buffer is NULL");
+
+ return DoReadDBR(buf);
+}
+
+Error NativeRegisterContextNetBSD::WriteDBR() {
+ void *buf = GetDBRBuffer();
+ if (!buf)
+ return Error("DBR buffer is NULL");
+
+ return DoWriteDBR(buf);
+}
+
Error NativeRegisterContextNetBSD::DoReadGPR(void *buf) {
return NativeProcessNetBSD::PtraceWrapper(PT_GETREGS, GetProcessPid(), buf,
m_thread.GetID());
@@ -77,6 +93,16 @@ Error NativeRegisterContextNetBSD::DoWriteFPR(void *buf) {
m_thread.GetID());
}
+Error NativeRegisterContextNetBSD::DoReadDBR(void *buf) {
+ return NativeProcessNetBSD::PtraceWrapper(PT_GETDBREGS, GetProcessPid(), buf,
+ m_thread.GetID());
+}
+
+Error NativeRegisterContextNetBSD::DoWriteDBR(void *buf) {
+ return NativeProcessNetBSD::PtraceWrapper(PT_SETDBREGS, GetProcessPid(), buf,
+ m_thread.GetID());
+}
+
NativeProcessNetBSD &NativeRegisterContextNetBSD::GetProcess() {
auto process_sp =
std::static_pointer_cast<NativeProcessNetBSD>(m_thread.GetProcess());
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.h b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.h
index 5ff59bc87c98..d820baac3afa 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.h
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD.h
@@ -41,6 +41,9 @@ protected:
virtual Error ReadFPR();
virtual Error WriteFPR();
+ virtual Error ReadDBR();
+ virtual Error WriteDBR();
+
virtual void *GetGPRBuffer() { return nullptr; }
virtual size_t GetGPRSize() {
return GetRegisterInfoInterface().GetGPRSize();
@@ -49,12 +52,18 @@ protected:
virtual void *GetFPRBuffer() { return nullptr; }
virtual size_t GetFPRSize() { return 0; }
+ virtual void *GetDBRBuffer() { return nullptr; }
+ virtual size_t GetDBRSize() { return 0; }
+
virtual Error DoReadGPR(void *buf);
virtual Error DoWriteGPR(void *buf);
virtual Error DoReadFPR(void *buf);
virtual Error DoWriteFPR(void *buf);
+ virtual Error DoReadDBR(void *buf);
+ virtual Error DoWriteDBR(void *buf);
+
virtual NativeProcessNetBSD &GetProcess();
virtual ::pid_t GetProcessPid();
};
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.cpp b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.cpp
index 76e64ac48d66..dc37be7b934b 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.cpp
@@ -19,7 +19,15 @@
#include "Plugins/Process/Utility/RegisterContextNetBSD_x86_64.h"
+// clang-format off
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <x86/cpu.h>
#include <elf.h>
+#include <err.h>
+#include <stdint.h>
+#include <stdlib.h>
+// clang-format on
using namespace lldb_private;
using namespace lldb_private::process_netbsd;
@@ -86,6 +94,57 @@ static const RegisterSet g_reg_sets_x86_64[k_num_register_sets] = {
#define REG_CONTEXT_SIZE (GetRegisterInfoInterface().GetGPRSize())
+const int fpu_present = []() -> int {
+ int mib[2];
+ int error;
+ size_t len;
+ int val;
+
+ len = sizeof(val);
+ mib[0] = CTL_MACHDEP;
+ mib[1] = CPU_FPU_PRESENT;
+
+ error = sysctl(mib, __arraycount(mib), &val, &len, NULL, 0);
+ if (error)
+ errx(EXIT_FAILURE, "sysctl");
+
+ return val;
+}();
+
+const int osfxsr = []() -> int {
+ int mib[2];
+ int error;
+ size_t len;
+ int val;
+
+ len = sizeof(val);
+ mib[0] = CTL_MACHDEP;
+ mib[1] = CPU_OSFXSR;
+
+ error = sysctl(mib, __arraycount(mib), &val, &len, NULL, 0);
+ if (error)
+ errx(EXIT_FAILURE, "sysctl");
+
+ return val;
+}();
+
+const int fpu_save = []() -> int {
+ int mib[2];
+ int error;
+ size_t len;
+ int val;
+
+ len = sizeof(val);
+ mib[0] = CTL_MACHDEP;
+ mib[1] = CPU_FPU_SAVE;
+
+ error = sysctl(mib, __arraycount(mib), &val, &len, NULL, 0);
+ if (error)
+ errx(EXIT_FAILURE, "sysctl");
+
+ return val;
+}();
+
} // namespace
NativeRegisterContextNetBSD *
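Editor's note: the three lambda initializers above differ only in the second MIB name. If deduplication were wanted, a shared helper could express the same CTL_MACHDEP queries; this is a sketch, not part of the commit, and uses the same headers the file already includes:

    // Sketch: one helper for the CTL_MACHDEP integer queries above.
    static int machdepSysctl(int name) {
      int mib[2] = {CTL_MACHDEP, name};
      int val = 0;
      size_t len = sizeof(val);
      if (sysctl(mib, 2, &val, &len, NULL, 0) != 0)
        errx(EXIT_FAILURE, "sysctl");
      return val;
    }
    // const int fpu_present = machdepSysctl(CPU_FPU_PRESENT); and so on.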
@@ -114,7 +173,7 @@ NativeRegisterContextNetBSD_x86_64::NativeRegisterContextNetBSD_x86_64(
uint32_t concrete_frame_idx)
: NativeRegisterContextNetBSD(native_thread, concrete_frame_idx,
CreateRegisterInfoInterface(target_arch)),
- m_gpr_x86_64() {}
+ m_gpr_x86_64(), m_fpr_x86_64(), m_dbr_x86_64() {}
// CONSIDER after local and llgs debugging are merged, register set support can
// be moved into a base x86-64 class with IsRegisterSetAvailable made virtual.
@@ -143,8 +202,18 @@ NativeRegisterContextNetBSD_x86_64::GetRegisterSet(uint32_t set_index) const {
int NativeRegisterContextNetBSD_x86_64::GetSetForNativeRegNum(
int reg_num) const {
- if (reg_num < lldb_fctrl_x86_64)
+ if (reg_num <= k_last_gpr_x86_64)
return GPRegSet;
+ else if (reg_num <= k_last_fpr_x86_64)
+ return (fpu_present == 1 && osfxsr == 1 && fpu_save >= 1) ? FPRegSet : -1;
+ else if (reg_num <= k_last_avx_x86_64)
+ return -1; // AVX
+ else if (reg_num <= k_last_mpxr_x86_64)
+ return -1; // MPXR
+ else if (reg_num <= k_last_mpxc_x86_64)
+ return -1; // MPXC
+ else if (reg_num <= lldb_dr7_x86_64)
+ return DBRegSet; // DBR
else
return -1;
}
@@ -157,6 +226,9 @@ int NativeRegisterContextNetBSD_x86_64::ReadRegisterSet(uint32_t set) {
case FPRegSet:
ReadFPR();
return 0;
+ case DBRegSet:
+ ReadDBR();
+ return 0;
default:
break;
}
@@ -170,6 +242,9 @@ int NativeRegisterContextNetBSD_x86_64::WriteRegisterSet(uint32_t set) {
case FPRegSet:
WriteFPR();
return 0;
+ case DBRegSet:
+ WriteDBR();
+ return 0;
default:
break;
}
@@ -285,6 +360,87 @@ Error NativeRegisterContextNetBSD_x86_64::ReadRegister(
case lldb_es_x86_64:
reg_value = (uint64_t)m_gpr_x86_64.regs[_REG_ES];
break;
+ case lldb_fctrl_x86_64:
+ reg_value = (uint16_t)m_fpr_x86_64.fxstate.fx_cw;
+ break;
+ case lldb_fstat_x86_64:
+ reg_value = (uint16_t)m_fpr_x86_64.fxstate.fx_sw;
+ break;
+ case lldb_ftag_x86_64:
+ reg_value = (uint8_t)m_fpr_x86_64.fxstate.fx_tw;
+ break;
+ case lldb_fop_x86_64:
+ reg_value = (uint64_t)m_fpr_x86_64.fxstate.fx_opcode;
+ break;
+ case lldb_fiseg_x86_64:
+ reg_value = (uint64_t)m_fpr_x86_64.fxstate.fx_ip.fa_64;
+ break;
+ case lldb_fioff_x86_64:
+ reg_value = (uint32_t)m_fpr_x86_64.fxstate.fx_ip.fa_32.fa_off;
+ break;
+ case lldb_foseg_x86_64:
+ reg_value = (uint64_t)m_fpr_x86_64.fxstate.fx_dp.fa_64;
+ break;
+ case lldb_fooff_x86_64:
+ reg_value = (uint32_t)m_fpr_x86_64.fxstate.fx_dp.fa_32.fa_off;
+ break;
+ case lldb_mxcsr_x86_64:
+ reg_value = (uint32_t)m_fpr_x86_64.fxstate.fx_mxcsr;
+ break;
+ case lldb_mxcsrmask_x86_64:
+ reg_value = (uint32_t)m_fpr_x86_64.fxstate.fx_mxcsr_mask;
+ break;
+ case lldb_st0_x86_64:
+ case lldb_st1_x86_64:
+ case lldb_st2_x86_64:
+ case lldb_st3_x86_64:
+ case lldb_st4_x86_64:
+ case lldb_st5_x86_64:
+ case lldb_st6_x86_64:
+ case lldb_st7_x86_64:
+ reg_value.SetBytes(&m_fpr_x86_64.fxstate.fx_87_ac[reg - lldb_st0_x86_64],
+ reg_info->byte_size, endian::InlHostByteOrder());
+ break;
+ case lldb_mm0_x86_64:
+ case lldb_mm1_x86_64:
+ case lldb_mm2_x86_64:
+ case lldb_mm3_x86_64:
+ case lldb_mm4_x86_64:
+ case lldb_mm5_x86_64:
+ case lldb_mm6_x86_64:
+ case lldb_mm7_x86_64:
+ reg_value.SetBytes(&m_fpr_x86_64.fxstate.fx_xmm[reg - lldb_mm0_x86_64],
+ reg_info->byte_size, endian::InlHostByteOrder());
+ break;
+ case lldb_xmm0_x86_64:
+ case lldb_xmm1_x86_64:
+ case lldb_xmm2_x86_64:
+ case lldb_xmm3_x86_64:
+ case lldb_xmm4_x86_64:
+ case lldb_xmm5_x86_64:
+ case lldb_xmm6_x86_64:
+ case lldb_xmm7_x86_64:
+ case lldb_xmm8_x86_64:
+ case lldb_xmm9_x86_64:
+ case lldb_xmm10_x86_64:
+ case lldb_xmm11_x86_64:
+ case lldb_xmm12_x86_64:
+ case lldb_xmm13_x86_64:
+ case lldb_xmm14_x86_64:
+ case lldb_xmm15_x86_64:
+ reg_value.SetBytes(&m_fpr_x86_64.fxstate.fx_xmm[reg - lldb_xmm0_x86_64],
+ reg_info->byte_size, endian::InlHostByteOrder());
+ break;
+ case lldb_dr0_x86_64:
+ case lldb_dr1_x86_64:
+ case lldb_dr2_x86_64:
+ case lldb_dr3_x86_64:
+ case lldb_dr4_x86_64:
+ case lldb_dr5_x86_64:
+ case lldb_dr6_x86_64:
+ case lldb_dr7_x86_64:
+ reg_value = (uint64_t)m_dbr_x86_64.dr[reg - lldb_dr0_x86_64];
+ break;
}
return error;
@@ -400,6 +556,87 @@ Error NativeRegisterContextNetBSD_x86_64::WriteRegister(
case lldb_es_x86_64:
m_gpr_x86_64.regs[_REG_ES] = reg_value.GetAsUInt64();
break;
+ case lldb_fctrl_x86_64:
+ m_fpr_x86_64.fxstate.fx_cw = reg_value.GetAsUInt16();
+ break;
+ case lldb_fstat_x86_64:
+ m_fpr_x86_64.fxstate.fx_sw = reg_value.GetAsUInt16();
+ break;
+ case lldb_ftag_x86_64:
+ m_fpr_x86_64.fxstate.fx_tw = reg_value.GetAsUInt8();
+ break;
+ case lldb_fop_x86_64:
+ m_fpr_x86_64.fxstate.fx_opcode = reg_value.GetAsUInt16();
+ break;
+ case lldb_fiseg_x86_64:
+ m_fpr_x86_64.fxstate.fx_ip.fa_64 = reg_value.GetAsUInt64();
+ break;
+ case lldb_fioff_x86_64:
+ m_fpr_x86_64.fxstate.fx_ip.fa_32.fa_off = reg_value.GetAsUInt32();
+ break;
+ case lldb_foseg_x86_64:
+ m_fpr_x86_64.fxstate.fx_dp.fa_64 = reg_value.GetAsUInt64();
+ break;
+ case lldb_fooff_x86_64:
+ m_fpr_x86_64.fxstate.fx_dp.fa_32.fa_off = reg_value.GetAsUInt32();
+ break;
+ case lldb_mxcsr_x86_64:
+ m_fpr_x86_64.fxstate.fx_mxcsr = reg_value.GetAsUInt32();
+ break;
+ case lldb_mxcsrmask_x86_64:
+ m_fpr_x86_64.fxstate.fx_mxcsr_mask = reg_value.GetAsUInt32();
+ break;
+ case lldb_st0_x86_64:
+ case lldb_st1_x86_64:
+ case lldb_st2_x86_64:
+ case lldb_st3_x86_64:
+ case lldb_st4_x86_64:
+ case lldb_st5_x86_64:
+ case lldb_st6_x86_64:
+ case lldb_st7_x86_64:
+ ::memcpy(&m_fpr_x86_64.fxstate.fx_87_ac[reg - lldb_st0_x86_64],
+ reg_value.GetBytes(), reg_value.GetByteSize());
+ break;
+ case lldb_mm0_x86_64:
+ case lldb_mm1_x86_64:
+ case lldb_mm2_x86_64:
+ case lldb_mm3_x86_64:
+ case lldb_mm4_x86_64:
+ case lldb_mm5_x86_64:
+ case lldb_mm6_x86_64:
+ case lldb_mm7_x86_64:
+ ::memcpy(&m_fpr_x86_64.fxstate.fx_xmm[reg - lldb_mm0_x86_64],
+ reg_value.GetBytes(), reg_value.GetByteSize());
+ break;
+ case lldb_xmm0_x86_64:
+ case lldb_xmm1_x86_64:
+ case lldb_xmm2_x86_64:
+ case lldb_xmm3_x86_64:
+ case lldb_xmm4_x86_64:
+ case lldb_xmm5_x86_64:
+ case lldb_xmm6_x86_64:
+ case lldb_xmm7_x86_64:
+ case lldb_xmm8_x86_64:
+ case lldb_xmm9_x86_64:
+ case lldb_xmm10_x86_64:
+ case lldb_xmm11_x86_64:
+ case lldb_xmm12_x86_64:
+ case lldb_xmm13_x86_64:
+ case lldb_xmm14_x86_64:
+ case lldb_xmm15_x86_64:
+ ::memcpy(&m_fpr_x86_64.fxstate.fx_xmm[reg - lldb_xmm0_x86_64],
+ reg_value.GetBytes(), reg_value.GetByteSize());
+ break;
+ case lldb_dr0_x86_64:
+ case lldb_dr1_x86_64:
+ case lldb_dr2_x86_64:
+ case lldb_dr3_x86_64:
+ case lldb_dr4_x86_64:
+ case lldb_dr5_x86_64:
+ case lldb_dr6_x86_64:
+ case lldb_dr7_x86_64:
+ m_dbr_x86_64.dr[reg - lldb_dr0_x86_64] = reg_value.GetAsUInt64();
+ break;
}
if (WriteRegisterSet(set) != 0)
@@ -480,4 +717,223 @@ Error NativeRegisterContextNetBSD_x86_64::WriteAllRegisterValues(
return error;
}
+Error NativeRegisterContextNetBSD_x86_64::IsWatchpointHit(uint32_t wp_index,
+ bool &is_hit) {
+ if (wp_index >= NumSupportedHardwareWatchpoints())
+ return Error("Watchpoint index out of range");
+
+ RegisterValue reg_value;
+ const RegisterInfo *const reg_info = GetRegisterInfoAtIndex(lldb_dr6_x86_64);
+ Error error = ReadRegister(reg_info, reg_value);
+ if (error.Fail()) {
+ is_hit = false;
+ return error;
+ }
+
+ uint64_t status_bits = reg_value.GetAsUInt64();
+
+ is_hit = status_bits & (1 << wp_index);
+
+ return error;
+}
+
+Error NativeRegisterContextNetBSD_x86_64::GetWatchpointHitIndex(
+ uint32_t &wp_index, lldb::addr_t trap_addr) {
+ uint32_t num_hw_wps = NumSupportedHardwareWatchpoints();
+ for (wp_index = 0; wp_index < num_hw_wps; ++wp_index) {
+ bool is_hit;
+ Error error = IsWatchpointHit(wp_index, is_hit);
+ if (error.Fail()) {
+ wp_index = LLDB_INVALID_INDEX32;
+ return error;
+ } else if (is_hit) {
+ return error;
+ }
+ }
+ wp_index = LLDB_INVALID_INDEX32;
+ return Error();
+}
+
+Error NativeRegisterContextNetBSD_x86_64::IsWatchpointVacant(uint32_t wp_index,
+ bool &is_vacant) {
+ if (wp_index >= NumSupportedHardwareWatchpoints())
+ return Error("Watchpoint index out of range");
+
+ RegisterValue reg_value;
+ const RegisterInfo *const reg_info = GetRegisterInfoAtIndex(lldb_dr7_x86_64);
+ Error error = ReadRegister(reg_info, reg_value);
+ if (error.Fail()) {
+ is_vacant = false;
+ return error;
+ }
+
+ uint64_t control_bits = reg_value.GetAsUInt64();
+
+ is_vacant = !(control_bits & (1 << (2 * wp_index)));
+
+ return error;
+}
+
+Error NativeRegisterContextNetBSD_x86_64::SetHardwareWatchpointWithIndex(
+ lldb::addr_t addr, size_t size, uint32_t watch_flags, uint32_t wp_index) {
+
+ if (wp_index >= NumSupportedHardwareWatchpoints())
+ return Error("Watchpoint index out of range");
+
+  // Read-only watchpoints aren't supported on x86_64. Fall back to read/write
+  // watchpoints instead.
+ // TODO: Add logic to detect when a write happens and ignore that watchpoint
+ // hit.
+ if (watch_flags == 0x2)
+ watch_flags = 0x3;
+
+ if (watch_flags != 0x1 && watch_flags != 0x3)
+ return Error("Invalid read/write bits for watchpoint");
+
+ if (size != 1 && size != 2 && size != 4 && size != 8)
+ return Error("Invalid size for watchpoint");
+
+ bool is_vacant;
+ Error error = IsWatchpointVacant(wp_index, is_vacant);
+ if (error.Fail())
+ return error;
+ if (!is_vacant)
+ return Error("Watchpoint index not vacant");
+
+ RegisterValue reg_value;
+ const RegisterInfo *const reg_info_dr7 =
+ GetRegisterInfoAtIndex(lldb_dr7_x86_64);
+ error = ReadRegister(reg_info_dr7, reg_value);
+ if (error.Fail())
+ return error;
+
+ // for watchpoints 0, 1, 2, or 3, respectively,
+ // set bits 1, 3, 5, or 7
+ uint64_t enable_bit = 1 << (2 * wp_index);
+
+ // set bits 16-17, 20-21, 24-25, or 28-29
+ // with 0b01 for write, and 0b11 for read/write
+ uint64_t rw_bits = watch_flags << (16 + 4 * wp_index);
+
+ // set bits 18-19, 22-23, 26-27, or 30-31
+ // with 0b00, 0b01, 0b10, or 0b11
+ // for 1, 2, 8 (if supported), or 4 bytes, respectively
+ uint64_t size_bits = (size == 8 ? 0x2 : size - 1) << (18 + 4 * wp_index);
+
+ uint64_t bit_mask = (0x3 << (2 * wp_index)) | (0xF << (16 + 4 * wp_index));
+
+ uint64_t control_bits = reg_value.GetAsUInt64() & ~bit_mask;
+
+ control_bits |= enable_bit | rw_bits | size_bits;
+
+ const RegisterInfo *const reg_info_drN =
+ GetRegisterInfoAtIndex(lldb_dr0_x86_64 + wp_index);
+ error = WriteRegister(reg_info_drN, RegisterValue(addr));
+ if (error.Fail())
+ return error;
+
+ error = WriteRegister(reg_info_dr7, RegisterValue(control_bits));
+ if (error.Fail())
+ return error;
+
+ error.Clear();
+ return error;
+}
+
+bool NativeRegisterContextNetBSD_x86_64::ClearHardwareWatchpoint(
+ uint32_t wp_index) {
+ if (wp_index >= NumSupportedHardwareWatchpoints())
+ return false;
+
+ RegisterValue reg_value;
+
+ // for watchpoints 0, 1, 2, or 3, respectively,
+ // clear bits 0, 1, 2, or 3 of the debug status register (DR6)
+ const RegisterInfo *const reg_info_dr6 =
+ GetRegisterInfoAtIndex(lldb_dr6_x86_64);
+ Error error = ReadRegister(reg_info_dr6, reg_value);
+ if (error.Fail())
+ return false;
+ uint64_t bit_mask = 1 << wp_index;
+ uint64_t status_bits = reg_value.GetAsUInt64() & ~bit_mask;
+ error = WriteRegister(reg_info_dr6, RegisterValue(status_bits));
+ if (error.Fail())
+ return false;
+
+ // for watchpoints 0, 1, 2, or 3, respectively,
+ // clear bits {0-1,16-19}, {2-3,20-23}, {4-5,24-27}, or {6-7,28-31}
+ // of the debug control register (DR7)
+ const RegisterInfo *const reg_info_dr7 =
+ GetRegisterInfoAtIndex(lldb_dr7_x86_64);
+ error = ReadRegister(reg_info_dr7, reg_value);
+ if (error.Fail())
+ return false;
+ bit_mask = (0x3 << (2 * wp_index)) | (0xF << (16 + 4 * wp_index));
+ uint64_t control_bits = reg_value.GetAsUInt64() & ~bit_mask;
+ return WriteRegister(reg_info_dr7, RegisterValue(control_bits)).Success();
+}
+
+Error NativeRegisterContextNetBSD_x86_64::ClearAllHardwareWatchpoints() {
+ RegisterValue reg_value;
+
+ // clear bits {0-4} of the debug status register (DR6)
+ const RegisterInfo *const reg_info_dr6 =
+ GetRegisterInfoAtIndex(lldb_dr6_x86_64);
+ Error error = ReadRegister(reg_info_dr6, reg_value);
+ if (error.Fail())
+ return error;
+ uint64_t bit_mask = 0xF;
+ uint64_t status_bits = reg_value.GetAsUInt64() & ~bit_mask;
+ error = WriteRegister(reg_info_dr6, RegisterValue(status_bits));
+ if (error.Fail())
+ return error;
+
+ // clear bits {0-7,16-31} of the debug control register (DR7)
+ const RegisterInfo *const reg_info_dr7 =
+ GetRegisterInfoAtIndex(lldb_dr7_x86_64);
+ error = ReadRegister(reg_info_dr7, reg_value);
+ if (error.Fail())
+ return error;
+ bit_mask = 0xFF | (0xFFFF << 16);
+ uint64_t control_bits = reg_value.GetAsUInt64() & ~bit_mask;
+ return WriteRegister(reg_info_dr7, RegisterValue(control_bits));
+}
+
+uint32_t NativeRegisterContextNetBSD_x86_64::SetHardwareWatchpoint(
+ lldb::addr_t addr, size_t size, uint32_t watch_flags) {
+ Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_WATCHPOINTS));
+ const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
+ for (uint32_t wp_index = 0; wp_index < num_hw_watchpoints; ++wp_index) {
+ bool is_vacant;
+ Error error = IsWatchpointVacant(wp_index, is_vacant);
+ if (is_vacant) {
+ error = SetHardwareWatchpointWithIndex(addr, size, watch_flags, wp_index);
+ if (error.Success())
+ return wp_index;
+ }
+ if (error.Fail() && log) {
+ log->Printf("NativeRegisterContextNetBSD_x86_64::%s Error: %s",
+ __FUNCTION__, error.AsCString());
+ }
+ }
+ return LLDB_INVALID_INDEX32;
+}
+
+lldb::addr_t
+NativeRegisterContextNetBSD_x86_64::GetWatchpointAddress(uint32_t wp_index) {
+ if (wp_index >= NumSupportedHardwareWatchpoints())
+ return LLDB_INVALID_ADDRESS;
+ RegisterValue reg_value;
+ const RegisterInfo *const reg_info_drN =
+ GetRegisterInfoAtIndex(lldb_dr0_x86_64 + wp_index);
+ if (ReadRegister(reg_info_drN, reg_value).Fail())
+ return LLDB_INVALID_ADDRESS;
+ return reg_value.GetAsUInt64();
+}
+
+uint32_t NativeRegisterContextNetBSD_x86_64::NumSupportedHardwareWatchpoints() {
+ // Available debug address registers: dr0, dr1, dr2, dr3
+ return 4;
+}
+
#endif // defined(__x86_64__)
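Editor's note: the DR7 bit arithmetic in SetHardwareWatchpointWithIndex() above follows the standard x86 debug-register layout: local-enable bits in the low byte, then per-watchpoint read/write and length fields packed four bits apart starting at bit 16. A compact sketch of the encoding for watchpoint index i, mirroring the code above:

    // DR7 bits for watchpoint index i (0..3), as computed above.
    uint64_t dr7Bits(unsigned i, uint64_t rw /*0b01 write, 0b11 rw*/,
                     size_t size /*1, 2, 4, or 8 bytes*/) {
      uint64_t enable  = 1ULL << (2 * i);                        // L0..L3
      uint64_t rwBits  = rw << (16 + 4 * i);                     // R/W field
      uint64_t lenBits = (size == 8 ? 2ULL : size - 1)           // LEN field:
                         << (18 + 4 * i);                        // 1,2,4 -> 0,1,3; 8 -> 2
      return enable | rwBits | lenBits;                          // OR into DR7
    }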
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.h b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.h
index f6f7d7f0976a..35b7cf1c2f19 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.h
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeRegisterContextNetBSD_x86_64.h
@@ -46,17 +46,40 @@ public:
Error WriteAllRegisterValues(const lldb::DataBufferSP &data_sp) override;
+ Error IsWatchpointHit(uint32_t wp_index, bool &is_hit) override;
+
+ Error GetWatchpointHitIndex(uint32_t &wp_index,
+ lldb::addr_t trap_addr) override;
+
+ Error IsWatchpointVacant(uint32_t wp_index, bool &is_vacant) override;
+
+ bool ClearHardwareWatchpoint(uint32_t wp_index) override;
+
+ Error ClearAllHardwareWatchpoints() override;
+
+ Error SetHardwareWatchpointWithIndex(lldb::addr_t addr, size_t size,
+ uint32_t watch_flags, uint32_t wp_index);
+
+ uint32_t SetHardwareWatchpoint(lldb::addr_t addr, size_t size,
+ uint32_t watch_flags) override;
+
+ lldb::addr_t GetWatchpointAddress(uint32_t wp_index) override;
+
+ uint32_t NumSupportedHardwareWatchpoints() override;
+
protected:
void *GetGPRBuffer() override { return &m_gpr_x86_64; }
void *GetFPRBuffer() override { return &m_fpr_x86_64; }
+ void *GetDBRBuffer() override { return &m_dbr_x86_64; }
private:
// Private member types.
- enum { GPRegSet, FPRegSet };
+ enum { GPRegSet, FPRegSet, DBRegSet };
// Private member variables.
struct reg m_gpr_x86_64;
struct fpreg m_fpr_x86_64;
+ struct dbreg m_dbr_x86_64;
int GetSetForNativeRegNum(int reg_num) const;
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
index f23621e45aad..9beb65288c2f 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
@@ -16,6 +16,9 @@
#include "Plugins/Process/POSIX/ProcessPOSIXLog.h"
#include "lldb/Core/RegisterValue.h"
#include "lldb/Core/State.h"
+#include "lldb/Utility/LLDBAssert.h"
+
+#include <sstream>
using namespace lldb;
using namespace lldb_private;
@@ -68,6 +71,23 @@ void NativeThreadNetBSD::SetStoppedByExec() {
m_stop_info.details.signal.signo = SIGTRAP;
}
+void NativeThreadNetBSD::SetStoppedByWatchpoint(uint32_t wp_index) {
+ SetStopped();
+
+ lldbassert(wp_index != LLDB_INVALID_INDEX32 && "wp_index cannot be invalid");
+
+ std::ostringstream ostr;
+ ostr << GetRegisterContext()->GetWatchpointAddress(wp_index) << " ";
+ ostr << wp_index;
+
+ ostr << " " << GetRegisterContext()->GetWatchpointHitAddress(wp_index);
+
+ m_stop_description = ostr.str();
+
+ m_stop_info.reason = StopReason::eStopReasonWatchpoint;
+ m_stop_info.details.signal.signo = SIGTRAP;
+}
+
void NativeThreadNetBSD::SetStopped() {
const StateType new_state = StateType::eStateStopped;
m_state = new_state;
@@ -142,18 +162,61 @@ NativeRegisterContextSP NativeThreadNetBSD::GetRegisterContext() {
Error NativeThreadNetBSD::SetWatchpoint(lldb::addr_t addr, size_t size,
uint32_t watch_flags, bool hardware) {
- return Error("Unimplemented");
+ if (!hardware)
+ return Error("not implemented");
+ if (m_state == eStateLaunching)
+ return Error();
+ Error error = RemoveWatchpoint(addr);
+ if (error.Fail())
+ return error;
+ NativeRegisterContextSP reg_ctx = GetRegisterContext();
+ uint32_t wp_index = reg_ctx->SetHardwareWatchpoint(addr, size, watch_flags);
+ if (wp_index == LLDB_INVALID_INDEX32)
+ return Error("Setting hardware watchpoint failed.");
+ m_watchpoint_index_map.insert({addr, wp_index});
+ return Error();
}
Error NativeThreadNetBSD::RemoveWatchpoint(lldb::addr_t addr) {
- return Error("Unimplemented");
+ auto wp = m_watchpoint_index_map.find(addr);
+ if (wp == m_watchpoint_index_map.end())
+ return Error();
+ uint32_t wp_index = wp->second;
+ m_watchpoint_index_map.erase(wp);
+ if (GetRegisterContext()->ClearHardwareWatchpoint(wp_index))
+ return Error();
+ return Error("Clearing hardware watchpoint failed.");
}
Error NativeThreadNetBSD::SetHardwareBreakpoint(lldb::addr_t addr,
size_t size) {
- return Error("Unimplemented");
+ if (m_state == eStateLaunching)
+ return Error();
+
+ Error error = RemoveHardwareBreakpoint(addr);
+ if (error.Fail())
+ return error;
+
+ NativeRegisterContextSP reg_ctx = GetRegisterContext();
+ uint32_t bp_index = reg_ctx->SetHardwareBreakpoint(addr, size);
+
+ if (bp_index == LLDB_INVALID_INDEX32)
+ return Error("Setting hardware breakpoint failed.");
+
+ m_hw_break_index_map.insert({addr, bp_index});
+ return Error();
}
Error NativeThreadNetBSD::RemoveHardwareBreakpoint(lldb::addr_t addr) {
- return Error("Unimplemented");
+ auto bp = m_hw_break_index_map.find(addr);
+ if (bp == m_hw_break_index_map.end())
+ return Error();
+
+ uint32_t bp_index = bp->second;
+ if (GetRegisterContext()->ClearHardwareBreakpoint(bp_index)) {
+ m_hw_break_index_map.erase(bp);
+ return Error();
+ }
+
+ return Error("Clearing hardware breakpoint failed.");
}
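Editor's note: a design point worth calling out is that SetWatchpoint above is made idempotent by removing any existing watchpoint at the same address before installing a new one, with m_watchpoint_index_map tracking address-to-slot assignments. A sketch of that bookkeeping pattern in isolation:

    // Sketch: one hardware slot per watched address, as used above.
    #include <cstdint>
    #include <map>

    std::map<uint64_t, uint32_t> index_map; // addr -> debug-register slot

    void recordSlot(uint64_t addr, uint32_t slot) {
      index_map.insert({addr, slot}); // after the hardware install succeeds
    }

    bool releaseSlot(uint64_t addr, uint32_t &slot) {
      auto it = index_map.find(addr); // removing an unknown addr is a no-op
      if (it == index_map.end())
        return false;
      slot = it->second;
      index_map.erase(it);
      return true; // caller then clears the hardware slot
    }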
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.h b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.h
index 85fff5d5653f..96d7fd0ce03b 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.h
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.h
@@ -12,6 +12,9 @@
#include "lldb/Host/common/NativeThreadProtocol.h"
+#include <map>
+#include <string>
+
namespace lldb_private {
namespace process_netbsd {
@@ -53,6 +56,7 @@ private:
void SetStoppedByBreakpoint();
void SetStoppedByTrace();
void SetStoppedByExec();
+ void SetStoppedByWatchpoint(uint32_t wp_index);
void SetStopped();
void SetRunning();
void SetStepping();
@@ -64,6 +68,9 @@ private:
ThreadStopInfo m_stop_info;
NativeRegisterContextSP m_reg_context_sp;
std::string m_stop_description;
+ using WatchpointIndexMap = std::map<lldb::addr_t, uint32_t>;
+ WatchpointIndexMap m_watchpoint_index_map;
+ WatchpointIndexMap m_hw_break_index_map;
};
typedef std::shared_ptr<NativeThreadNetBSD> NativeThreadNetBSDSP;
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterContextPOSIX_mips64.cpp b/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterContextPOSIX_mips64.cpp
index 207c69313282..6a55947ba5c2 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterContextPOSIX_mips64.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterContextPOSIX_mips64.cpp
@@ -55,9 +55,10 @@ RegisterContextPOSIX_mips64::RegisterContextPOSIX_mips64(
m_registers_count[i] = reg_set_ptr->num_registers;
}
- assert(m_num_registers == m_registers_count[gpr_registers_count] +
- m_registers_count[fpr_registers_count] +
- m_registers_count[msa_registers_count]);
+ assert(m_num_registers ==
+ static_cast<uint32_t>(m_registers_count[gpr_registers_count] +
+ m_registers_count[fpr_registers_count] +
+ m_registers_count[msa_registers_count]));
// elf-core does not yet support ReadFPR()
ProcessSP base = CalculateProcess();
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterInfos_x86_64.h b/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterInfos_x86_64.h
index 2ba8059911a0..8861ecd66806 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterInfos_x86_64.h
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/Utility/RegisterInfos_x86_64.h
@@ -148,7 +148,7 @@
DR_OFFSET(i), eEncodingUint, eFormatHex, \
{LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, \
LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, \
- LLDB_INVALID_REGNUM }, \
+ lldb_##reg##i##_x86_64 }, \
nullptr, nullptr, nullptr, 0 \
}
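// Illustrative note (not from the patch): the kinds array above is filled by
// token pasting. Assuming the enclosing macro is invoked with reg=dr and i=0
// (the macro's own name is outside this hunk), the last slot now expands as
//   lldb_##reg##i##_x86_64  ->  lldb_dr0_x86_64
// so the LLDB register-kind slot names the dr0 enumerator instead of
// LLDB_INVALID_REGNUM.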
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp b/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp
index 7ef253decad6..d527b4daaab9 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.cpp
@@ -1310,12 +1310,20 @@ void GDBRemoteCommunication::DumpHistory(Stream &strm) { m_history.Dump(strm); }
GDBRemoteCommunication::ScopedTimeout::ScopedTimeout(
GDBRemoteCommunication &gdb_comm, std::chrono::seconds timeout)
- : m_gdb_comm(gdb_comm) {
- m_saved_timeout = m_gdb_comm.SetPacketTimeout(timeout);
+ : m_gdb_comm(gdb_comm), m_timeout_modified(false) {
+ auto curr_timeout = gdb_comm.GetPacketTimeout();
+ // Only update the timeout if the requested timeout is greater than the
+ // current timeout; if the current timeout is already larger, keep it.
+ if (curr_timeout < timeout) {
+ m_timeout_modified = true;
+ m_saved_timeout = m_gdb_comm.SetPacketTimeout(timeout);
+ }
}
GDBRemoteCommunication::ScopedTimeout::~ScopedTimeout() {
- m_gdb_comm.SetPacketTimeout(m_saved_timeout);
+ // Only restore the timeout if we set it in the constructor.
+ if (m_timeout_modified)
+ m_gdb_comm.SetPacketTimeout(m_saved_timeout);
}
// This function is called via the Communications class read thread when bytes
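// Illustrative sketch (not from the patch): ScopedTimeout is an RAII guard
// that now only ever raises the packet timeout and restores it only when it
// actually changed something. The same shape against a stub (not LLDB API):
#include <cassert>
#include <chrono>

struct StubComm {
  std::chrono::seconds timeout{1};
  std::chrono::seconds GetPacketTimeout() const { return timeout; }
  std::chrono::seconds SetPacketTimeout(std::chrono::seconds t) {
    auto old = timeout;
    timeout = t;
    return old;
  }
};

class ScopedTimeoutSketch {
  StubComm &m_comm;
  std::chrono::seconds m_saved{};
  bool m_modified = false; // set only if we actually raised the timeout

public:
  ScopedTimeoutSketch(StubComm &comm, std::chrono::seconds timeout)
      : m_comm(comm) {
    if (comm.GetPacketTimeout() < timeout) { // never reduce a larger timeout
      m_modified = true;
      m_saved = comm.SetPacketTimeout(timeout);
    }
  }
  ~ScopedTimeoutSketch() {
    if (m_modified) // restore only what we changed
      m_comm.SetPacketTimeout(m_saved);
  }
};

int main() {
  StubComm comm;
  {
    ScopedTimeoutSketch raise(comm, std::chrono::seconds(5));
    assert(comm.GetPacketTimeout() == std::chrono::seconds(5));
  }
  assert(comm.GetPacketTimeout() == std::chrono::seconds(1)); // restored
  {
    ScopedTimeoutSketch lower(comm, std::chrono::seconds(0)); // no-op
    assert(comm.GetPacketTimeout() == std::chrono::seconds(1));
  }
}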
diff --git a/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.h b/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.h
index 1f3fa17cfc26..b49e05e22d95 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.h
+++ b/contrib/llvm/tools/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunication.h
@@ -89,6 +89,10 @@ public:
private:
GDBRemoteCommunication &m_gdb_comm;
std::chrono::seconds m_saved_timeout;
+ // Don't ever reduce the timeout for a packet, only increase it. If the
+ // requested timeout is less than the current timeout, we don't set it
+ // and won't need to restore it.
+ bool m_timeout_modified;
};
GDBRemoteCommunication(const char *comm_name, const char *listener_name);
diff --git a/contrib/llvm/tools/lldb/source/Symbol/ClangASTContext.cpp b/contrib/llvm/tools/lldb/source/Symbol/ClangASTContext.cpp
index 2f4c0ac75b26..d4c303111343 100644
--- a/contrib/llvm/tools/lldb/source/Symbol/ClangASTContext.cpp
+++ b/contrib/llvm/tools/lldb/source/Symbol/ClangASTContext.cpp
@@ -4484,7 +4484,8 @@ ClangASTContext::GetNumMemberFunctions(lldb::opaque_compiler_type_t type) {
const clang::ObjCInterfaceType *objc_interface_type =
objc_class_type->getInterfaceType();
if (objc_interface_type &&
- GetCompleteType((lldb::opaque_compiler_type_t)objc_interface_type)) {
+ GetCompleteType(static_cast<lldb::opaque_compiler_type_t>(
+ const_cast<clang::ObjCInterfaceType *>(objc_interface_type)))) {
clang::ObjCInterfaceDecl *class_interface_decl =
objc_interface_type->getDecl();
if (class_interface_decl) {
@@ -4592,7 +4593,8 @@ ClangASTContext::GetMemberFunctionAtIndex(lldb::opaque_compiler_type_t type,
const clang::ObjCInterfaceType *objc_interface_type =
objc_class_type->getInterfaceType();
if (objc_interface_type &&
- GetCompleteType((lldb::opaque_compiler_type_t)objc_interface_type)) {
+ GetCompleteType(static_cast<lldb::opaque_compiler_type_t>(
+ const_cast<clang::ObjCInterfaceType *>(objc_interface_type)))) {
clang::ObjCInterfaceDecl *class_interface_decl =
objc_interface_type->getDecl();
if (class_interface_decl) {
@@ -5660,7 +5662,8 @@ uint32_t ClangASTContext::GetNumFields(lldb::opaque_compiler_type_t type) {
const clang::ObjCInterfaceType *objc_interface_type =
objc_class_type->getInterfaceType();
if (objc_interface_type &&
- GetCompleteType((lldb::opaque_compiler_type_t)objc_interface_type)) {
+ GetCompleteType(static_cast<lldb::opaque_compiler_type_t>(
+ const_cast<clang::ObjCInterfaceType *>(objc_interface_type)))) {
clang::ObjCInterfaceDecl *class_interface_decl =
objc_interface_type->getDecl();
if (class_interface_decl) {
@@ -5807,7 +5810,8 @@ CompilerType ClangASTContext::GetFieldAtIndex(lldb::opaque_compiler_type_t type,
const clang::ObjCInterfaceType *objc_interface_type =
objc_class_type->getInterfaceType();
if (objc_interface_type &&
- GetCompleteType((lldb::opaque_compiler_type_t)objc_interface_type)) {
+ GetCompleteType(static_cast<lldb::opaque_compiler_type_t>(
+ const_cast<clang::ObjCInterfaceType *>(objc_interface_type)))) {
clang::ObjCInterfaceDecl *class_interface_decl =
objc_interface_type->getDecl();
if (class_interface_decl) {
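// Illustrative note (not from the patch): all four ClangASTContext hunks make
// the same change. lldb::opaque_compiler_type_t is a void *, and the old
// C-style cast silently stripped the const qualifier; static_cast alone will
// not, so the conversion is written as const_cast + static_cast. In plain C++:
struct Foo {};
void cast_demo(const Foo *p) {
  void *a = (void *)p; // C-style cast compiles, but hides the const drop
  // void *b = static_cast<void *>(p);           // error: casts away const
  void *c = static_cast<void *>(const_cast<Foo *>(p)); // intent is explicit
  (void)a;
  (void)c;
}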
diff --git a/contrib/llvm/tools/lldb/source/Utility/StringLexer.cpp b/contrib/llvm/tools/lldb/source/Utility/StringLexer.cpp
index 77484d6e43fb..d5c7fc628988 100644
--- a/contrib/llvm/tools/lldb/source/Utility/StringLexer.cpp
+++ b/contrib/llvm/tools/lldb/source/Utility/StringLexer.cpp
@@ -73,10 +73,6 @@ void StringLexer::PutBack(Size s) {
m_position -= s;
}
-bool StringLexer::HasAny(Character c) {
- return m_data.find(c, m_position) != std::string::npos;
-}
-
std::string StringLexer::GetUnlexed() {
return std::string(m_data, m_position);
}
diff --git a/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
index abc6fa27a0e0..676134ca2368 100644
--- a/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
+++ b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
@@ -122,6 +122,7 @@ static const char *GetBlockName(unsigned BlockID,
case bitc::GLOBALVAL_SUMMARY_BLOCK_ID:
return "GLOBALVAL_SUMMARY_BLOCK";
case bitc::MODULE_STRTAB_BLOCK_ID: return "MODULE_STRTAB_BLOCK";
+ case bitc::STRTAB_BLOCK_ID: return "STRTAB_BLOCK";
}
}
@@ -315,6 +316,7 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
STRINGIFY_CODE(FS, TYPE_CHECKED_LOAD_VCALLS)
STRINGIFY_CODE(FS, TYPE_TEST_ASSUME_CONST_VCALL)
STRINGIFY_CODE(FS, TYPE_CHECKED_LOAD_CONST_VCALL)
+ STRINGIFY_CODE(FS, VALUE_GUID)
}
case bitc::METADATA_ATTACHMENT_ID:
switch(CodeID) {
@@ -381,6 +383,11 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
default: return nullptr;
case bitc::OPERAND_BUNDLE_TAG: return "OPERAND_BUNDLE_TAG";
}
+ case bitc::STRTAB_BLOCK_ID:
+ switch(CodeID) {
+ default: return nullptr;
+ case bitc::STRTAB_BLOB: return "BLOB";
+ }
}
#undef STRINGIFY_CODE
}
diff --git a/contrib/llvm/tools/llvm-modextract/llvm-modextract.cpp b/contrib/llvm/tools/llvm-modextract/llvm-modextract.cpp
index 6c2e364be448..58cede1374ea 100644
--- a/contrib/llvm/tools/llvm-modextract/llvm-modextract.cpp
+++ b/contrib/llvm/tools/llvm-modextract/llvm-modextract.cpp
@@ -59,9 +59,12 @@ int main(int argc, char **argv) {
ExitOnErr(errorCodeToError(EC));
if (BinaryExtract) {
- SmallVector<char, 0> Header;
- BitcodeWriter Writer(Header);
- Out->os() << Header << Ms[ModuleIndex].getBuffer();
+ SmallVector<char, 0> Result;
+ BitcodeWriter Writer(Result);
+ Result.append(Ms[ModuleIndex].getBuffer().begin(),
+ Ms[ModuleIndex].getBuffer().end());
+ Writer.copyStrtab(Ms[ModuleIndex].getStrtab());
+ Out->os() << Result;
Out->keep();
return 0;
}
diff --git a/contrib/llvm/tools/llvm-xray/xray-extract.cc b/contrib/llvm/tools/llvm-xray/xray-extract.cc
index 26e461869a08..d7015a05b0f2 100644
--- a/contrib/llvm/tools/llvm-xray/xray-extract.cc
+++ b/contrib/llvm/tools/llvm-xray/xray-extract.cc
@@ -16,6 +16,7 @@
#include <type_traits>
#include <utility>
+#include "func-id-helper.h"
#include "xray-registry.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ObjectFile.h"
@@ -45,10 +46,18 @@ static cl::opt<std::string>
static cl::alias ExtractOutput2("o", cl::aliasopt(ExtractOutput),
cl::desc("Alias for -output"),
cl::sub(Extract));
+static cl::opt<bool> ExtractSymbolize("symbolize", cl::value_desc("symbolize"),
+ cl::init(false),
+ cl::desc("symbolize functions"),
+ cl::sub(Extract));
+static cl::alias ExtractSymbolize2("s", cl::aliasopt(ExtractSymbolize),
+ cl::desc("alias for -symbolize"),
+ cl::sub(Extract));
namespace {
-void exportAsYAML(const InstrumentationMap &Map, raw_ostream &OS) {
+void exportAsYAML(const InstrumentationMap &Map, raw_ostream &OS,
+ FuncIdConversionHelper &FH) {
// First we translate the sleds into YAMLXRaySledEntry objects in a vector.
std::vector<YAMLXRaySledEntry> YAMLSleds;
auto Sleds = Map.sleds();
@@ -58,7 +67,8 @@ void exportAsYAML(const InstrumentationMap &Map, raw_ostream &OS) {
if (!FuncId)
return;
YAMLSleds.push_back({*FuncId, Sled.Address, Sled.Function, Sled.Kind,
- Sled.AlwaysInstrument});
+ Sled.AlwaysInstrument,
+ ExtractSymbolize ? FH.SymbolOrNumber(*FuncId) : ""});
}
Output Out(OS, nullptr, 0);
Out << YAMLSleds;
@@ -80,6 +90,13 @@ static CommandRegistration Unused(&Extract, []() -> Error {
if (EC)
return make_error<StringError>(
Twine("Cannot open file '") + ExtractOutput + "' for writing.", EC);
- exportAsYAML(*InstrumentationMapOrError, OS);
+ const auto &FunctionAddresses =
+ InstrumentationMapOrError->getFunctionAddresses();
+ symbolize::LLVMSymbolizer::Options Opts(
+ symbolize::FunctionNameKind::LinkageName, true, true, false, "");
+ symbolize::LLVMSymbolizer Symbolizer(Opts);
+ llvm::xray::FuncIdConversionHelper FuncIdHelper(ExtractInput, Symbolizer,
+ FunctionAddresses);
+ exportAsYAML(*InstrumentationMapOrError, OS, FuncIdHelper);
return Error::success();
});
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
index d93511b0d873..03c58ac09c2d 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -126,6 +126,45 @@ StringRef llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::v2f64: return "MVT::v2f64";
case MVT::v4f64: return "MVT::v4f64";
case MVT::v8f64: return "MVT::v8f64";
+ case MVT::nxv2i1: return "MVT::nxv2i1";
+ case MVT::nxv4i1: return "MVT::nxv4i1";
+ case MVT::nxv8i1: return "MVT::nxv8i1";
+ case MVT::nxv16i1: return "MVT::nxv16i1";
+ case MVT::nxv32i1: return "MVT::nxv32i1";
+ case MVT::nxv1i8: return "MVT::nxv1i8";
+ case MVT::nxv2i8: return "MVT::nxv2i8";
+ case MVT::nxv4i8: return "MVT::nxv4i8";
+ case MVT::nxv8i8: return "MVT::nxv8i8";
+ case MVT::nxv16i8: return "MVT::nxv16i8";
+ case MVT::nxv32i8: return "MVT::nxv32i8";
+ case MVT::nxv1i16: return "MVT::nxv1i16";
+ case MVT::nxv2i16: return "MVT::nxv2i16";
+ case MVT::nxv4i16: return "MVT::nxv4i16";
+ case MVT::nxv8i16: return "MVT::nxv8i16";
+ case MVT::nxv16i16: return "MVT::nxv16i16";
+ case MVT::nxv32i16: return "MVT::nxv32i16";
+ case MVT::nxv1i32: return "MVT::nxv1i32";
+ case MVT::nxv2i32: return "MVT::nxv2i32";
+ case MVT::nxv4i32: return "MVT::nxv4i32";
+ case MVT::nxv8i32: return "MVT::nxv8i32";
+ case MVT::nxv16i32: return "MVT::nxv16i32";
+ case MVT::nxv1i64: return "MVT::nxv1i64";
+ case MVT::nxv2i64: return "MVT::nxv2i64";
+ case MVT::nxv4i64: return "MVT::nxv4i64";
+ case MVT::nxv8i64: return "MVT::nxv8i64";
+ case MVT::nxv16i64: return "MVT::nxv16i64";
+ case MVT::nxv2f16: return "MVT::nxv2f16";
+ case MVT::nxv4f16: return "MVT::nxv4f16";
+ case MVT::nxv8f16: return "MVT::nxv8f16";
+ case MVT::nxv1f32: return "MVT::nxv1f32";
+ case MVT::nxv2f32: return "MVT::nxv2f32";
+ case MVT::nxv4f32: return "MVT::nxv4f32";
+ case MVT::nxv8f32: return "MVT::nxv8f32";
+ case MVT::nxv16f32: return "MVT::nxv16f32";
+ case MVT::nxv1f64: return "MVT::nxv1f64";
+ case MVT::nxv2f64: return "MVT::nxv2f64";
+ case MVT::nxv4f64: return "MVT::nxv4f64";
+ case MVT::nxv8f64: return "MVT::nxv8f64";
case MVT::token: return "MVT::token";
case MVT::Metadata: return "MVT::Metadata";
case MVT::iPTR: return "MVT::iPTR";
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
index e9dd2fa0aca0..e979b94e46d6 100644
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -84,14 +84,11 @@ void IntrinsicEmitter::run(raw_ostream &OS) {
// Emit the intrinsic parameter attributes.
EmitAttributes(Ints, OS);
- // Individual targets don't need GCC builtin name mappings.
- if (!TargetOnly) {
- // Emit code to translate GCC builtins into LLVM intrinsics.
- EmitIntrinsicToBuiltinMap(Ints, true, OS);
+ // Emit code to translate GCC builtins into LLVM intrinsics.
+ EmitIntrinsicToBuiltinMap(Ints, true, OS);
- // Emit code to translate MS builtins into LLVM intrinsics.
- EmitIntrinsicToBuiltinMap(Ints, false, OS);
- }
+ // Emit code to translate MS builtins into LLVM intrinsics.
+ EmitIntrinsicToBuiltinMap(Ints, false, OS);
EmitSuffix(OS);
}
@@ -756,6 +753,17 @@ void IntrinsicEmitter::EmitIntrinsicToBuiltinMap(
<< "Builtin(const char "
<< "*TargetPrefixStr, StringRef BuiltinNameStr) {\n";
}
+
+ if (Table.Empty()) {
+ OS << " return ";
+ if (!TargetPrefix.empty())
+ OS << "(" << TargetPrefix << "Intrinsic::ID)";
+ OS << "Intrinsic::not_intrinsic;\n";
+ OS << "}\n";
+ OS << "#endif\n\n";
+ return;
+ }
+
OS << " static const char BuiltinNames[] = {\n";
Table.EmitCharArray(OS);
OS << " };\n\n";
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index cf7c05296744..c443cf06859e 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
-#define SVN_REVISION "300422"
+#define SVN_REVISION "300890"
diff --git a/lib/clang/include/lld/Config/Version.inc b/lib/clang/include/lld/Config/Version.inc
index 795d8cb0da43..cb6eb9e6bcc5 100644
--- a/lib/clang/include/lld/Config/Version.inc
+++ b/lib/clang/include/lld/Config/Version.inc
@@ -4,5 +4,5 @@
#define LLD_VERSION_STRING "5.0.0"
#define LLD_VERSION_MAJOR 5
#define LLD_VERSION_MINOR 0
-#define LLD_REVISION_STRING "300422"
+#define LLD_REVISION_STRING "300890"
#define LLD_REPOSITORY_STRING "FreeBSD"
diff --git a/lib/clang/include/llvm/Support/VCSRevision.h b/lib/clang/include/llvm/Support/VCSRevision.h
index c36904c385b6..56f0acc3122f 100644
--- a/lib/clang/include/llvm/Support/VCSRevision.h
+++ b/lib/clang/include/llvm/Support/VCSRevision.h
@@ -1,2 +1,2 @@
/* $FreeBSD$ */
-#define LLVM_REVISION "svn-r300422"
+#define LLVM_REVISION "svn-r300890"