Diffstat (limited to 'include/llvm')
-rw-r--r--  include/llvm/ADT/Triple.h | 1
-rw-r--r--  include/llvm/Analysis/InstructionSimplify.h | 269
-rw-r--r--  include/llvm/Analysis/LoopPass.h | 5
-rw-r--r--  include/llvm/Analysis/ScalarEvolution.h | 6
-rw-r--r--  include/llvm/Analysis/TargetTransformInfo.h | 7
-rw-r--r--  include/llvm/Analysis/TargetTransformInfoImpl.h | 2
-rw-r--r--  include/llvm/Analysis/ValueTracking.h | 3
-rw-r--r--  include/llvm/CodeGen/AsmPrinter.h | 6
-rw-r--r--  include/llvm/CodeGen/AtomicExpandUtils.h | 26
-rw-r--r--  include/llvm/CodeGen/DIE.h | 86
-rw-r--r--  include/llvm/CodeGen/FaultMaps.h | 35
-rw-r--r--  include/llvm/CodeGen/GlobalISel/Localizer.h | 78
-rw-r--r--  include/llvm/CodeGen/ISDOpcodes.h | 8
-rw-r--r--  include/llvm/CodeGen/LiveInterval.h | 86
-rw-r--r--  include/llvm/CodeGen/LiveIntervalAnalysis.h | 43
-rw-r--r--  include/llvm/CodeGen/LiveIntervalUnion.h | 14
-rw-r--r--  include/llvm/CodeGen/LivePhysRegs.h | 116
-rw-r--r--  include/llvm/CodeGen/LiveRangeEdit.h | 80
-rw-r--r--  include/llvm/CodeGen/LiveStackAnalysis.h | 23
-rw-r--r--  include/llvm/CodeGen/MachineBasicBlock.h | 121
-rw-r--r--  include/llvm/CodeGen/MachineBlockFrequencyInfo.h | 17
-rw-r--r--  include/llvm/CodeGen/MachineDominanceFrontier.h | 23
-rw-r--r--  include/llvm/CodeGen/MachineDominators.h | 18
-rw-r--r--  include/llvm/CodeGen/MachineInstr.h | 35
-rw-r--r--  include/llvm/CodeGen/MachineRegisterInfo.h | 5
-rw-r--r--  include/llvm/CodeGen/MachineValueType.h | 2
-rw-r--r--  include/llvm/CodeGen/ScheduleDAG.h | 6
-rw-r--r--  include/llvm/CodeGen/ScheduleDAGInstrs.h | 5
-rw-r--r--  include/llvm/CodeGen/SelectionDAG.h | 5
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h | 26
-rw-r--r--  include/llvm/DebugInfo/CodeView/CVRecord.h | 8
-rw-r--r--  include/llvm/DebugInfo/CodeView/CVTypeVisitor.h | 1
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeDeserializer.h | 11
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h | 33
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeRecord.h | 11
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeSerializer.h | 37
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeStreamMerger.h | 69
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeTableBuilder.h | 31
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeTableCollection.h | 4
-rw-r--r--  include/llvm/DebugInfo/DWARF/DWARFContext.h | 18
-rw-r--r--  include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h | 3
-rw-r--r--  include/llvm/DebugInfo/DWARF/DWARFDie.h | 3
-rw-r--r--  include/llvm/DebugInfo/DWARF/DWARFFormValue.h | 2
-rw-r--r--  include/llvm/DebugInfo/DWARF/DWARFRelocMap.h | 3
-rw-r--r--  include/llvm/DebugInfo/DWARF/DWARFUnit.h | 12
-rw-r--r--  include/llvm/DebugInfo/MSF/MappedBlockStream.h | 16
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h | 1
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/PDBTypeServerHandler.h | 5
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/TpiStream.h | 7
-rw-r--r--  include/llvm/IR/Attributes.h | 46
-rw-r--r--  include/llvm/IR/BasicBlock.h | 45
-rw-r--r--  include/llvm/IR/IntrinsicInst.h | 13
-rw-r--r--  include/llvm/IR/Intrinsics.td | 58
-rw-r--r--  include/llvm/IR/IntrinsicsAMDGPU.td | 10
-rw-r--r--  include/llvm/IR/Metadata.h | 1
-rw-r--r--  include/llvm/IR/Module.h | 5
-rw-r--r--  include/llvm/InitializePasses.h | 2
-rw-r--r--  include/llvm/LTO/Config.h | 2
-rw-r--r--  include/llvm/Object/Binary.h | 4
-rw-r--r--  include/llvm/Object/COFF.h | 1
-rw-r--r--  include/llvm/Object/ELFObjectFile.h | 12
-rw-r--r--  include/llvm/Object/MachO.h | 1
-rw-r--r--  include/llvm/Object/ObjectFile.h | 6
-rw-r--r--  include/llvm/Object/RelocVisitor.h | 513
-rw-r--r--  include/llvm/Object/Wasm.h | 1
-rw-r--r--  include/llvm/Option/OptTable.h | 8
-rw-r--r--  include/llvm/ProfileData/InstrProf.h | 6
-rw-r--r--  include/llvm/TableGen/Record.h | 48
-rw-r--r--  include/llvm/Target/TargetLowering.h | 6
-rw-r--r--  include/llvm/Transforms/Scalar.h | 7
-rw-r--r--  include/llvm/Transforms/Scalar/GVN.h | 34
-rw-r--r--  include/llvm/Transforms/Utils/Local.h | 12
72 files changed, 1344 insertions, 929 deletions
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index 3a4a37017d61..07626982d289 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -59,6 +59,7 @@ public:
mips64, // MIPS64: mips64
mips64el, // MIPS64EL: mips64el
msp430, // MSP430: msp430
+ nios2, // NIOSII: nios2
ppc, // PPC: powerpc
ppc64, // PPC64: powerpc64, ppu
ppc64le, // PPC64LE: powerpc64le
diff --git a/include/llvm/Analysis/InstructionSimplify.h b/include/llvm/Analysis/InstructionSimplify.h
index bf73e099a2bf..ca48b5483512 100644
--- a/include/llvm/Analysis/InstructionSimplify.h
+++ b/include/llvm/Analysis/InstructionSimplify.h
@@ -70,174 +70,173 @@ struct SimplifyQuery {
Copy.CxtI = I;
return Copy;
}
- };
+};
- // NOTE: the explicit multiple argument versions of these functions are
- // deprecated.
- // Please use the SimplifyQuery versions in new code.
+// NOTE: the explicit multiple argument versions of these functions are
+// deprecated.
+// Please use the SimplifyQuery versions in new code.
- /// Given operands for an Add, fold the result or return null.
- Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
+/// Given operands for an Add, fold the result or return null.
+Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
- /// Given operands for a Sub, fold the result or return null.
- Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const SimplifyQuery &Q);
+/// Given operands for a Sub, fold the result or return null.
+Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
+ const SimplifyQuery &Q);
- /// Given operands for an FAdd, fold the result or return null.
- Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const SimplifyQuery &Q);
+/// Given operands for an FAdd, fold the result or return null.
+Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const SimplifyQuery &Q);
- /// Given operands for an FSub, fold the result or return null.
- Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const SimplifyQuery &Q);
+/// Given operands for an FSub, fold the result or return null.
+Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const SimplifyQuery &Q);
- /// Given operands for an FMul, fold the result or return null.
- Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const SimplifyQuery &Q);
+/// Given operands for an FMul, fold the result or return null.
+Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const SimplifyQuery &Q);
- /// Given operands for a Mul, fold the result or return null.
- Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for a Mul, fold the result or return null.
+Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for an SDiv, fold the result or return null.
- Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for an SDiv, fold the result or return null.
+Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for a UDiv, fold the result or return null.
- Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for a UDiv, fold the result or return null.
+Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for an FDiv, fold the result or return null.
- Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const SimplifyQuery &Q);
+/// Given operands for an FDiv, fold the result or return null.
+Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const SimplifyQuery &Q);
- /// Given operands for an SRem, fold the result or return null.
- Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for an SRem, fold the result or return null.
+Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for a URem, fold the result or return null.
- Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for a URem, fold the result or return null.
+Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for an FRem, fold the result or return null.
- Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const SimplifyQuery &Q);
-
- /// Given operands for a Shl, fold the result or return null.
- Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const SimplifyQuery &Q);
-
- /// Given operands for a LShr, fold the result or return null.
- Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const SimplifyQuery &Q);
+/// Given operands for an FRem, fold the result or return null.
+Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const SimplifyQuery &Q);
- /// Given operands for an AShr, fold the result or return null.
- Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const SimplifyQuery &Q);
+/// Given operands for a Shl, fold the result or return null.
+Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const SimplifyQuery &Q);
- /// Given operands for an And, fold the result or return null.
- Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for a LShr, fold the result or return null.
+Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
+ const SimplifyQuery &Q);
- /// Given operands for an Or, fold the result or return null.
- Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for an AShr, fold the result or return null.
+Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
+ const SimplifyQuery &Q);
- /// Given operands for an Xor, fold the result or return null.
- Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+/// Given operands for an And, fold the result or return null.
+Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for an ICmpInst, fold the result or return null.
- Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const SimplifyQuery &Q);
+/// Given operands for an Or, fold the result or return null.
+Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for an FCmpInst, fold the result or return null.
- Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- FastMathFlags FMF, const SimplifyQuery &Q);
+/// Given operands for an Xor, fold the result or return null.
+Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- /// Given operands for a SelectInst, fold the result or return null.
- Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const SimplifyQuery &Q);
+/// Given operands for an ICmpInst, fold the result or return null.
+Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const SimplifyQuery &Q);
- /// Given operands for a GetElementPtrInst, fold the result or return null.
- Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
- const SimplifyQuery &Q);
+/// Given operands for an FCmpInst, fold the result or return null.
+Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ FastMathFlags FMF, const SimplifyQuery &Q);
- /// Given operands for an InsertValueInst, fold the result or return null.
- Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
- ArrayRef<unsigned> Idxs,
- const SimplifyQuery &Q);
+/// Given operands for a SelectInst, fold the result or return null.
+Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
+ const SimplifyQuery &Q);
- /// Given operands for an ExtractValueInst, fold the result or return null.
- Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
- const SimplifyQuery &Q);
+/// Given operands for a GetElementPtrInst, fold the result or return null.
+Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
+ const SimplifyQuery &Q);
- /// Given operands for an ExtractElementInst, fold the result or return null.
- Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
- const SimplifyQuery &Q);
+/// Given operands for an InsertValueInst, fold the result or return null.
+Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+ const SimplifyQuery &Q);
- /// Given operands for a CastInst, fold the result or return null.
- Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
- const SimplifyQuery &Q);
+/// Given operands for an ExtractValueInst, fold the result or return null.
+Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
+ const SimplifyQuery &Q);
- /// Given operands for a ShuffleVectorInst, fold the result or return null.
- Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
- Type *RetTy, const SimplifyQuery &Q);
+/// Given operands for an ExtractElementInst, fold the result or return null.
+Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
+ const SimplifyQuery &Q);
- //=== Helper functions for higher up the class hierarchy.
+/// Given operands for a CastInst, fold the result or return null.
+Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
+ const SimplifyQuery &Q);
+/// Given operands for a ShuffleVectorInst, fold the result or return null.
+Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
+ Type *RetTy, const SimplifyQuery &Q);
- /// Given operands for a CmpInst, fold the result or return null.
- Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const SimplifyQuery &Q);
+//=== Helper functions for higher up the class hierarchy.
- /// Given operands for a BinaryOperator, fold the result or return null.
- Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+/// Given operands for a CmpInst, fold the result or return null.
+Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
- /// Given operands for an FP BinaryOperator, fold the result or return null.
- /// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
- /// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
- Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- FastMathFlags FMF, const SimplifyQuery &Q);
-
- /// Given a function and iterators over arguments, fold the result or return
- /// null.
- Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const SimplifyQuery &Q);
-
- /// Given a function and set of arguments, fold the result or return null.
- Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q);
-
- /// See if we can compute a simplified version of this instruction. If not,
- /// return null.
- Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
- OptimizationRemarkEmitter *ORE = nullptr);
-
- /// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
- ///
- /// This first performs a normal RAUW of I with SimpleV. It then recursively
- /// attempts to simplify those users updated by the operation. The 'I'
- /// instruction must not be equal to the simplified value 'SimpleV'.
- ///
- /// The function returns true if any simplifications were performed.
- bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr);
-
- /// Recursively attempt to simplify an instruction.
- ///
- /// This routine uses SimplifyInstruction to simplify 'I', and if successful
- /// replaces uses of 'I' with the simplified value. It then recurses on each
- /// of the users impacted. It returns true if any simplifications were
- /// performed.
- bool recursivelySimplifyInstruction(Instruction *I,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr);
- // These helper functions return a SimplifyQuery structure that contains as
- // many of the optional analyses we use as are currently valid. This is the
- // strongly preferred way of constructing SimplifyQuery in passes.
- const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
- template <class T, class... TArgs>
- const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
- Function &);
- const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
- const DataLayout &);
+/// Given operands for a BinaryOperator, fold the result or return null.
+Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const SimplifyQuery &Q);
+
+/// Given operands for an FP BinaryOperator, fold the result or return null.
+/// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
+/// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
+Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ FastMathFlags FMF, const SimplifyQuery &Q);
+
+/// Given a function and iterators over arguments, fold the result or return
+/// null.
+Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
+ User::op_iterator ArgEnd, const SimplifyQuery &Q);
+
+/// Given a function and set of arguments, fold the result or return null.
+Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q);
+
+/// See if we can compute a simplified version of this instruction. If not,
+/// return null.
+Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
+ OptimizationRemarkEmitter *ORE = nullptr);
+
+/// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
+///
+/// This first performs a normal RAUW of I with SimpleV. It then recursively
+/// attempts to simplify those users updated by the operation. The 'I'
+/// instruction must not be equal to the simplified value 'SimpleV'.
+///
+/// The function returns true if any simplifications were performed.
+bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr);
+
+/// Recursively attempt to simplify an instruction.
+///
+/// This routine uses SimplifyInstruction to simplify 'I', and if successful
+/// replaces uses of 'I' with the simplified value. It then recurses on each
+/// of the users impacted. It returns true if any simplifications were
+/// performed.
+bool recursivelySimplifyInstruction(Instruction *I,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr);
+
+// These helper functions return a SimplifyQuery structure that contains as
+// many of the optional analyses we use as are currently valid. This is the
+// strongly preferred way of constructing SimplifyQuery in passes.
+const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
+template <class T, class... TArgs>
+const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
+ Function &);
+const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
+ const DataLayout &);
} // end namespace llvm
#endif
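
(Not part of the commit: a minimal sketch of driving the SimplifyQuery-based entry points from a legacy pass. Only declarations shown in this header are used; the helper name simplifyAll is made up.)

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

using namespace llvm;

// Build one SimplifyQuery from whatever analyses the pass manager has kept
// valid, then try to fold every instruction in the function. Folded
// instructions are only RAUW'd here; dead-code cleanup is left to later passes.
static bool simplifyAll(Pass &P, Function &F) {
  const SimplifyQuery Q = getBestSimplifyQuery(P, F);
  bool Changed = false;
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (Value *V = SimplifyInstruction(&I, Q)) {
        I.replaceAllUsesWith(V);
        Changed = true;
      }
  return Changed;
}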
diff --git a/include/llvm/Analysis/LoopPass.h b/include/llvm/Analysis/LoopPass.h
index 496ae189e57b..75e7688bbdc2 100644
--- a/include/llvm/Analysis/LoopPass.h
+++ b/include/llvm/Analysis/LoopPass.h
@@ -126,9 +126,8 @@ public:
}
public:
- // Add a new loop into the loop queue as a child of the given parent, or at
- // the top level if \c ParentLoop is null.
- Loop &addLoop(Loop *ParentLoop);
+ // Add a new loop into the loop queue.
+ void addLoop(Loop &L);
//===--------------------------------------------------------------------===//
/// SimpleAnalysis - Provides simple interface to update analysis info
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index ac54bd4cfffb..4a6fc245c225 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -1533,6 +1533,12 @@ public:
/// specified loop.
bool isLoopInvariant(const SCEV *S, const Loop *L);
+ /// Determine if the SCEV can be evaluated at loop's entry. It is true if it
+ /// doesn't depend on a SCEVUnknown of an instruction which is dominated by
+ /// the header of loop L.
+ bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L, DominatorTree &DT,
+ LoopInfo &LI);
+
/// Return true if the given SCEV changes value in a known way in the
/// specified loop. This property being true implies that the value is
/// variant in the loop AND that we can emit an expression to compute the
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 0a0af384c3e6..6cbe3a1f515e 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -396,6 +396,9 @@ public:
bool isLegalMaskedScatter(Type *DataType) const;
bool isLegalMaskedGather(Type *DataType) const;
+ /// Return true if target doesn't mind addresses in vectors.
+ bool prefersVectorizedAddressing() const;
+
/// \brief Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store
/// of the specified type.
@@ -807,6 +810,7 @@ public:
virtual bool isLegalMaskedLoad(Type *DataType) = 0;
virtual bool isLegalMaskedScatter(Type *DataType) = 0;
virtual bool isLegalMaskedGather(Type *DataType) = 0;
+ virtual bool prefersVectorizedAddressing() = 0;
virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace) = 0;
@@ -1000,6 +1004,9 @@ public:
bool isLegalMaskedGather(Type *DataType) override {
return Impl.isLegalMaskedGather(DataType);
}
+ bool prefersVectorizedAddressing() override {
+ return Impl.prefersVectorizedAddressing();
+ }
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) override {
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index 550e84ad90c4..ad1a7cb748fe 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -237,6 +237,8 @@ public:
bool isLegalMaskedGather(Type *DataType) { return false; }
+ bool prefersVectorizedAddressing() { return true; }
+
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
// Guess that all legal addressing mode are free.
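
(Not part of the commit: a hedged sketch of how a target would opt out of the new hook. The MyTargetTTIImpl class is hypothetical; in-tree targets normally derive from BasicTTIImplBase, but the shadowing pattern is the same.)

#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// Inherits every default answer from TargetTransformInfoImplBase and only
// shadows prefersVectorizedAddressing(), telling clients that address
// computations should stay scalar on this (made-up) target.
class MyTargetTTIImpl : public TargetTransformInfoImplBase {
public:
  explicit MyTargetTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplBase(DL) {}

  bool prefersVectorizedAddressing() { return false; }
};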
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index cf24062e46f8..b1ee76159c4b 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -60,7 +60,8 @@ template <typename T> class ArrayRef;
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ OptimizationRemarkEmitter *ORE = nullptr);
/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
/// \p KnownOne the set of bits that are known to be one
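
(Not part of the commit: a small usage sketch. Only the mandatory Value and DataLayout arguments are passed; AC, CxtI, DT and the new ORE parameter all default to nullptr. The helper name isKnownEven is made up.)

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Returns true if V is provably even, i.e. its lowest bit is known zero.
static bool isKnownEven(const Value *V, const DataLayout &DL) {
  KnownBits Known = computeKnownBits(V, DL);
  return Known.Zero[0];
}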
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index 180c0b579248..c898667f1474 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -34,6 +34,7 @@
namespace llvm {
class AsmPrinterHandler;
+class BasicBlock;
class BlockAddress;
class Constant;
class ConstantArray;
@@ -43,6 +44,7 @@ class DIEAbbrev;
class DwarfDebug;
class GCMetadataPrinter;
class GlobalIndirectSymbol;
+class GlobalObject;
class GlobalValue;
class GlobalVariable;
class GCStrategy;
@@ -65,6 +67,8 @@ class MCSubtargetInfo;
class MCSymbol;
class MCTargetOptions;
class MDNode;
+class Module;
+class raw_ostream;
class TargetLoweringObjectFile;
class TargetMachine;
@@ -109,7 +113,7 @@ public:
/// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
/// its number of uses by other globals.
- typedef std::pair<const GlobalVariable *, unsigned> GOTEquivUsePair;
+ using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
/// Enable print [latency:throughput] in output
diff --git a/include/llvm/CodeGen/AtomicExpandUtils.h b/include/llvm/CodeGen/AtomicExpandUtils.h
index ac18eac8a1ce..1f9c96b18e1b 100644
--- a/include/llvm/CodeGen/AtomicExpandUtils.h
+++ b/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -1,4 +1,4 @@
-//===-- AtomicExpandUtils.h - Utilities for expanding atomic instructions -===//
+//===- AtomicExpandUtils.h - Utilities for expanding atomic instructions --===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,19 +7,24 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_ATOMICEXPANDUTILS_H
+#define LLVM_CODEGEN_ATOMICEXPANDUTILS_H
+
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
-class Value;
-class AtomicRMWInst;
+class AtomicRMWInst;
+class Value;
/// Parameters (see the expansion example below):
/// (the builder, %addr, %loaded, %new_val, ordering,
/// /* OUT */ %success, /* OUT */ %new_loaded)
-typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
- AtomicOrdering, Value *&, Value *&)> CreateCmpXchgInstFun;
+using CreateCmpXchgInstFun =
+ function_ref<void(IRBuilder<> &, Value *, Value *, Value *, AtomicOrdering,
+ Value *&, Value *&)>;
/// \brief Expand an atomic RMW instruction into a loop utilizing
/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
@@ -42,7 +47,8 @@ typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
/// loop:
/// %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
/// %new = some_op iN %loaded, %incr
-/// ; This is what -atomic-expand will produce using this function on i686 targets:
+/// ; This is what -atomic-expand will produce using this function on i686
+/// targets:
/// %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
/// %new_loaded = extractvalue { iN, i1 } %pair, 0
/// %success = extractvalue { iN, i1 } %pair, 1
@@ -52,6 +58,8 @@ typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
/// [...]
///
/// Returns true if the containing function was modified.
-bool
-expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
-}
+bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_ATOMICEXPANDUTILS_H
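
(Not part of the commit: a sketch of a callback matching the CreateCmpXchgInstFun signature above, emitting the plain cmpxchg shown in the expansion example. This is what a target's atomic-expansion hook could hand to expandAtomicRMWToCmpXchg.)

#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Emits %pair = cmpxchg iN* %Addr, iN %Loaded, iN %NewVal and unpacks the
// {value, success} pair into the two output parameters.
static void createCmpXchg(IRBuilder<> &Builder, Value *Addr, Value *Loaded,
                          Value *NewVal, AtomicOrdering Ordering,
                          Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, Ordering,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Ordering));
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Success = Builder.CreateExtractValue(Pair, 1, "success");
}

// Typical call site: expandAtomicRMWToCmpXchg(AI, createCmpXchg);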
diff --git a/include/llvm/CodeGen/DIE.h b/include/llvm/CodeGen/DIE.h
index a40147336fe2..4be44e62fa92 100644
--- a/include/llvm/CodeGen/DIE.h
+++ b/include/llvm/CodeGen/DIE.h
@@ -1,4 +1,4 @@
-//===--- lib/CodeGen/DIE.h - DWARF Info Entries -----------------*- C++ -*-===//
+//===- lib/CodeGen/DIE.h - DWARF Info Entries -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -31,6 +31,7 @@
#include <iterator>
#include <new>
#include <type_traits>
+#include <utility>
#include <vector>
namespace llvm {
@@ -53,11 +54,11 @@ class DIEAbbrevData {
dwarf::Form Form;
/// Dwarf attribute value for DW_FORM_implicit_const
- int64_t Value;
+ int64_t Value = 0;
public:
DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
- : Attribute(A), Form(F), Value(0) {}
+ : Attribute(A), Form(F) {}
DIEAbbrevData(dwarf::Attribute A, int64_t V)
: Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}
@@ -136,13 +137,14 @@ class DIEAbbrevSet {
/// storage container.
BumpPtrAllocator &Alloc;
/// \brief FoldingSet that uniques the abbreviations.
- llvm::FoldingSet<DIEAbbrev> AbbreviationsSet;
+ FoldingSet<DIEAbbrev> AbbreviationsSet;
/// A list of all the unique abbreviations in use.
std::vector<DIEAbbrev *> Abbreviations;
public:
DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
~DIEAbbrevSet();
+
/// Generate the abbreviation declaration for a DIE and return a pointer to
/// the generated abbreviation.
///
@@ -289,13 +291,11 @@ public:
/// A pointer to another debug information entry. An instance of this class can
/// also be used as a proxy for a debug information entry not yet defined
/// (ie. types.)
-class DIE;
class DIEEntry {
DIE *Entry;
- DIEEntry() = delete;
-
public:
+ DIEEntry() = delete;
explicit DIEEntry(DIE &E) : Entry(&E) {}
DIE &getEntry() const { return *Entry; }
@@ -348,10 +348,10 @@ private:
///
/// All values that aren't standard layout (or are larger than 8 bytes)
/// should be stored by reference instead of by value.
- typedef AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
- DIEDelta *, DIEEntry, DIEBlock *, DIELoc *,
- DIELocList>
- ValTy;
+ using ValTy = AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
+ DIEDelta *, DIEEntry, DIEBlock *,
+ DIELoc *, DIELocList>;
+
static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
sizeof(ValTy) <= sizeof(void *),
"Expected all large types to be stored via pointer");
@@ -486,10 +486,12 @@ struct IntrusiveBackListNode {
};
struct IntrusiveBackListBase {
- typedef IntrusiveBackListNode Node;
+ using Node = IntrusiveBackListNode;
+
Node *Last = nullptr;
bool empty() const { return !Last; }
+
void push_back(Node &N) {
assert(N.Next.getPointer() == &N && "Expected unlinked node");
assert(N.Next.getInt() == true && "Expected unlinked node");
@@ -505,6 +507,7 @@ struct IntrusiveBackListBase {
template <class T> class IntrusiveBackList : IntrusiveBackListBase {
public:
using IntrusiveBackListBase::empty;
+
void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
T &back() { return *static_cast<T *>(Last); }
const T &back() const { return *static_cast<T *>(Last); }
@@ -513,6 +516,7 @@ public:
class iterator
: public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
friend class const_iterator;
+
Node *N = nullptr;
public:
@@ -585,10 +589,12 @@ public:
class DIEValueList {
struct Node : IntrusiveBackListNode {
DIEValue V;
+
explicit Node(DIEValue V) : V(V) {}
};
- typedef IntrusiveBackList<Node> ListTy;
+ using ListTy = IntrusiveBackList<Node>;
+
ListTy List;
public:
@@ -597,9 +603,10 @@ public:
: public iterator_adaptor_base<value_iterator, ListTy::iterator,
std::forward_iterator_tag, DIEValue> {
friend class const_value_iterator;
- typedef iterator_adaptor_base<value_iterator, ListTy::iterator,
- std::forward_iterator_tag,
- DIEValue> iterator_adaptor;
+
+ using iterator_adaptor =
+ iterator_adaptor_base<value_iterator, ListTy::iterator,
+ std::forward_iterator_tag, DIEValue>;
public:
value_iterator() = default;
@@ -612,9 +619,9 @@ public:
class const_value_iterator : public iterator_adaptor_base<
const_value_iterator, ListTy::const_iterator,
std::forward_iterator_tag, const DIEValue> {
- typedef iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
- std::forward_iterator_tag,
- const DIEValue> iterator_adaptor;
+ using iterator_adaptor =
+ iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
+ std::forward_iterator_tag, const DIEValue>;
public:
const_value_iterator() = default;
@@ -627,8 +634,8 @@ public:
const DIEValue &operator*() const { return wrapped()->V; }
};
- typedef iterator_range<value_iterator> value_range;
- typedef iterator_range<const_value_iterator> const_value_range;
+ using value_range = iterator_range<value_iterator>;
+ using const_value_range = iterator_range<const_value_iterator>;
value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) {
List.push_back(*new (Alloc) Node(V));
@@ -657,15 +664,15 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
friend class DIEUnit;
/// Dwarf unit relative offset.
- unsigned Offset;
+ unsigned Offset = 0;
/// Size of instance + children.
- unsigned Size;
+ unsigned Size = 0;
unsigned AbbrevNumber = ~0u;
/// Dwarf tag code.
dwarf::Tag Tag = (dwarf::Tag)0;
/// Set to true to force a DIE to emit an abbreviation that says it has
/// children even when it doesn't. This is used for unit testing purposes.
- bool ForceChildren;
+ bool ForceChildren = false;
/// Children DIEs.
IntrusiveBackList<DIE> Children;
@@ -673,20 +680,19 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
/// DIEUnit which contains this DIE as its unit DIE.
PointerUnion<DIE *, DIEUnit *> Owner;
- DIE() = delete;
- explicit DIE(dwarf::Tag Tag) : Offset(0), Size(0), Tag(Tag),
- ForceChildren(false) {}
+ explicit DIE(dwarf::Tag Tag) : Tag(Tag) {}
public:
+ DIE() = delete;
+ DIE(const DIE &RHS) = delete;
+ DIE(DIE &&RHS) = delete;
+ DIE &operator=(const DIE &RHS) = delete;
+ DIE &operator=(const DIE &&RHS) = delete;
+
static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
return new (Alloc) DIE(Tag);
}
- DIE(const DIE &RHS) = delete;
- DIE(DIE &&RHS) = delete;
- void operator=(const DIE &RHS) = delete;
- void operator=(const DIE &&RHS) = delete;
-
// Accessors.
unsigned getAbbrevNumber() const { return AbbrevNumber; }
dwarf::Tag getTag() const { return Tag; }
@@ -696,10 +702,10 @@ public:
bool hasChildren() const { return ForceChildren || !Children.empty(); }
void setForceChildren(bool B) { ForceChildren = B; }
- typedef IntrusiveBackList<DIE>::iterator child_iterator;
- typedef IntrusiveBackList<DIE>::const_iterator const_child_iterator;
- typedef iterator_range<child_iterator> child_range;
- typedef iterator_range<const_child_iterator> const_child_range;
+ using child_iterator = IntrusiveBackList<DIE>::iterator;
+ using const_child_iterator = IntrusiveBackList<DIE>::const_iterator;
+ using child_range = iterator_range<child_iterator>;
+ using const_child_range = iterator_range<const_child_iterator>;
child_range children() {
return make_range(Children.begin(), Children.end());
@@ -838,10 +844,10 @@ struct BasicDIEUnit final : DIEUnit {
/// DIELoc - Represents an expression location.
//
class DIELoc : public DIEValueList {
- mutable unsigned Size; // Size in bytes excluding size header.
+ mutable unsigned Size = 0; // Size in bytes excluding size header.
public:
- DIELoc() : Size(0) {}
+ DIELoc() = default;
/// ComputeSize - Calculate the size of the location expression.
///
@@ -872,10 +878,10 @@ public:
/// DIEBlock - Represents a block of values.
//
class DIEBlock : public DIEValueList {
- mutable unsigned Size; // Size in bytes excluding size header.
+ mutable unsigned Size = 0; // Size in bytes excluding size header.
public:
- DIEBlock() : Size(0) {}
+ DIEBlock() = default;
/// ComputeSize - Calculate the size of the location expression.
///
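
(Not part of the commit, which is mostly typedef-to-using and in-class-initializer cleanup: a hedged sketch of the construction pattern this header supports. Attribute/form choices are illustrative only; real DIEs are built through DIEUnit/DwarfUnit in the AsmPrinter.)

#include "llvm/CodeGen/DIE.h"
#include "llvm/Support/Allocator.h"

using namespace llvm;

// Allocates a compile-unit DIE with one child variable DIE out of a
// BumpPtrAllocator, as DIE::get() requires.
static DIE *buildExampleDIE(BumpPtrAllocator &Alloc) {
  DIE *CU = DIE::get(Alloc, dwarf::DW_TAG_compile_unit);
  CU->addValue(Alloc, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
               DIEInteger(dwarf::DW_LANG_C_plus_plus));

  DIE &Var = CU->addChild(DIE::get(Alloc, dwarf::DW_TAG_variable));
  Var.addValue(Alloc, dwarf::DW_AT_external, dwarf::DW_FORM_flag_present,
               DIEInteger(1));
  return CU;
}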
diff --git a/include/llvm/CodeGen/FaultMaps.h b/include/llvm/CodeGen/FaultMaps.h
index 0f0005b83c54..98ff526dfe94 100644
--- a/include/llvm/CodeGen/FaultMaps.h
+++ b/include/llvm/CodeGen/FaultMaps.h
@@ -56,7 +56,7 @@ private:
HandlerOffsetExpr(HandlerOffset) {}
};
- typedef std::vector<FaultInfo> FunctionFaultInfos;
+ using FunctionFaultInfos = std::vector<FaultInfo>;
// We'd like to keep a stable iteration order for FunctionInfos to help
// FileCheck based testing.
@@ -78,20 +78,17 @@ private:
/// generated by the version of LLVM that includes it. No guarantees are made
/// with respect to forward or backward compatibility.
class FaultMapParser {
- typedef uint8_t FaultMapVersionType;
- static const size_t FaultMapVersionOffset = 0;
+ using FaultMapVersionType = uint8_t;
+ using Reserved0Type = uint8_t;
+ using Reserved1Type = uint16_t;
+ using NumFunctionsType = uint32_t;
- typedef uint8_t Reserved0Type;
+ static const size_t FaultMapVersionOffset = 0;
static const size_t Reserved0Offset =
FaultMapVersionOffset + sizeof(FaultMapVersionType);
-
- typedef uint16_t Reserved1Type;
static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
-
- typedef uint32_t NumFunctionsType;
static const size_t NumFunctionsOffset =
Reserved1Offset + sizeof(Reserved1Type);
-
static const size_t FunctionInfosOffset =
NumFunctionsOffset + sizeof(NumFunctionsType);
@@ -105,14 +102,13 @@ class FaultMapParser {
public:
class FunctionFaultInfoAccessor {
- typedef uint32_t FaultKindType;
- static const size_t FaultKindOffset = 0;
+ using FaultKindType = uint32_t;
+ using FaultingPCOffsetType = uint32_t;
+ using HandlerPCOffsetType = uint32_t;
- typedef uint32_t FaultingPCOffsetType;
+ static const size_t FaultKindOffset = 0;
static const size_t FaultingPCOffsetOffset =
FaultKindOffset + sizeof(FaultKindType);
-
- typedef uint32_t HandlerPCOffsetType;
static const size_t HandlerPCOffsetOffset =
FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
@@ -140,20 +136,17 @@ public:
};
class FunctionInfoAccessor {
- typedef uint64_t FunctionAddrType;
- static const size_t FunctionAddrOffset = 0;
+ using FunctionAddrType = uint64_t;
+ using NumFaultingPCsType = uint32_t;
+ using ReservedType = uint32_t;
- typedef uint32_t NumFaultingPCsType;
+ static const size_t FunctionAddrOffset = 0;
static const size_t NumFaultingPCsOffset =
FunctionAddrOffset + sizeof(FunctionAddrType);
-
- typedef uint32_t ReservedType;
static const size_t ReservedOffset =
NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
-
static const size_t FunctionFaultInfosOffset =
ReservedOffset + sizeof(ReservedType);
-
static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
const uint8_t *P = nullptr;
diff --git a/include/llvm/CodeGen/GlobalISel/Localizer.h b/include/llvm/CodeGen/GlobalISel/Localizer.h
new file mode 100644
index 000000000000..0a46eb9e7840
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -0,0 +1,78 @@
+//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the Localizer pass.
+/// This pass moves/duplicates constant-like instructions close to their uses.
+/// Its primary goal is to work around the deficiencies of the fast register
+/// allocator.
+/// With GlobalISel constants are all materialized in the entry block of
+/// a function. However, the fast allocator cannot rematerialize constants and
+/// has a lot more live-ranges to deal with and will most likely end up
+/// spilling a lot.
+/// By pushing the constants close to their use, we only create small
+/// live-ranges.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+// Forward declarations.
+class MachineRegisterInfo;
+
+/// This pass implements the localization mechanism described at the
+/// top of this file. One specificity of the implementation is that
+/// it will materialize one and only one instance of a constant per
+/// basic block, thus enabling reuse of that constant within that block.
+/// Moreover, it only materializes constants in blocks where they
+/// are used. PHI uses are considered happening at the end of the
+/// related predecessor.
+class Localizer : public MachineFunctionPass {
+public:
+ static char ID;
+
+private:
+ /// MRI contains all the register class/bank information that this
+ /// pass uses and updates.
+ MachineRegisterInfo *MRI;
+
+ /// Check whether or not \p MI needs to be moved close to its uses.
+ static bool shouldLocalize(const MachineInstr &MI);
+
+ /// Check if \p MOUse is used in the same basic block as \p Def.
+ /// If the use is in the same block, we say it is local.
+ /// When the use is not local, \p InsertMBB will contain the basic
+ /// block where to insert \p Def to have a local use.
+ static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
+ MachineBasicBlock *&InsertMBB);
+
+ /// Initialize the field members using \p MF.
+ void init(MachineFunction &MF);
+
+public:
+ Localizer();
+
+ StringRef getPassName() const override { return "Localizer"; }
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::IsSSA)
+ .set(MachineFunctionProperties::Property::Legalized)
+ .set(MachineFunctionProperties::Property::RegBankSelected);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // End namespace llvm.
+
+#endif
diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h
index f2a9a9f73ca6..2300a106c358 100644
--- a/include/llvm/CodeGen/ISDOpcodes.h
+++ b/include/llvm/CodeGen/ISDOpcodes.h
@@ -264,6 +264,14 @@ namespace ISD {
/// optimized.
STRICT_FADD, STRICT_FSUB, STRICT_FMUL, STRICT_FDIV, STRICT_FREM,
+ /// Constrained versions of libm-equivalent floating point intrinsics.
+ /// These will be lowered to the equivalent non-constrained pseudo-op
+ /// (or expanded to the equivalent library call) before final selection.
+ /// They are used to limit optimizations while the DAG is being optimized.
+ STRICT_FSQRT, STRICT_FPOW, STRICT_FPOWI, STRICT_FSIN, STRICT_FCOS,
+ STRICT_FEXP, STRICT_FEXP2, STRICT_FLOG, STRICT_FLOG10, STRICT_FLOG2,
+ STRICT_FRINT, STRICT_FNEARBYINT,
+
/// FMA - Perform a * b + c with no intermediate rounding step.
FMA,
diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h
index b792cba4b78a..40cd146f88f8 100644
--- a/include/llvm/CodeGen/LiveInterval.h
+++ b/include/llvm/CodeGen/LiveInterval.h
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===//
+//===- llvm/CodeGen/LiveInterval.h - Interval representation ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,22 +21,30 @@
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
#define LLVM_CODEGEN_LIVEINTERVAL_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
#include <cassert>
-#include <climits>
+#include <cstddef>
+#include <functional>
+#include <memory>
#include <set>
+#include <tuple>
+#include <utility>
namespace llvm {
+
class CoalescerPair;
class LiveIntervals;
- class MachineInstr;
class MachineRegisterInfo;
- class TargetRegisterInfo;
class raw_ostream;
- template <typename T, unsigned Small> class SmallPtrSet;
/// VNInfo - Value Number Information.
/// This class holds information about a machine level values, including
@@ -44,7 +52,7 @@ namespace llvm {
///
class VNInfo {
public:
- typedef BumpPtrAllocator Allocator;
+ using Allocator = BumpPtrAllocator;
/// The ID number of this value.
unsigned id;
@@ -53,14 +61,10 @@ namespace llvm {
SlotIndex def;
/// VNInfo constructor.
- VNInfo(unsigned i, SlotIndex d)
- : id(i), def(d)
- { }
+ VNInfo(unsigned i, SlotIndex d) : id(i), def(d) {}
/// VNInfo constructor, copies values from orig, except for the value number.
- VNInfo(unsigned i, const VNInfo &orig)
- : id(i), def(orig.def)
- { }
+ VNInfo(unsigned i, const VNInfo &orig) : id(i), def(orig.def) {}
/// Copy from the parameter into this VNInfo.
void copyFrom(VNInfo &src) {
@@ -152,16 +156,16 @@ namespace llvm {
/// segment with a new value number is used.
class LiveRange {
public:
-
/// This represents a simple continuous liveness interval for a value.
/// The start point is inclusive, the end point exclusive. These intervals
/// are rendered as [start,end).
struct Segment {
SlotIndex start; // Start point of the interval (inclusive)
SlotIndex end; // End point of the interval (exclusive)
- VNInfo *valno; // identifier for the value contained in this segment.
+ VNInfo *valno = nullptr; // identifier for the value contained in this
+ // segment.
- Segment() : valno(nullptr) {}
+ Segment() = default;
Segment(SlotIndex S, SlotIndex E, VNInfo *V)
: start(S), end(E), valno(V) {
@@ -189,8 +193,8 @@ namespace llvm {
void dump() const;
};
- typedef SmallVector<Segment, 2> Segments;
- typedef SmallVector<VNInfo *, 2> VNInfoList;
+ using Segments = SmallVector<Segment, 2>;
+ using VNInfoList = SmallVector<VNInfo *, 2>;
Segments segments; // the liveness segments
VNInfoList valnos; // value#'s
@@ -198,22 +202,24 @@ namespace llvm {
// The segment set is used temporarily to accelerate initial computation
// of live ranges of physical registers in computeRegUnitRange.
// After that the set is flushed to the segment vector and deleted.
- typedef std::set<Segment> SegmentSet;
+ using SegmentSet = std::set<Segment>;
std::unique_ptr<SegmentSet> segmentSet;
- typedef Segments::iterator iterator;
+ using iterator = Segments::iterator;
+ using const_iterator = Segments::const_iterator;
+
iterator begin() { return segments.begin(); }
iterator end() { return segments.end(); }
- typedef Segments::const_iterator const_iterator;
const_iterator begin() const { return segments.begin(); }
const_iterator end() const { return segments.end(); }
- typedef VNInfoList::iterator vni_iterator;
+ using vni_iterator = VNInfoList::iterator;
+ using const_vni_iterator = VNInfoList::const_iterator;
+
vni_iterator vni_begin() { return valnos.begin(); }
vni_iterator vni_end() { return valnos.end(); }
- typedef VNInfoList::const_iterator const_vni_iterator;
const_vni_iterator vni_begin() const { return valnos.begin(); }
const_vni_iterator vni_end() const { return valnos.end(); }
@@ -631,40 +637,37 @@ namespace llvm {
/// or stack slot.
class LiveInterval : public LiveRange {
public:
- typedef LiveRange super;
+ using super = LiveRange;
/// A live range for subregisters. The LaneMask specifies which parts of the
/// super register are covered by the interval.
/// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
class SubRange : public LiveRange {
public:
- SubRange *Next;
+ SubRange *Next = nullptr;
LaneBitmask LaneMask;
/// Constructs a new SubRange object.
- SubRange(LaneBitmask LaneMask)
- : Next(nullptr), LaneMask(LaneMask) {
- }
+ SubRange(LaneBitmask LaneMask) : LaneMask(LaneMask) {}
/// Constructs a new SubRange object by copying liveness from @p Other.
SubRange(LaneBitmask LaneMask, const LiveRange &Other,
BumpPtrAllocator &Allocator)
- : LiveRange(Other, Allocator), Next(nullptr), LaneMask(LaneMask) {
- }
+ : LiveRange(Other, Allocator), LaneMask(LaneMask) {}
void print(raw_ostream &OS) const;
void dump() const;
};
private:
- SubRange *SubRanges; ///< Single linked list of subregister live ranges.
+ SubRange *SubRanges = nullptr; ///< Single linked list of subregister live
+ /// ranges.
public:
const unsigned reg; // the register or stack slot of this interval.
float weight; // weight of this interval
- LiveInterval(unsigned Reg, float Weight)
- : SubRanges(nullptr), reg(Reg), weight(Weight) {}
+ LiveInterval(unsigned Reg, float Weight) : reg(Reg), weight(Weight) {}
~LiveInterval() {
clearSubRanges();
@@ -673,8 +676,10 @@ namespace llvm {
template<typename T>
class SingleLinkedListIterator {
T *P;
+
public:
SingleLinkedListIterator<T>(T *P) : P(P) {}
+
SingleLinkedListIterator<T> &operator++() {
P = P->Next;
return *this;
@@ -698,7 +703,9 @@ namespace llvm {
}
};
- typedef SingleLinkedListIterator<SubRange> subrange_iterator;
+ using subrange_iterator = SingleLinkedListIterator<SubRange>;
+ using const_subrange_iterator = SingleLinkedListIterator<const SubRange>;
+
subrange_iterator subrange_begin() {
return subrange_iterator(SubRanges);
}
@@ -706,7 +713,6 @@ namespace llvm {
return subrange_iterator(nullptr);
}
- typedef SingleLinkedListIterator<const SubRange> const_subrange_iterator;
const_subrange_iterator subrange_begin() const {
return const_subrange_iterator(SubRanges);
}
@@ -759,12 +765,12 @@ namespace llvm {
/// isSpillable - Can this interval be spilled?
bool isSpillable() const {
- return weight != llvm::huge_valf;
+ return weight != huge_valf;
}
/// markNotSpillable - Mark interval as not spillable
void markNotSpillable() {
- weight = llvm::huge_valf;
+ weight = huge_valf;
}
/// For a given lane mask @p LaneMask, compute indexes at which the
@@ -931,5 +937,7 @@ namespace llvm {
void Distribute(LiveInterval &LI, LiveInterval *LIV[],
MachineRegisterInfo &MRI);
};
-}
-#endif
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEINTERVAL_H
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 181cb375de86..820e88362483 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -1,4 +1,4 @@
-//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
+//===- LiveIntervalAnalysis.h - Live Interval Analysis ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,6 +20,7 @@
#ifndef LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -27,27 +28,29 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
-#include "llvm/Support/Allocator.h"
+#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include <cmath>
+#include <cassert>
+#include <cstdint>
+#include <utility>
namespace llvm {
extern cl::opt<bool> UseSegmentSetForPhysRegs;
- class BitVector;
- class BlockFrequency;
- class LiveRangeCalc;
- class LiveVariables;
- class MachineDominatorTree;
- class MachineLoopInfo;
- class TargetRegisterInfo;
- class MachineRegisterInfo;
- class TargetInstrInfo;
- class TargetRegisterClass;
- class VirtRegMap;
- class MachineBlockFrequencyInfo;
+class BitVector;
+class LiveRangeCalc;
+class MachineBlockFrequencyInfo;
+class MachineDominatorTree;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class raw_ostream;
+class TargetInstrInfo;
+class VirtRegMap;
class LiveIntervals : public MachineFunctionPass {
MachineFunction* MF;
@@ -56,8 +59,8 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
const TargetInstrInfo* TII;
AliasAnalysis *AA;
SlotIndexes* Indexes;
- MachineDominatorTree *DomTree;
- LiveRangeCalc *LRCalc;
+ MachineDominatorTree *DomTree = nullptr;
+ LiveRangeCalc *LRCalc = nullptr;
/// Special pool allocator for VNInfo's (LiveInterval val#).
VNInfo::Allocator VNInfoAllocator;
@@ -95,6 +98,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
public:
static char ID;
+
LiveIntervals();
~LiveIntervals() override;
@@ -466,6 +470,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
class HMEditor;
};
-} // End llvm namespace
-#endif
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
diff --git a/include/llvm/CodeGen/LiveIntervalUnion.h b/include/llvm/CodeGen/LiveIntervalUnion.h
index 57e3deb038af..b922e543c856 100644
--- a/include/llvm/CodeGen/LiveIntervalUnion.h
+++ b/include/llvm/CodeGen/LiveIntervalUnion.h
@@ -26,12 +26,14 @@
namespace llvm {
+class raw_ostream;
class TargetRegisterInfo;
#ifndef NDEBUG
// forward declaration
template <unsigned Element> class SparseBitVector;
-typedef SparseBitVector<128> LiveVirtRegBitSet;
+
+using LiveVirtRegBitSet = SparseBitVector<128>;
#endif
/// Union of live intervals that are strong candidates for coalescing into a
@@ -42,19 +44,19 @@ class LiveIntervalUnion {
// A set of live virtual register segments that supports fast insertion,
// intersection, and removal.
// Mapping SlotIndex intervals to virtual register numbers.
- typedef IntervalMap<SlotIndex, LiveInterval*> LiveSegments;
+ using LiveSegments = IntervalMap<SlotIndex, LiveInterval*>;
public:
// SegmentIter can advance to the next segment ordered by starting position
// which may belong to a different live virtual register. We also must be able
// to reach the current segment's containing virtual register.
- typedef LiveSegments::iterator SegmentIter;
+ using SegmentIter = LiveSegments::iterator;
/// Const version of SegmentIter.
- typedef LiveSegments::const_iterator ConstSegmentIter;
+ using ConstSegmentIter = LiveSegments::const_iterator;
// LiveIntervalUnions share an external allocator.
- typedef LiveSegments::Allocator Allocator;
+ using Allocator = LiveSegments::Allocator;
private:
unsigned Tag = 0; // unique tag for current contents.
@@ -76,7 +78,7 @@ public:
SlotIndex startIndex() const { return Segments.start(); }
// Provide public access to the underlying map to allow overlap iteration.
- typedef LiveSegments Map;
+ using Map = LiveSegments;
const Map &getMap() const { return Segments; }
/// getTag - Return an opaque tag representing the current state of the union.
diff --git a/include/llvm/CodeGen/LivePhysRegs.h b/include/llvm/CodeGen/LivePhysRegs.h
index 9e04c467fadc..f9c741dd75b2 100644
--- a/include/llvm/CodeGen/LivePhysRegs.h
+++ b/include/llvm/CodeGen/LivePhysRegs.h
@@ -7,23 +7,24 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the LivePhysRegs utility for tracking liveness of
-// physical registers. This can be used for ad-hoc liveness tracking after
-// register allocation. You can start with the live-ins/live-outs at the
-// beginning/end of a block and update the information while walking the
-// instructions inside the block. This implementation tracks the liveness on a
-// sub-register granularity.
-//
-// We assume that the high bits of a physical super-register are not preserved
-// unless the instruction has an implicit-use operand reading the super-
-// register.
-//
-// X86 Example:
-// %YMM0<def> = ...
-// %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0)
-//
-// %YMM0<def> = ...
-// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
+/// \file
+/// This file implements the LivePhysRegs utility for tracking liveness of
+/// physical registers. This can be used for ad-hoc liveness tracking after
+/// register allocation. You can start with the live-ins/live-outs at the
+/// beginning/end of a block and update the information while walking the
+/// instructions inside the block. This implementation tracks the liveness on a
+/// sub-register granularity.
+///
+/// We assume that the high bits of a physical super-register are not preserved
+/// unless the instruction has an implicit-use operand reading the super-
+/// register.
+///
+/// X86 Example:
+/// %YMM0<def> = ...
+/// %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0)
+///
+/// %YMM0<def> = ...
+/// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
@@ -39,40 +40,42 @@
namespace llvm {
class MachineInstr;
+class MachineOperand;
+class MachineRegisterInfo;
+class raw_ostream;
-/// \brief A set of live physical registers with functions to track liveness
+/// \brief A set of physical registers with utility functions to track liveness
/// when walking backward/forward through a basic block.
class LivePhysRegs {
const TargetRegisterInfo *TRI = nullptr;
SparseSet<unsigned> LiveRegs;
- LivePhysRegs(const LivePhysRegs&) = delete;
- LivePhysRegs &operator=(const LivePhysRegs&) = delete;
-
public:
- /// \brief Constructs a new empty LivePhysRegs set.
+ /// Constructs an uninitialized set. init() needs to be called to initialize it.
LivePhysRegs() = default;
- /// \brief Constructs and initialize an empty LivePhysRegs set.
- LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {
- assert(TRI && "Invalid TargetRegisterInfo pointer.");
- LiveRegs.setUniverse(TRI->getNumRegs());
+ /// Constructs and initializes an empty set.
+ LivePhysRegs(const TargetRegisterInfo &TRI) : TRI(&TRI) {
+ LiveRegs.setUniverse(TRI.getNumRegs());
}
- /// \brief Clear and initialize the LivePhysRegs set.
+ LivePhysRegs(const LivePhysRegs&) = delete;
+ LivePhysRegs &operator=(const LivePhysRegs&) = delete;
+
+ /// (re-)initializes and clears the set.
void init(const TargetRegisterInfo &TRI) {
this->TRI = &TRI;
LiveRegs.clear();
LiveRegs.setUniverse(TRI.getNumRegs());
}
- /// \brief Clears the LivePhysRegs set.
+ /// Clears the set.
void clear() { LiveRegs.clear(); }
- /// \brief Returns true if the set is empty.
+ /// Returns true if the set is empty.
bool empty() const { return LiveRegs.empty(); }
- /// \brief Adds a physical register and all its sub-registers to the set.
+ /// Adds a physical register and all its sub-registers to the set.
void addReg(unsigned Reg) {
assert(TRI && "LivePhysRegs is not initialized.");
assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
@@ -90,12 +93,13 @@ public:
LiveRegs.erase(*R);
}
- /// \brief Removes physical registers clobbered by the regmask operand @p MO.
+ /// Removes physical registers clobbered by the regmask operand \p MO.
void removeRegsInMask(const MachineOperand &MO,
- SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers);
+ SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
+ nullptr);
- /// \brief Returns true if register @p Reg is contained in the set. This also
- /// works if only the super register of @p Reg has been defined, because
+ /// \brief Returns true if register \p Reg is contained in the set. This also
+ /// works if only the super register of \p Reg has been defined, because
/// addReg() always adds all sub-registers to the set as well.
/// Note: Returns false if just some sub registers are live, use available()
/// when searching a free register.
@@ -104,48 +108,48 @@ public:
/// Returns true if register \p Reg and no aliasing register is in the set.
bool available(const MachineRegisterInfo &MRI, unsigned Reg) const;
- /// \brief Simulates liveness when stepping backwards over an
- /// instruction(bundle): Remove Defs, add uses. This is the recommended way of
- /// calculating liveness.
+ /// Simulates liveness when stepping backwards over an instruction(bundle).
+ /// Remove Defs, add uses. This is the recommended way of calculating
+ /// liveness.
void stepBackward(const MachineInstr &MI);
- /// \brief Simulates liveness when stepping forward over an
- /// instruction(bundle): Remove killed-uses, add defs. This is the not
- /// recommended way, because it depends on accurate kill flags. If possible
- /// use stepBackward() instead of this function.
- /// The clobbers set will be the list of registers either defined or clobbered
- /// by a regmask. The operand will identify whether this is a regmask or
- /// register operand.
+ /// Simulates liveness when stepping forward over an instruction(bundle).
+ /// Remove killed-uses, add defs. This is not the recommended way, because it
+ /// depends on accurate kill flags. If possible use stepBackward() instead of
+ /// this function. The clobbers set will be the list of registers either
+ /// defined or clobbered by a regmask. The operand will identify whether this
+ /// is a regmask or register operand.
void stepForward(const MachineInstr &MI,
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
- /// Adds all live-in registers of basic block @p MBB.
+ /// Adds all live-in registers of basic block \p MBB.
/// Live in registers are the registers in the blocks live-in list and the
/// pristine registers.
void addLiveIns(const MachineBasicBlock &MBB);
- /// Adds all live-out registers of basic block @p MBB.
+ /// Adds all live-out registers of basic block \p MBB.
/// Live out registers are the union of the live-in registers of the successor
/// blocks and pristine registers. Live out registers of the end block are the
/// callee saved registers.
void addLiveOuts(const MachineBasicBlock &MBB);
- /// Like addLiveOuts() but does not add pristine registers/callee saved
+ /// Adds all live-out registers of basic block \p MBB but skips pristine
/// registers.
void addLiveOutsNoPristines(const MachineBasicBlock &MBB);
- typedef SparseSet<unsigned>::const_iterator const_iterator;
+ using const_iterator = SparseSet<unsigned>::const_iterator;
+
const_iterator begin() const { return LiveRegs.begin(); }
const_iterator end() const { return LiveRegs.end(); }
- /// \brief Prints the currently live registers to @p OS.
+ /// Prints the currently live registers to \p OS.
void print(raw_ostream &OS) const;
- /// \brief Dumps the currently live registers to the debug output.
+ /// Dumps the currently live registers to the debug output.
void dump() const;
private:
- /// Adds live-in registers from basic block @p MBB, taking associated
+ /// \brief Adds live-in registers from basic block \p MBB, taking associated
/// lane masks into consideration.
void addBlockLiveIns(const MachineBasicBlock &MBB);
};
@@ -155,11 +159,11 @@ inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
return OS;
}
-/// Compute the live-in list for \p MBB assuming all of its successors live-in
-/// lists are up-to-date. Uses the given LivePhysReg instance \p LiveRegs; This
-/// is just here to avoid repeated heap allocations when calling this multiple
-/// times in a pass.
-void computeLiveIns(LivePhysRegs &LiveRegs, const TargetRegisterInfo &TRI,
+/// \brief Computes the live-in list for \p MBB assuming all of its successors'
+/// live-in lists are up-to-date. Uses the given LivePhysRegs instance
+/// \p LiveRegs; this is just here to avoid repeated heap allocations when
+/// calling this multiple times in a pass.
+void computeLiveIns(LivePhysRegs &LiveRegs, const MachineRegisterInfo &MRI,
MachineBasicBlock &MBB);
} // end namespace llvm
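The comments above describe the intended backward-walk usage of LivePhysRegs. A minimal sketch (not part of this patch; the helper name and variables are invented for illustration) of seeding a set with a block's live-outs and stepping backward to recover the registers live at its top:

#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"

using namespace llvm;

// Hypothetical helper: collect the registers live at the top of MBB into
// LiveRegs, reusing the caller's set to avoid repeated allocations.
static void collectLiveAtTop(const MachineBasicBlock &MBB,
                             const TargetRegisterInfo &TRI,
                             LivePhysRegs &LiveRegs) {
  LiveRegs.init(TRI);           // (re-)initialize against the target
  LiveRegs.addLiveOuts(MBB);    // successor live-ins plus pristine registers
  for (const MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend()))
    LiveRegs.stepBackward(MI);  // remove defs, then add uses
}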
diff --git a/include/llvm/CodeGen/LiveRangeEdit.h b/include/llvm/CodeGen/LiveRangeEdit.h
index 4250777682ba..362d9854a271 100644
--- a/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/include/llvm/CodeGen/LiveRangeEdit.h
@@ -1,4 +1,4 @@
-//===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===//
+//===- LiveRangeEdit.h - Basic tools for split and spill --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,19 +19,28 @@
#define LLVM_CODEGEN_LIVERANGEEDIT_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <cassert>
namespace llvm {
class LiveIntervals;
class MachineBlockFrequencyInfo;
+class MachineInstr;
class MachineLoopInfo;
+class MachineOperand;
+class TargetInstrInfo;
+class TargetRegisterInfo;
class VirtRegMap;
class LiveRangeEdit : private MachineRegisterInfo::Delegate {
@@ -39,7 +48,10 @@ public:
/// Callback methods for LiveRangeEdit owners.
class Delegate {
virtual void anchor();
+
public:
+ virtual ~Delegate() = default;
+
/// Called immediately before erasing a dead machine instruction.
virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
@@ -53,8 +65,6 @@ public:
/// Called after cloning a virtual register.
/// This is used for new registers representing connected components of Old.
virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
-
- virtual ~Delegate() {}
};
private:
@@ -70,7 +80,7 @@ private:
const unsigned FirstNew;
/// ScannedRemattable - true when remattable values have been identified.
- bool ScannedRemattable;
+ bool ScannedRemattable = false;
/// DeadRemats - The saved instructions which have already been dead after
/// rematerialization but not deleted yet -- to be done in postOptimization.
@@ -78,11 +88,11 @@ private:
/// Remattable - Values defined by remattable instructions as identified by
/// tii.isTriviallyReMaterializable().
- SmallPtrSet<const VNInfo*,4> Remattable;
+ SmallPtrSet<const VNInfo *, 4> Remattable;
/// Rematted - Values that were actually rematted, and so need to have their
/// live range trimmed or entirely removed.
- SmallPtrSet<const VNInfo*,4> Rematted;
+ SmallPtrSet<const VNInfo *, 4> Rematted;
/// scanRemattable - Identify the Parent values that may rematerialize.
void scanRemattable(AliasAnalysis *aa);
@@ -94,11 +104,11 @@ private:
/// foldAsLoad - If LI has a single use and a single def that can be folded as
/// a load, eliminate the register by folding the def into the use.
- bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead);
+ bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr *> &Dead);
+
+ using ToShrinkSet = SetVector<LiveInterval *, SmallVector<LiveInterval *, 8>,
+ SmallPtrSet<LiveInterval *, 8>>;
- typedef SetVector<LiveInterval*,
- SmallVector<LiveInterval*, 8>,
- SmallPtrSet<LiveInterval*, 8> > ToShrinkSet;
/// Helper for eliminateDeadDefs.
void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
AliasAnalysis *AA);
@@ -129,26 +139,26 @@ public:
SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)
: Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate),
- FirstNew(newRegs.size()), ScannedRemattable(false),
- DeadRemats(deadRemats) {
+ FirstNew(newRegs.size()), DeadRemats(deadRemats) {
MRI.setDelegate(this);
}
~LiveRangeEdit() override { MRI.resetDelegate(this); }
LiveInterval &getParent() const {
- assert(Parent && "No parent LiveInterval");
- return *Parent;
+ assert(Parent && "No parent LiveInterval");
+ return *Parent;
}
+
unsigned getReg() const { return getParent().reg; }
/// Iterator for accessing the new registers added by this edit.
- typedef SmallVectorImpl<unsigned>::const_iterator iterator;
- iterator begin() const { return NewRegs.begin()+FirstNew; }
+ using iterator = SmallVectorImpl<unsigned>::const_iterator;
+ iterator begin() const { return NewRegs.begin() + FirstNew; }
iterator end() const { return NewRegs.end(); }
- unsigned size() const { return NewRegs.size()-FirstNew; }
+ unsigned size() const { return NewRegs.size() - FirstNew; }
bool empty() const { return size() == 0; }
- unsigned get(unsigned idx) const { return NewRegs[idx+FirstNew]; }
+ unsigned get(unsigned idx) const { return NewRegs[idx + FirstNew]; }
/// pop_back - It allows LiveRangeEdit users to drop new registers.
/// The context is when an original def instruction of a register is
@@ -176,26 +186,25 @@ public:
return createEmptyIntervalFrom(getReg());
}
- unsigned create() {
- return createFrom(getReg());
- }
+ unsigned create() { return createFrom(getReg()); }
/// anyRematerializable - Return true if any parent values may be
/// rematerializable.
/// This function must be called before any rematerialization is attempted.
- bool anyRematerializable(AliasAnalysis*);
+ bool anyRematerializable(AliasAnalysis *);
/// checkRematerializable - Manually add VNI to the list of rematerializable
/// values if DefMI may be rematerializable.
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
- AliasAnalysis*);
+ AliasAnalysis *);
/// Remat - Information needed to rematerialize at a specific location.
struct Remat {
- VNInfo *ParentVNI; // parent_'s value at the remat location.
- MachineInstr *OrigMI; // Instruction defining OrigVNI. It contains the
- // real expr for remat.
- explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}
+ VNInfo *ParentVNI; // parent_'s value at the remat location.
+ MachineInstr *OrigMI = nullptr; // Instruction defining OrigVNI. It contains
+ // the real expr for remat.
+
+ explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI) {}
};
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at
@@ -209,10 +218,8 @@ public:
/// liveness is not updated.
/// Return the SlotIndex of the new instruction.
SlotIndex rematerializeAt(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg,
- const Remat &RM,
- const TargetRegisterInfo&,
+ MachineBasicBlock::iterator MI, unsigned DestReg,
+ const Remat &RM, const TargetRegisterInfo &,
bool Late = false);
/// markRematerialized - explicitly mark a value as rematerialized after doing
@@ -248,11 +255,10 @@ public:
/// calculateRegClassAndHint - Recompute register class and hint for each new
/// register.
- void calculateRegClassAndHint(MachineFunction&,
- const MachineLoopInfo&,
- const MachineBlockFrequencyInfo&);
+ void calculateRegClassAndHint(MachineFunction &, const MachineLoopInfo &,
+ const MachineBlockFrequencyInfo &);
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_CODEGEN_LIVERANGEEDIT_H
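For orientation only: the iterator pair documented above ("Iterator for accessing the new registers added by this edit") is what spill/split code typically loops over once an edit has finished. A hedged fragment, assuming LRE is an in-use LiveRangeEdit:

// Walk only the virtual registers this edit created (NewRegs[FirstNew..]).
for (unsigned VReg : make_range(LRE.begin(), LRE.end())) {
  // e.g. queue VReg for re-coloring or further splitting
  (void)VReg;
}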
diff --git a/include/llvm/CodeGen/LiveStackAnalysis.h b/include/llvm/CodeGen/LiveStackAnalysis.h
index 3ffbe3d775b4..c90ae7b184f4 100644
--- a/include/llvm/CodeGen/LiveStackAnalysis.h
+++ b/include/llvm/CodeGen/LiveStackAnalysis.h
@@ -1,4 +1,4 @@
-//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===//
+//===- LiveStackAnalysis.h - Live Stack Slot Analysis -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,13 +18,16 @@
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Pass.h"
+#include <cassert>
#include <map>
#include <unordered_map>
namespace llvm {
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
class LiveStacks : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
@@ -33,8 +36,7 @@ class LiveStacks : public MachineFunctionPass {
VNInfo::Allocator VNInfoAllocator;
/// S2IMap - Stack slot indices to live interval mapping.
- ///
- typedef std::unordered_map<int, LiveInterval> SS2IntervalMap;
+ using SS2IntervalMap = std::unordered_map<int, LiveInterval>;
SS2IntervalMap S2IMap;
/// S2RCMap - Stack slot indices to register class mapping.
@@ -42,12 +44,14 @@ class LiveStacks : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
+
LiveStacks() : MachineFunctionPass(ID) {
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
}
- typedef SS2IntervalMap::iterator iterator;
- typedef SS2IntervalMap::const_iterator const_iterator;
+ using iterator = SS2IntervalMap::iterator;
+ using const_iterator = SS2IntervalMap::const_iterator;
+
const_iterator begin() const { return S2IMap.begin(); }
const_iterator end() const { return S2IMap.end(); }
iterator begin() { return S2IMap.begin(); }
@@ -93,6 +97,7 @@ public:
/// print - Implement the dump method.
void print(raw_ostream &O, const Module * = nullptr) const override;
};
-}
-#endif /* LLVM_CODEGEN_LIVESTACK_ANALYSIS_H */
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVESTACK_ANALYSIS_H
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 18d40564856d..8da48c379d00 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===//
+//===- llvm/CodeGen/MachineBasicBlock.h -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,41 +15,50 @@
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/simple_ilist.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstdint>
#include <functional>
+#include <iterator>
+#include <string>
+#include <vector>
namespace llvm {
-class Pass;
class BasicBlock;
class MachineFunction;
class MCSymbol;
-class MIPrinter;
+class ModuleSlotTracker;
+class Pass;
class SlotIndexes;
class StringRef;
class raw_ostream;
-class MachineBranchProbabilityInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
template <> struct ilist_traits<MachineInstr> {
private:
friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.
+
MachineBasicBlock *Parent;
- typedef simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator
- instr_iterator;
+ using instr_iterator =
+ simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator;
public:
void addNodeToList(MachineInstr *N);
void removeNodeFromList(MachineInstr *N);
void transferNodesFromList(ilist_traits &OldList, instr_iterator First,
instr_iterator Last);
-
void deleteNode(MachineInstr *MI);
};
@@ -69,7 +78,8 @@ public:
};
private:
- typedef ilist<MachineInstr, ilist_sentinel_tracking<true>> Instructions;
+ using Instructions = ilist<MachineInstr, ilist_sentinel_tracking<true>>;
+
Instructions Insts;
const BasicBlock *BB;
int Number;
@@ -83,12 +93,12 @@ private:
/// same order as Successors, or it is empty if we don't use it (disable
/// optimization).
std::vector<BranchProbability> Probs;
- typedef std::vector<BranchProbability>::iterator probability_iterator;
- typedef std::vector<BranchProbability>::const_iterator
- const_probability_iterator;
+ using probability_iterator = std::vector<BranchProbability>::iterator;
+ using const_probability_iterator =
+ std::vector<BranchProbability>::const_iterator;
/// Keep track of the physical registers that are livein of the basicblock.
- typedef std::vector<RegisterMaskPair> LiveInVector;
+ using LiveInVector = std::vector<RegisterMaskPair>;
LiveInVector LiveIns;
/// Alignment of the basic block. Zero if the basic block does not need to be
@@ -113,7 +123,7 @@ private:
mutable MCSymbol *CachedMCSymbol = nullptr;
// Intrusive list support
- MachineBasicBlock() {}
+ MachineBasicBlock() = default;
explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);
@@ -145,16 +155,16 @@ public:
const MachineFunction *getParent() const { return xParent; }
MachineFunction *getParent() { return xParent; }
- typedef Instructions::iterator instr_iterator;
- typedef Instructions::const_iterator const_instr_iterator;
- typedef Instructions::reverse_iterator reverse_instr_iterator;
- typedef Instructions::const_reverse_iterator const_reverse_instr_iterator;
+ using instr_iterator = Instructions::iterator;
+ using const_instr_iterator = Instructions::const_iterator;
+ using reverse_instr_iterator = Instructions::reverse_iterator;
+ using const_reverse_instr_iterator = Instructions::const_reverse_iterator;
- typedef MachineInstrBundleIterator<MachineInstr> iterator;
- typedef MachineInstrBundleIterator<const MachineInstr> const_iterator;
- typedef MachineInstrBundleIterator<MachineInstr, true> reverse_iterator;
- typedef MachineInstrBundleIterator<const MachineInstr, true>
- const_reverse_iterator;
+ using iterator = MachineInstrBundleIterator<MachineInstr>;
+ using const_iterator = MachineInstrBundleIterator<const MachineInstr>;
+ using reverse_iterator = MachineInstrBundleIterator<MachineInstr, true>;
+ using const_reverse_iterator =
+ MachineInstrBundleIterator<const MachineInstr, true>;
unsigned size() const { return (unsigned)Insts.size(); }
bool empty() const { return Insts.empty(); }
@@ -178,8 +188,8 @@ public:
reverse_instr_iterator instr_rend () { return Insts.rend(); }
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
- typedef iterator_range<instr_iterator> instr_range;
- typedef iterator_range<const_instr_iterator> const_instr_range;
+ using instr_range = iterator_range<instr_iterator>;
+ using const_instr_range = iterator_range<const_instr_iterator>;
instr_range instrs() { return instr_range(instr_begin(), instr_end()); }
const_instr_range instrs() const {
return const_instr_range(instr_begin(), instr_end());
@@ -213,18 +223,18 @@ public:
}
// Machine-CFG iterators
- typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
- typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator;
- typedef std::vector<MachineBasicBlock *>::iterator succ_iterator;
- typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator;
- typedef std::vector<MachineBasicBlock *>::reverse_iterator
- pred_reverse_iterator;
- typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
- const_pred_reverse_iterator;
- typedef std::vector<MachineBasicBlock *>::reverse_iterator
- succ_reverse_iterator;
- typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
- const_succ_reverse_iterator;
+ using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
+ using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
+ using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
+ using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
+ using pred_reverse_iterator =
+ std::vector<MachineBasicBlock *>::reverse_iterator;
+ using const_pred_reverse_iterator =
+ std::vector<MachineBasicBlock *>::const_reverse_iterator;
+ using succ_reverse_iterator =
+ std::vector<MachineBasicBlock *>::reverse_iterator;
+ using const_succ_reverse_iterator =
+ std::vector<MachineBasicBlock *>::const_reverse_iterator;
pred_iterator pred_begin() { return Predecessors.begin(); }
const_pred_iterator pred_begin() const { return Predecessors.begin(); }
pred_iterator pred_end() { return Predecessors.end(); }
@@ -307,7 +317,7 @@ public:
// Iteration support for live in sets. These sets are kept in sorted
// order by their register number.
- typedef LiveInVector::const_iterator livein_iterator;
+ using livein_iterator = LiveInVector::const_iterator;
#ifndef NDEBUG
/// Unlike livein_begin, this method does not check that the liveness
/// information is accurate. Still for debug purposes it may be useful
@@ -455,7 +465,6 @@ public:
/// other block.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
-
/// Return the fallthrough block if the block can implicitly
/// transfer control to the block after it by falling off the end of
/// it. This should return null if it can reach the block after
@@ -695,7 +704,7 @@ public:
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
unsigned Reg,
const_iterator Before,
- unsigned Neighborhood=10) const;
+ unsigned Neighborhood = 10) const;
// Debugging methods.
void dump() const;
@@ -714,7 +723,6 @@ public:
/// Return the MCSymbol for this basic block.
MCSymbol *getSymbol() const;
-
private:
/// Return probability iterator corresponding to the I successor iterator.
probability_iterator getProbabilityIterator(succ_iterator I);
@@ -764,8 +772,8 @@ struct MBB2NumberFunctor :
//
template <> struct GraphTraits<MachineBasicBlock *> {
- typedef MachineBasicBlock *NodeRef;
- typedef MachineBasicBlock::succ_iterator ChildIteratorType;
+ using NodeRef = MachineBasicBlock *;
+ using ChildIteratorType = MachineBasicBlock::succ_iterator;
static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
@@ -773,8 +781,8 @@ template <> struct GraphTraits<MachineBasicBlock *> {
};
template <> struct GraphTraits<const MachineBasicBlock *> {
- typedef const MachineBasicBlock *NodeRef;
- typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
+ using NodeRef = const MachineBasicBlock *;
+ using ChildIteratorType = MachineBasicBlock::const_succ_iterator;
static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
@@ -787,28 +795,30 @@ template <> struct GraphTraits<const MachineBasicBlock *> {
// to be when traversing the predecessor edges of a MBB
// instead of the successor edges.
//
-template <> struct GraphTraits<Inverse<MachineBasicBlock*> > {
- typedef MachineBasicBlock *NodeRef;
- typedef MachineBasicBlock::pred_iterator ChildIteratorType;
+template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
+ using NodeRef = MachineBasicBlock *;
+ using ChildIteratorType = MachineBasicBlock::pred_iterator;
+
static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
return G.Graph;
}
+
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
-template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
- typedef const MachineBasicBlock *NodeRef;
- typedef MachineBasicBlock::const_pred_iterator ChildIteratorType;
+template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
+ using NodeRef = const MachineBasicBlock *;
+ using ChildIteratorType = MachineBasicBlock::const_pred_iterator;
+
static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
return G.Graph;
}
+
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
-
-
/// MachineInstrSpan provides an interface to get an iteration range
/// containing the instruction it was initialized with, along with all
/// those instructions inserted prior to or following that instruction
@@ -816,6 +826,7 @@ template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
class MachineInstrSpan {
MachineBasicBlock &MBB;
MachineBasicBlock::iterator I, B, E;
+
public:
MachineInstrSpan(MachineBasicBlock::iterator I)
: MBB(*I->getParent()),
@@ -854,6 +865,6 @@ inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
return It;
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_CODEGEN_MACHINEBASICBLOCK_H
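The GraphTraits specializations touched above are what let the generic graph utilities walk the machine CFG. A sketch of the usual idiom, assuming MF is a MachineFunction whose entry block is MF.front():

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/MachineFunction.h"

// Visit every block reachable from the entry via successor edges.
static void visitReachableBlocks(llvm::MachineFunction &MF) {
  for (llvm::MachineBasicBlock *MBB : llvm::depth_first(&MF.front())) {
    // Inverse<MachineBasicBlock *> would walk predecessor edges instead.
    (void)MBB;
  }
}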
diff --git a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
index cd1c204981ed..cba79c818a76 100644
--- a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
+++ b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -1,4 +1,4 @@
-//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -*- C++ -*-----===//
+//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,26 +17,28 @@
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/BlockFrequency.h"
-#include <climits>
+#include <cstdint>
+#include <memory>
namespace llvm {
+template <class BlockT> class BlockFrequencyInfoImpl;
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
+class MachineFunction;
class MachineLoopInfo;
-template <class BlockT> class BlockFrequencyInfoImpl;
+class raw_ostream;
/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
/// to estimate machine basic block frequencies.
class MachineBlockFrequencyInfo : public MachineFunctionPass {
- typedef BlockFrequencyInfoImpl<MachineBasicBlock> ImplType;
+ using ImplType = BlockFrequencyInfoImpl<MachineBasicBlock>;
std::unique_ptr<ImplType> MBFI;
public:
static char ID;
MachineBlockFrequencyInfo();
-
~MachineBlockFrequencyInfo() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
@@ -74,9 +76,8 @@ public:
const MachineBasicBlock *MBB) const;
uint64_t getEntryFreq() const;
-
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
diff --git a/include/llvm/CodeGen/MachineDominanceFrontier.h b/include/llvm/CodeGen/MachineDominanceFrontier.h
index 4131194a0c0f..370ffbe4862e 100644
--- a/include/llvm/CodeGen/MachineDominanceFrontier.h
+++ b/include/llvm/CodeGen/MachineDominanceFrontier.h
@@ -11,23 +11,28 @@
#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Analysis/DominanceFrontierImpl.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-
+#include "llvm/Support/GenericDomTree.h"
+#include <vector>
namespace llvm {
class MachineDominanceFrontier : public MachineFunctionPass {
ForwardDominanceFrontierBase<MachineBasicBlock> Base;
+
public:
- typedef DominatorTreeBase<MachineBasicBlock> DomTreeT;
- typedef DomTreeNodeBase<MachineBasicBlock> DomTreeNodeT;
- typedef DominanceFrontierBase<MachineBasicBlock>::DomSetType DomSetType;
- typedef DominanceFrontierBase<MachineBasicBlock>::iterator iterator;
- typedef DominanceFrontierBase<MachineBasicBlock>::const_iterator const_iterator;
+ using DomTreeT = DominatorTreeBase<MachineBasicBlock>;
+ using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
+ using DomSetType = DominanceFrontierBase<MachineBasicBlock>::DomSetType;
+ using iterator = DominanceFrontierBase<MachineBasicBlock>::iterator;
+ using const_iterator =
+ DominanceFrontierBase<MachineBasicBlock>::const_iterator;
- void operator=(const MachineDominanceFrontier &) = delete;
MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
+ MachineDominanceFrontier &
+ operator=(const MachineDominanceFrontier &) = delete;
static char ID;
@@ -104,6 +109,6 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
diff --git a/include/llvm/CodeGen/MachineDominators.h b/include/llvm/CodeGen/MachineDominators.h
index 30b6cfdd1c36..74a7c3ea04ae 100644
--- a/include/llvm/CodeGen/MachineDominators.h
+++ b/include/llvm/CodeGen/MachineDominators.h
@@ -1,4 +1,4 @@
-//=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==//
+//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -16,12 +16,15 @@
#define LLVM_CODEGEN_MACHINEDOMINATORS_H
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
+#include <cassert>
#include <memory>
+#include <vector>
namespace llvm {
@@ -33,7 +36,7 @@ inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB
extern template class DomTreeNodeBase<MachineBasicBlock>;
extern template class DominatorTreeBase<MachineBasicBlock>;
-typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
+using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
//===-------------------------------------
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
@@ -52,6 +55,7 @@ class MachineDominatorTree : public MachineFunctionPass {
/// The splitting of a critical edge is local and thus, it is possible
/// to apply several of those changes at the same time.
mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
+
/// \brief Remember all the basic blocks that are inserted during
/// edge splitting.
/// Invariant: NewBBs == all the basic blocks contained in the NewBB
@@ -259,8 +263,8 @@ public:
template <class Node, class ChildIterator>
struct MachineDomTreeGraphTraitsBase {
- typedef Node *NodeRef;
- typedef ChildIterator ChildIteratorType;
+ using NodeRef = Node *;
+ using ChildIteratorType = ChildIterator;
static NodeRef getEntryNode(NodeRef N) { return N; }
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
@@ -287,6 +291,6 @@ template <> struct GraphTraits<MachineDominatorTree*>
}
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index e7e728c1be28..8d040beff7a6 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -826,26 +826,35 @@ public:
getOperand(0).getSubReg() == getOperand(1).getSubReg();
}
- /// Return true if this is a transient instruction that is
- /// either very likely to be eliminated during register allocation (such as
- /// copy-like instructions), or if this instruction doesn't have an
- /// execution-time cost.
+ /// Return true if this instruction doesn't produce any output in the form of
+ /// executable instructions.
+ bool isMetaInstruction() const {
+ switch (getOpcode()) {
+ default:
+ return false;
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::CFI_INSTRUCTION:
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::GC_LABEL:
+ case TargetOpcode::DBG_VALUE:
+ return true;
+ }
+ }
+
+ /// Return true if this is a transient instruction that is either very likely
+ /// to be eliminated during register allocation (such as copy-like
+ /// instructions), or if this instruction doesn't have an execution-time cost.
bool isTransient() const {
- switch(getOpcode()) {
- default: return false;
+ switch (getOpcode()) {
+ default:
+ return isMetaInstruction();
// Copy-like instructions are usually eliminated during register allocation.
case TargetOpcode::PHI:
case TargetOpcode::COPY:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
- // Pseudo-instructions that don't produce any real output.
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- case TargetOpcode::CFI_INSTRUCTION:
- case TargetOpcode::EH_LABEL:
- case TargetOpcode::GC_LABEL:
- case TargetOpcode::DBG_VALUE:
return true;
}
}
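The isMetaInstruction() predicate factored out above is intended for queries like "how many instructions will actually be emitted". A small illustrative sketch (the helper name is invented):

// Count instructions in MBB that are not pure bookkeeping (labels, KILLs,
// CFI directives, DBG_VALUEs, IMPLICIT_DEFs).
static unsigned countEmittedInstrs(const llvm::MachineBasicBlock &MBB) {
  unsigned N = 0;
  for (const llvm::MachineInstr &MI : MBB)
    if (!MI.isMetaInstruction())
      ++N;
  return N;
}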
diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h
index 6e5c6473ff4a..1026654da3d7 100644
--- a/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -642,6 +642,11 @@ public:
///
void setRegBank(unsigned Reg, const RegisterBank &RegBank);
+ void setRegClassOrRegBank(unsigned Reg,
+ const RegClassOrRegBank &RCOrRB) {
+ VRegInfo[Reg].first = RCOrRB;
+ }
+
/// constrainRegClass - Constrain the register class of the specified virtual
/// register to be a common subclass of RC and the current register class,
/// but only if the new class has at least MinNumRegs registers. Return the
diff --git a/include/llvm/CodeGen/MachineValueType.h b/include/llvm/CodeGen/MachineValueType.h
index e92bb7f74967..d991e4c216d9 100644
--- a/include/llvm/CodeGen/MachineValueType.h
+++ b/include/llvm/CodeGen/MachineValueType.h
@@ -26,7 +26,7 @@ namespace llvm {
/// Machine Value Type. Every type that is supported natively by some
/// processor targeted by LLVM occurs here. This means that any legal value
/// type can be represented by an MVT.
-class MVT {
+ class MVT {
public:
enum SimpleValueType : uint8_t {
// Simple value types that aren't explicitly part of this enumeration
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
index 99afd8c5c9ab..97aa2aace822 100644
--- a/include/llvm/CodeGen/ScheduleDAG.h
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -52,14 +52,14 @@ class TargetRegisterInfo;
/// These are the different kinds of scheduling dependencies.
enum Kind {
Data, ///< Regular data dependence (aka true-dependence).
- Anti, ///< A register anti-dependedence (aka WAR).
+ Anti, ///< A register anti-dependence (aka WAR).
Output, ///< A register output-dependence (aka WAW).
Order ///< Any other ordering dependency.
};
// Strong dependencies must be respected by the scheduler. Artificial
// dependencies may be removed only if they are redundant with another
- // strong depedence.
+ // strong dependence.
//
// Weak dependencies may be violated by the scheduling strategy, but only if
// the strategy can prove it is correct to do so.
@@ -342,7 +342,7 @@ class TargetRegisterInfo;
/// BoundaryNodes can have DAG edges, including Data edges, but they do not
/// correspond to schedulable entities (e.g. instructions) and do not have a
/// valid ID. Consequently, always check for boundary nodes before accessing
- /// an assoicative data structure keyed on node ID.
+ /// an associative data structure keyed on node ID.
bool isBoundaryNode() const { return NodeNum == BoundaryID; }
/// Assigns the representative SDNode for this SUnit. This may be used
diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 21e1740aa6b8..f5f5bfd45e79 100644
--- a/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SparseMultiSet.h"
#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Compiler.h"
@@ -224,7 +225,7 @@ namespace llvm {
MachineInstr *FirstDbgValue;
/// Set of live physical registers for updating kill flags.
- BitVector LiveRegs;
+ LivePhysRegs LiveRegs;
public:
explicit ScheduleDAGInstrs(MachineFunction &mf,
@@ -311,7 +312,7 @@ namespace llvm {
std::string getDAGName() const override;
/// Fixes register kill flags that scheduling has made invalid.
- void fixupKills(MachineBasicBlock *MBB);
+ void fixupKills(MachineBasicBlock &MBB);
protected:
void initSUnits();
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index d761661f763e..493122b15704 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -1070,6 +1070,11 @@ public:
SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
ArrayRef<SDValue> Ops);
+ /// Mutate the specified strict FP node to its non-strict equivalent,
+ /// unlinking the node from its chain and dropping the metadata arguments.
+ /// The node must be a strict FP node.
+ SDNode *mutateStrictFPToFP(SDNode *Node);
+
/// These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index 35ddcf80c91f..973c5aac5281 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -612,6 +612,32 @@ public:
SDNodeBits.IsMemIntrinsic;
}
+ /// Test if this node is a strict floating point pseudo-op.
+ bool isStrictFPOpcode() {
+ switch (NodeType) {
+ default:
+ return false;
+ case ISD::STRICT_FADD:
+ case ISD::STRICT_FSUB:
+ case ISD::STRICT_FMUL:
+ case ISD::STRICT_FDIV:
+ case ISD::STRICT_FREM:
+ case ISD::STRICT_FSQRT:
+ case ISD::STRICT_FPOW:
+ case ISD::STRICT_FPOWI:
+ case ISD::STRICT_FSIN:
+ case ISD::STRICT_FCOS:
+ case ISD::STRICT_FEXP:
+ case ISD::STRICT_FEXP2:
+ case ISD::STRICT_FLOG:
+ case ISD::STRICT_FLOG10:
+ case ISD::STRICT_FLOG2:
+ case ISD::STRICT_FRINT:
+ case ISD::STRICT_FNEARBYINT:
+ return true;
+ }
+ }
+
/// Test if this node has a post-isel opcode, directly
/// corresponding to a MachineInstr opcode.
bool isMachineOpcode() const { return NodeType < 0; }
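Together with the mutateStrictFPToFP() hook added to SelectionDAG above, the new isStrictFPOpcode() test suggests a selection-time flow along these lines; this is a sketch, not the patch's actual call site:

// If exception/rounding semantics no longer need to be preserved, replace a
// STRICT_* node with its ordinary counterpart before instruction selection.
static llvm::SDNode *relaxStrictFP(llvm::SelectionDAG &DAG, llvm::SDNode *N) {
  if (N->isStrictFPOpcode())
    N = DAG.mutateStrictFPToFP(N); // e.g. STRICT_FADD -> FADD, chain dropped
  return N;
}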
diff --git a/include/llvm/DebugInfo/CodeView/CVRecord.h b/include/llvm/DebugInfo/CodeView/CVRecord.h
index 71ea82b6a9ab..68ad09982202 100644
--- a/include/llvm/DebugInfo/CodeView/CVRecord.h
+++ b/include/llvm/DebugInfo/CodeView/CVRecord.h
@@ -14,6 +14,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
@@ -50,6 +51,13 @@ public:
Optional<uint32_t> Hash;
};
+template <typename Kind> struct RemappedRecord {
+ explicit RemappedRecord(const CVRecord<Kind> &R) : OriginalRecord(R) {}
+
+ CVRecord<Kind> OriginalRecord;
+ SmallVector<std::pair<uint32_t, TypeIndex>, 8> Mappings;
+};
+
} // end namespace codeview
template <typename Kind>
diff --git a/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h b/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
index 4bc8fbefd5d8..70ccc867cd38 100644
--- a/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
+++ b/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h
@@ -46,6 +46,7 @@ Error visitMemberRecordStream(ArrayRef<uint8_t> FieldList,
TypeVisitorCallbacks &Callbacks);
Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks,
+ VisitorDataSource Source = VDS_BytesPresent,
TypeServerHandler *TS = nullptr);
Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS = nullptr);
diff --git a/include/llvm/DebugInfo/CodeView/TypeDeserializer.h b/include/llvm/DebugInfo/CodeView/TypeDeserializer.h
index 2142d4a2dec7..a9c5cf42fc5b 100644
--- a/include/llvm/DebugInfo/CodeView/TypeDeserializer.h
+++ b/include/llvm/DebugInfo/CodeView/TypeDeserializer.h
@@ -40,6 +40,17 @@ class TypeDeserializer : public TypeVisitorCallbacks {
public:
TypeDeserializer() = default;
+ template <typename T> static Error deserializeAs(CVType &CVT, T &Record) {
+ MappingInfo I(CVT.content());
+ if (auto EC = I.Mapping.visitTypeBegin(CVT))
+ return EC;
+ if (auto EC = I.Mapping.visitKnownRecord(CVT, Record))
+ return EC;
+ if (auto EC = I.Mapping.visitTypeEnd(CVT))
+ return EC;
+ return Error::success();
+ }
+
Error visitTypeBegin(CVType &Record) override {
assert(!Mapping && "Already in a type mapping!");
Mapping = llvm::make_unique<MappingInfo>(Record.content());
diff --git a/include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h b/include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h
new file mode 100644
index 000000000000..82ceb5038316
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h
@@ -0,0 +1,33 @@
+//===- TypeIndexDiscovery.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+enum class TiRefKind { TypeRef, IndexRef };
+struct TiReference {
+ TiRefKind Kind;
+ uint32_t Offset;
+ uint32_t Count;
+};
+
+void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
+ SmallVectorImpl<TiReference> &Refs);
+void discoverTypeIndices(const CVType &Type,
+ SmallVectorImpl<TiReference> &Refs);
+}
+}
+
+#endif
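The TiReference triples returned by discoverTypeIndices() describe where runs of 32-bit type/id indices live inside a record's payload. A hedged sketch of consuming them (the wrapper function is invented for illustration):

static void listTypeIndexRefs(const llvm::codeview::CVType &Record) {
  llvm::SmallVector<llvm::codeview::TiReference, 4> Refs;
  llvm::codeview::discoverTypeIndices(Record, Refs);
  for (const auto &Ref : Refs) {
    // Ref.Offset / Ref.Count locate a run of indices in the record's payload;
    // Ref.Kind says whether they reference the type or the id stream.
    (void)Ref;
  }
}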
diff --git a/include/llvm/DebugInfo/CodeView/TypeRecord.h b/include/llvm/DebugInfo/CodeView/TypeRecord.h
index 1f10872c8768..92745ebfcded 100644
--- a/include/llvm/DebugInfo/CodeView/TypeRecord.h
+++ b/include/llvm/DebugInfo/CodeView/TypeRecord.h
@@ -35,6 +35,7 @@ using support::ulittle16_t;
using support::ulittle32_t;
typedef CVRecord<TypeLeafKind> CVType;
+typedef RemappedRecord<TypeLeafKind> RemappedType;
struct CVMemberRecord {
TypeLeafKind Kind;
@@ -278,15 +279,9 @@ public:
Attrs(calcAttrs(PK, PM, PO, Size)) {}
PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
- PointerOptions PO, uint8_t Size,
- const MemberPointerInfo &Member)
+ PointerOptions PO, uint8_t Size, const MemberPointerInfo &MPI)
: TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
- Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(Member) {}
-
- PointerRecord(TypeIndex ReferentType, uint32_t Attrs,
- const MemberPointerInfo &Member)
- : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
- Attrs(Attrs), MemberInfo(Member) {}
+ Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(MPI) {}
TypeIndex getReferentType() const { return ReferentType; }
diff --git a/include/llvm/DebugInfo/CodeView/TypeSerializer.h b/include/llvm/DebugInfo/CodeView/TypeSerializer.h
index 6dad98247136..435c43f7edcb 100644
--- a/include/llvm/DebugInfo/CodeView/TypeSerializer.h
+++ b/include/llvm/DebugInfo/CodeView/TypeSerializer.h
@@ -17,7 +17,6 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
@@ -26,6 +25,8 @@ namespace llvm {
namespace codeview {
+class TypeHasher;
+
class TypeSerializer : public TypeVisitorCallbacks {
struct SubRecord {
SubRecord(TypeLeafKind K, uint32_t S) : Kind(K), Size(S) {}
@@ -45,14 +46,13 @@ class TypeSerializer : public TypeVisitorCallbacks {
}
};
- typedef SmallVector<MutableArrayRef<uint8_t>, 2> RecordList;
+ typedef SmallVector<MutableArrayRef<uint8_t>, 2> MutableRecordList;
static constexpr uint8_t ContinuationLength = 8;
BumpPtrAllocator &RecordStorage;
RecordSegment CurrentSegment;
- RecordList FieldListSegments;
+ MutableRecordList FieldListSegments;
- TypeIndex LastTypeIndex;
Optional<TypeLeafKind> TypeKind;
Optional<TypeLeafKind> MemberKind;
std::vector<uint8_t> RecordBuffer;
@@ -60,28 +60,35 @@ class TypeSerializer : public TypeVisitorCallbacks {
BinaryStreamWriter Writer;
TypeRecordMapping Mapping;
- RecordList SeenRecords;
- StringMap<TypeIndex> HashedRecords;
+ /// Private type record hashing implementation details are handled here.
+ std::unique_ptr<TypeHasher> Hasher;
+
+ /// Contains a list of all records indexed by TypeIndex.toArrayIndex().
+ SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;
+
+ /// Temporary storage that we use to copy a record's data while re-writing
+ /// its type indices.
+ SmallVector<uint8_t, 256> RemapStorage;
+
+ TypeIndex nextTypeIndex() const;
bool isInFieldList() const;
- TypeIndex calcNextTypeIndex() const;
- TypeIndex incrementTypeIndex();
MutableArrayRef<uint8_t> getCurrentSubRecordData();
MutableArrayRef<uint8_t> getCurrentRecordData();
Error writeRecordPrefix(TypeLeafKind Kind);
- TypeIndex insertRecordBytesPrivate(MutableArrayRef<uint8_t> Record);
- TypeIndex insertRecordBytesWithCopy(CVType &Record,
- MutableArrayRef<uint8_t> Data);
Expected<MutableArrayRef<uint8_t>>
addPadding(MutableArrayRef<uint8_t> Record);
public:
- explicit TypeSerializer(BumpPtrAllocator &Storage);
+ explicit TypeSerializer(BumpPtrAllocator &Storage, bool Hash = true);
+ ~TypeSerializer();
+
+ void reset();
- ArrayRef<MutableArrayRef<uint8_t>> records() const;
- TypeIndex getLastTypeIndex() const;
- TypeIndex insertRecordBytes(MutableArrayRef<uint8_t> Record);
+ ArrayRef<ArrayRef<uint8_t>> records() const;
+ TypeIndex insertRecordBytes(ArrayRef<uint8_t> &Record);
+ TypeIndex insertRecord(const RemappedType &Record);
Expected<TypeIndex> visitTypeEndGetIndex(CVType &Record);
Error visitTypeBegin(CVType &Record) override;
diff --git a/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h b/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
index 65bcf9812e68..3ad2b4e9c92f 100644
--- a/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
+++ b/include/llvm/DebugInfo/CodeView/TypeStreamMerger.h
@@ -22,12 +22,75 @@ class TypeIndex;
class TypeServerHandler;
class TypeTableBuilder;
-/// Merges one type stream into another. Returns true on success.
-Error mergeTypeStreams(TypeTableBuilder &DestIdStream,
- TypeTableBuilder &DestTypeStream,
+/// \brief Merge one set of type records into another. This method assumes
+/// that all records are type records, and there are no Id records present.
+///
+/// \param Dest The table to store the re-written type records into.
+///
+/// \param SourceToDest A vector, indexed by the TypeIndex in the source
+/// type stream, that contains the index of the corresponding type record
+/// in the destination stream.
+///
+/// \param Handler (optional) If non-null, an interface that gets invoked
+/// to handle type server records.
+///
+/// \param Types The collection of types to merge in.
+///
+/// \returns Error::success() if the operation succeeded, otherwise an
+/// appropriate error code.
+Error mergeTypeRecords(TypeTableBuilder &Dest,
SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler, const CVTypeArray &Types);
+/// \brief Merge one set of id records into another. This method assumes
+/// that all records are id records, and there are no Type records present.
+/// However, since Id records can refer back to Type records, this method
+/// assumes that the referenced type records have also been merged into
+/// another type stream (for example using the above method), and accepts
+/// the mapping from source to dest for that stream so that it can re-write
+/// the type record mappings accordingly.
+///
+/// \param Dest The table to store the re-written id records into.
+///
+/// \param Types The mapping to use for the type records that these id
+/// records refer to.
+///
+/// \param SourceToDest A vector, indexed by the TypeIndex in the source
+/// id stream, that contains the index of the corresponding id record
+/// in the destination stream.
+///
+/// \param Ids The collection of id records to merge in.
+///
+/// \returns Error::success() if the operation succeeded, otherwise an
+/// appropriate error code.
+Error mergeIdRecords(TypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
+ SmallVectorImpl<TypeIndex> &SourceToDest,
+ const CVTypeArray &Ids);
+
+/// \brief Merge a unified set of type and id records, splitting them into
+/// separate output streams.
+///
+/// \param DestIds The table to store the re-written id records into.
+///
+/// \param DestTypes The table to store the re-written type records into.
+///
+/// \param SourceToDest A vector, indexed by the TypeIndex in the source
+/// id stream, that contains the index of the corresponding id record
+/// in the destination stream.
+///
+/// \param Handler (optional) If non-null, an interface that gets invoked
+/// to handle type server records.
+///
+/// \param IdsAndTypes The collection of id records to merge in.
+///
+/// \returns Error::success() if the operation succeeded, otherwise an
+/// appropriate error code.
+Error mergeTypeAndIdRecords(TypeTableBuilder &DestIds,
+ TypeTableBuilder &DestTypes,
+ SmallVectorImpl<TypeIndex> &SourceToDest,
+ TypeServerHandler *Handler,
+ const CVTypeArray &IdsAndTypes);
+
} // end namespace codeview
} // end namespace llvm
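The three entry points documented above compose as follows in the common "one unified input stream" case. A sketch under the assumption that DestIds/DestTypes are TypeTableBuilder instances and IdsAndTypes is the source CVTypeArray; real callers keep SourceToDest around to remap symbol records afterwards:

static llvm::Error
mergeUnifiedStream(llvm::codeview::TypeTableBuilder &DestIds,
                   llvm::codeview::TypeTableBuilder &DestTypes,
                   const llvm::codeview::CVTypeArray &IdsAndTypes) {
  llvm::SmallVector<llvm::codeview::TypeIndex, 128> SourceToDest;
  return llvm::codeview::mergeTypeAndIdRecords(
      DestIds, DestTypes, SourceToDest, /*Handler=*/nullptr, IdsAndTypes);
}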
diff --git a/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h b/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
index 102bee4b0801..7bdc9ecb20cf 100644
--- a/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
+++ b/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
@@ -64,10 +64,14 @@ public:
return *ExpectedIndex;
}
- TypeIndex writeSerializedRecord(MutableArrayRef<uint8_t> Record) {
+ TypeIndex writeSerializedRecord(ArrayRef<uint8_t> Record) {
return Serializer.insertRecordBytes(Record);
}
+ TypeIndex writeSerializedRecord(const RemappedType &Record) {
+ return Serializer.insertRecord(Record);
+ }
+
template <typename TFunc> void ForEachRecord(TFunc Func) {
uint32_t Index = TypeIndex::FirstNonSimpleIndex;
@@ -77,23 +81,24 @@ public:
}
}
- ArrayRef<MutableArrayRef<uint8_t>> records() const {
- return Serializer.records();
- }
+ ArrayRef<ArrayRef<uint8_t>> records() const { return Serializer.records(); }
};
class FieldListRecordBuilder {
TypeTableBuilder &TypeTable;
+ BumpPtrAllocator Allocator;
TypeSerializer TempSerializer;
CVType Type;
public:
explicit FieldListRecordBuilder(TypeTableBuilder &TypeTable)
- : TypeTable(TypeTable), TempSerializer(TypeTable.getAllocator()) {
+ : TypeTable(TypeTable), TempSerializer(Allocator, false) {
Type.Type = TypeLeafKind::LF_FIELDLIST;
}
void begin() {
+ TempSerializer.reset();
+
if (auto EC = TempSerializer.visitTypeBegin(Type))
consumeError(std::move(EC));
}
@@ -109,23 +114,19 @@ public:
consumeError(std::move(EC));
}
- TypeIndex end() {
+ TypeIndex end(bool Write) {
+ TypeIndex Index;
if (auto EC = TempSerializer.visitTypeEnd(Type)) {
consumeError(std::move(EC));
return TypeIndex();
}
- TypeIndex Index;
- for (auto Record : TempSerializer.records()) {
- Index = TypeTable.writeSerializedRecord(Record);
+ if (Write) {
+ for (auto Record : TempSerializer.records())
+ Index = TypeTable.writeSerializedRecord(Record);
}
- return Index;
- }
- /// Stop building the record.
- void reset() {
- if (auto EC = TempSerializer.visitTypeEnd(Type))
- consumeError(std::move(EC));
+ return Index;
}
};
diff --git a/include/llvm/DebugInfo/CodeView/TypeTableCollection.h b/include/llvm/DebugInfo/CodeView/TypeTableCollection.h
index 7de562a19a74..42b62ba2b6ce 100644
--- a/include/llvm/DebugInfo/CodeView/TypeTableCollection.h
+++ b/include/llvm/DebugInfo/CodeView/TypeTableCollection.h
@@ -18,7 +18,7 @@ namespace codeview {
class TypeTableCollection : public TypeCollection {
public:
- explicit TypeTableCollection(ArrayRef<MutableArrayRef<uint8_t>> Records);
+ explicit TypeTableCollection(ArrayRef<ArrayRef<uint8_t>> Records);
Optional<TypeIndex> getFirst() override;
Optional<TypeIndex> getNext(TypeIndex Prev) override;
@@ -33,7 +33,7 @@ private:
bool hasCapacityFor(TypeIndex Index) const;
void ensureTypeExists(TypeIndex Index);
- ArrayRef<MutableArrayRef<uint8_t>> Records;
+ ArrayRef<ArrayRef<uint8_t>> Records;
TypeDatabase Database;
};
}
diff --git a/include/llvm/DebugInfo/DWARF/DWARFContext.h b/include/llvm/DebugInfo/DWARF/DWARFContext.h
index d3a63edf10ff..7fa68f3f2314 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -46,7 +46,8 @@ class raw_ostream;
/// Reads a value from data extractor and applies a relocation to the result if
/// one exists for the given offset.
uint64_t getRelocatedValue(const DataExtractor &Data, uint32_t Size,
- uint32_t *Off, const RelocAddrMap *Relocs);
+ uint32_t *Off, const RelocAddrMap *Relocs,
+ uint64_t *SecNdx = nullptr);
/// DWARFContext
/// This data structure is the top level entity that deals with dwarf debug
@@ -71,6 +72,14 @@ class DWARFContext : public DIContext {
std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
std::unique_ptr<DWARFDebugLocDWO> LocDWO;
+ struct DWOFile {
+ object::OwningBinary<object::ObjectFile> File;
+ std::unique_ptr<DWARFContext> Context;
+ };
+ StringMap<std::weak_ptr<DWOFile>> DWOFiles;
+ std::weak_ptr<DWOFile> DWP;
+ bool CheckedForDWP = false;
+
/// Read compile units from the debug_info section (if necessary)
/// and store them in CUs.
void parseCompileUnits();
@@ -165,6 +174,8 @@ public:
return DWOCUs[index].get();
}
+ DWARFCompileUnit *getDWOCompileUnitForHash(uint64_t Hash);
+
/// Get a DIE given an exact offset.
DWARFDie getDIEForOffset(uint32_t Offset);
@@ -206,6 +217,7 @@ public:
DIInliningInfo getInliningInfoForAddress(uint64_t Address,
DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ virtual StringRef getFileName() const = 0;
virtual bool isLittleEndian() const = 0;
virtual uint8_t getAddressSize() const = 0;
virtual const DWARFSection &getInfoSection() = 0;
@@ -248,6 +260,8 @@ public:
return version == 2 || version == 3 || version == 4 || version == 5;
}
+ std::shared_ptr<DWARFContext> getDWOContext(StringRef AbsolutePath);
+
private:
/// Return the compile unit that includes an offset (relative to .debug_info).
DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset);
@@ -263,6 +277,7 @@ private:
class DWARFContextInMemory : public DWARFContext {
virtual void anchor();
+ StringRef FileName;
bool IsLittleEndian;
uint8_t AddressSize;
DWARFSection InfoSection;
@@ -316,6 +331,7 @@ public:
uint8_t AddrSize,
bool isLittleEndian = sys::IsLittleEndianHost);
+ StringRef getFileName() const override { return FileName; }
bool isLittleEndian() const override { return IsLittleEndian; }
uint8_t getAddressSize() const override { return AddressSize; }
const DWARFSection &getInfoSection() override { return InfoSection; }
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h b/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
index 95ec1be62a79..b436711ae6ed 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
@@ -25,6 +25,7 @@ class raw_ostream;
struct DWARFAddressRange {
uint64_t LowPC;
uint64_t HighPC;
+ uint64_t SectionIndex;
};
/// DWARFAddressRangesVector - represents a set of absolute address ranges.
@@ -44,6 +45,8 @@ public:
/// address past the end of the address range. The ending address must
/// be greater than or equal to the beginning address.
uint64_t EndAddress;
+ /// The section index this range belongs to.
+ uint64_t SectionIndex;
/// The end of any given range list is marked by an end of list entry,
/// which consists of a 0 for the beginning address offset
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDie.h b/include/llvm/DebugInfo/DWARF/DWARFDie.h
index ca94a90fabfc..fa41b9e293c0 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDie.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDie.h
@@ -195,7 +195,8 @@ public:
/// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
/// Returns true if both attributes are present.
- bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC) const;
+ bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC,
+ uint64_t &SectionIndex) const;
/// Get the address ranges for this DIE.
///
diff --git a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index a30e0be9c3c3..3a781dde8929 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -47,6 +47,7 @@ private:
const char *cstr;
};
const uint8_t *data = nullptr;
+ uint64_t SectionIndex; /// Section index for reference forms.
};
dwarf::Form Form; /// Form for this value.
@@ -58,6 +59,7 @@ public:
dwarf::Form getForm() const { return Form; }
uint64_t getRawUValue() const { return Value.uval; }
+ uint64_t getSectionIndex() const { return Value.SectionIndex; }
void setForm(dwarf::Form F) { Form = F; }
void setUValue(uint64_t V) { Value.uval = V; }
void setSValue(int64_t V) { Value.sval = V; }
diff --git a/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h b/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h
index fabacc0abcea..f143de334737 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h
@@ -16,7 +16,10 @@
namespace llvm {
+/// RelocAddrEntry contains the relocated value and the section index.
+/// The section index is -1LL if the relocation points to an absolute symbol.
struct RelocAddrEntry {
+ uint64_t SectionIndex;
uint64_t Value;
};
diff --git a/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/include/llvm/DebugInfo/DWARF/DWARFUnit.h
index ae7fd24ce5bb..d0f7bd0d623f 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFUnit.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -143,17 +143,7 @@ class DWARFUnit {
typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>
die_iterator_range;
- class DWOHolder {
- object::OwningBinary<object::ObjectFile> DWOFile;
- std::unique_ptr<DWARFContext> DWOContext;
- DWARFUnit *DWOU = nullptr;
-
- public:
- DWOHolder(StringRef DWOPath, uint64_t DWOId);
-
- DWARFUnit *getUnit() const { return DWOU; }
- };
- std::unique_ptr<DWOHolder> DWO;
+ std::shared_ptr<DWARFUnit> DWO;
const DWARFUnitIndex::Entry *IndexEntry;
diff --git a/include/llvm/DebugInfo/MSF/MappedBlockStream.h b/include/llvm/DebugInfo/MSF/MappedBlockStream.h
index c91f6f725c80..d68f5f70c83e 100644
--- a/include/llvm/DebugInfo/MSF/MappedBlockStream.h
+++ b/include/llvm/DebugInfo/MSF/MappedBlockStream.h
@@ -43,8 +43,8 @@ class MappedBlockStream : public BinaryStream {
friend class WritableMappedBlockStream;
public:
static std::unique_ptr<MappedBlockStream>
- createStream(uint32_t BlockSize, uint32_t NumBlocks,
- const MSFStreamLayout &Layout, BinaryStreamRef MsfData);
+ createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
+ BinaryStreamRef MsfData);
static std::unique_ptr<MappedBlockStream>
createIndexedStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
@@ -74,12 +74,11 @@ public:
void invalidateCache();
uint32_t getBlockSize() const { return BlockSize; }
- uint32_t getNumBlocks() const { return NumBlocks; }
+ uint32_t getNumBlocks() const { return StreamLayout.Blocks.size(); }
uint32_t getStreamLength() const { return StreamLayout.Length; }
protected:
- MappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks,
- const MSFStreamLayout &StreamLayout,
+ MappedBlockStream(uint32_t BlockSize, const MSFStreamLayout &StreamLayout,
BinaryStreamRef MsfData);
private:
@@ -91,7 +90,6 @@ private:
ArrayRef<uint8_t> &Buffer);
const uint32_t BlockSize;
- const uint32_t NumBlocks;
const MSFStreamLayout StreamLayout;
BinaryStreamRef MsfData;
@@ -103,8 +101,8 @@ private:
class WritableMappedBlockStream : public WritableBinaryStream {
public:
static std::unique_ptr<WritableMappedBlockStream>
- createStream(uint32_t BlockSize, uint32_t NumBlocks,
- const MSFStreamLayout &Layout, WritableBinaryStreamRef MsfData);
+ createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
+ WritableBinaryStreamRef MsfData);
static std::unique_ptr<WritableMappedBlockStream>
createIndexedStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
@@ -139,7 +137,7 @@ public:
uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); }
protected:
- WritableMappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks,
+ WritableMappedBlockStream(uint32_t BlockSize,
const MSFStreamLayout &StreamLayout,
WritableBinaryStreamRef MsfData);
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
index bcac182e2145..e116f314ac0e 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
@@ -82,6 +82,7 @@ private:
Error finalize();
uint32_t calculateModiSubstreamSize() const;
+ uint32_t calculateNamesOffset() const;
uint32_t calculateSectionContribsStreamSize() const;
uint32_t calculateSectionMapStreamSize() const;
uint32_t calculateFileInfoSubstreamSize() const;
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBTypeServerHandler.h b/include/llvm/DebugInfo/PDB/Native/PDBTypeServerHandler.h
index bfd38b6c80ec..196ba4d6ffbd 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBTypeServerHandler.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBTypeServerHandler.h
@@ -11,8 +11,7 @@
#define LLVM_DEBUGINFO_PDB_PDBTYPESERVERHANDLER_H
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeServerHandler.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
@@ -39,7 +38,7 @@ private:
bool RevisitAlways;
std::unique_ptr<NativeSession> Session;
- SmallVector<SmallString<64>, 4> SearchPaths;
+ StringSet<> SearchPaths;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Native/TpiStream.h b/include/llvm/DebugInfo/PDB/Native/TpiStream.h
index c5549983ed43..17fba9991c2e 100644
--- a/include/llvm/DebugInfo/PDB/Native/TpiStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/TpiStream.h
@@ -21,6 +21,9 @@
#include "llvm/Support/Error.h"
namespace llvm {
+namespace codeview {
+class LazyRandomTypeCollection;
+}
namespace msf {
class MappedBlockStream;
}
@@ -53,12 +56,16 @@ public:
codeview::CVTypeRange types(bool *HadError) const;
const codeview::CVTypeArray &typeArray() const { return TypeRecords; }
+ codeview::LazyRandomTypeCollection &typeCollection() { return *Types; }
+
Error commit();
private:
const PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;
+ std::unique_ptr<codeview::LazyRandomTypeCollection> Types;
+
codeview::CVTypeArray TypeRecords;
std::unique_ptr<BinaryStream> HashStream;
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index d4a896c01867..ace309ed95a4 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -322,7 +322,7 @@ template <> struct DenseMapInfo<AttributeSet> {
/// the AttributeList object. The function attributes are at index
/// `AttributeList::FunctionIndex', the return value is at index
/// `AttributeList::ReturnIndex', and the attributes for the parameters start at
-/// index `1'.
+/// index `AttributeList::FirstArgIndex'.
class AttributeList {
public:
enum AttrIndex : unsigned {
@@ -347,8 +347,8 @@ public:
/// \brief Create an AttributeList with the specified parameters in it.
static AttributeList get(LLVMContext &C,
ArrayRef<std::pair<unsigned, Attribute>> Attrs);
- static AttributeList
- get(LLVMContext &C, ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
+ static AttributeList get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
/// \brief Create an AttributeList from attribute sets for a function, its
/// return value, and all of its arguments.
@@ -356,13 +356,11 @@ public:
AttributeSet RetAttrs,
ArrayRef<AttributeSet> ArgAttrs);
- static AttributeList
- getImpl(LLVMContext &C,
- ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
-
private:
explicit AttributeList(AttributeListImpl *LI) : pImpl(LI) {}
+ static AttributeList getImpl(LLVMContext &C, ArrayRef<AttributeSet> AttrSets);
+
public:
AttributeList() = default;
@@ -521,39 +519,31 @@ public:
/// \brief Return the attributes at the index as a string.
std::string getAsString(unsigned Index, bool InAttrGrp = false) const;
- using iterator = ArrayRef<Attribute>::iterator;
+ //===--------------------------------------------------------------------===//
+ // AttributeList Introspection
+ //===--------------------------------------------------------------------===//
+
+ typedef const AttributeSet *iterator;
+ iterator begin() const;
+ iterator end() const;
+
+ unsigned getNumAttrSets() const;
- iterator begin(unsigned Slot) const;
- iterator end(unsigned Slot) const;
+ /// Use these to iterate over the valid attribute indices.
+ unsigned index_begin() const { return AttributeList::FunctionIndex; }
+ unsigned index_end() const { return getNumAttrSets() - 1; }
/// operator==/!= - Provide equality predicates.
bool operator==(const AttributeList &RHS) const { return pImpl == RHS.pImpl; }
bool operator!=(const AttributeList &RHS) const { return pImpl != RHS.pImpl; }
- //===--------------------------------------------------------------------===//
- // AttributeList Introspection
- //===--------------------------------------------------------------------===//
-
/// \brief Return a raw pointer that uniquely identifies this attribute list.
void *getRawPointer() const {
return pImpl;
}
/// \brief Return true if there are no attributes.
- bool isEmpty() const {
- return getNumSlots() == 0;
- }
-
- /// \brief Return the number of slots used in this attribute list. This is
- /// the number of arguments that have an attribute set on them (including the
- /// function itself).
- unsigned getNumSlots() const;
-
- /// \brief Return the index for the given slot.
- unsigned getSlotIndex(unsigned Slot) const;
-
- /// \brief Return the attributes at the given slot.
- AttributeSet getSlotAttributes(unsigned Slot) const;
+ bool isEmpty() const { return pImpl == nullptr; }
void dump() const;
};
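A minimal usage sketch of the new set-based iteration that replaces the removed slot API; AL is an assumed AttributeList and the loop only counts sets:

unsigned NumSets = 0;
for (llvm::AttributeSet AS : AL) {
  (void)AS;     // each element is the AttributeSet for one index
  ++NumSets;
}
// NumSets should match AL.getNumAttrSets().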
diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h
index c917b1f2cada..235cb57cfd09 100644
--- a/include/llvm/IR/BasicBlock.h
+++ b/include/llvm/IR/BasicBlock.h
@@ -33,6 +33,7 @@ class Function;
class LandingPadInst;
class LLVMContext;
class Module;
+class PHINode;
class TerminatorInst;
class ValueSymbolTable;
@@ -261,6 +262,50 @@ public:
inline const Instruction &back() const { return InstList.back(); }
inline Instruction &back() { return InstList.back(); }
+ /// Iterator to walk just the phi nodes in the basic block.
+ template <typename PHINodeT = PHINode, typename BBIteratorT = iterator>
+ class phi_iterator_impl
+ : public iterator_facade_base<phi_iterator_impl<PHINodeT, BBIteratorT>,
+ std::forward_iterator_tag, PHINodeT> {
+ friend BasicBlock;
+
+ PHINodeT *PN;
+
+ phi_iterator_impl(PHINodeT *PN) : PN(PN) {}
+
+ public:
+ // Allow default construction to build variables, but this doesn't build
+ // a useful iterator.
+ phi_iterator_impl() = default;
+
+ // Allow conversion between instantiations where valid.
+ template <typename PHINodeU, typename BBIteratorU>
+ phi_iterator_impl(const phi_iterator_impl<PHINodeU, BBIteratorU> &Arg)
+ : PN(Arg.PN) {}
+
+ bool operator==(const phi_iterator_impl &Arg) const { return PN == Arg.PN; }
+
+ PHINodeT &operator*() const { return *PN; }
+
+ using phi_iterator_impl::iterator_facade_base::operator++;
+ phi_iterator_impl &operator++() {
+ assert(PN && "Cannot increment the end iterator!");
+ PN = dyn_cast<PHINodeT>(std::next(BBIteratorT(PN)));
+ return *this;
+ }
+ };
+ typedef phi_iterator_impl<> phi_iterator;
+ typedef phi_iterator_impl<const PHINode, BasicBlock::const_iterator>
+ const_phi_iterator;
+
+ /// Returns a range that iterates over the phis in the basic block.
+ ///
+ /// Note that this cannot be used with basic blocks that have no terminator.
+ iterator_range<const_phi_iterator> phis() const {
+ return const_cast<BasicBlock *>(this)->phis();
+ }
+ iterator_range<phi_iterator> phis();
+
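A short sketch of the new range, assuming BB is a valid BasicBlock pointer whose block has a terminator (as the comment above requires):

for (const llvm::PHINode &Phi : BB->phis())
  llvm::errs() << "phi with " << Phi.getNumIncomingValues()
               << " incoming values\n";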
/// \brief Return the underlying instruction list container.
///
/// Currently you need to access the underlying instruction list container
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 05e3315cbab2..2ae98d9e35b0 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -171,6 +171,7 @@ namespace llvm {
ebStrict
};
+ bool isUnaryOp() const;
RoundingMode getRoundingMode() const;
ExceptionBehavior getExceptionBehavior() const;
@@ -182,6 +183,18 @@ namespace llvm {
case Intrinsic::experimental_constrained_fmul:
case Intrinsic::experimental_constrained_fdiv:
case Intrinsic::experimental_constrained_frem:
+ case Intrinsic::experimental_constrained_sqrt:
+ case Intrinsic::experimental_constrained_pow:
+ case Intrinsic::experimental_constrained_powi:
+ case Intrinsic::experimental_constrained_sin:
+ case Intrinsic::experimental_constrained_cos:
+ case Intrinsic::experimental_constrained_exp:
+ case Intrinsic::experimental_constrained_exp2:
+ case Intrinsic::experimental_constrained_log:
+ case Intrinsic::experimental_constrained_log10:
+ case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_rint:
+ case Intrinsic::experimental_constrained_nearbyint:
return true;
default: return false;
}
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 19f6045568f4..291d16fb0d9b 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -489,8 +489,64 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
+
+ // These intrinsics are sensitive to the rounding mode, so we need constrained
+ // versions of each of them. When strict rounding and exception control are
+ // not required, the non-constrained versions of these intrinsics should be
+ // used.
+ def int_experimental_constrained_sqrt : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_powi : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_i32_ty,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_sin : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_cos : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_pow : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_log : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_log10: Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_log2 : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_exp2 : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_rint : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_nearbyint : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
}
-// FIXME: Add intrinsic for fcmp, fptrunc, fpext, fptoui and fptosi.
+// FIXME: Add intrinsics for fcmp, fptrunc, fpext, fptoui and fptosi.
+// FIXME: Add intrinsics for fabs, copysign, floor, ceil, trunc and round?
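A hedged sketch of emitting one of the new constrained intrinsics via IRBuilder; M, Builder, Ctx, and Op are assumed here (a Module*, an IRBuilder<>, the LLVMContext, and a double-typed Value*), and the metadata strings follow the spellings used by the existing constrained arithmetic intrinsics:

llvm::Function *Sqrt = llvm::Intrinsic::getDeclaration(
    M, llvm::Intrinsic::experimental_constrained_sqrt, {Builder.getDoubleTy()});
llvm::Value *Rounding = llvm::MetadataAsValue::get(
    Ctx, llvm::MDString::get(Ctx, "round.dynamic"));     // rounding-mode argument
llvm::Value *Except = llvm::MetadataAsValue::get(
    Ctx, llvm::MDString::get(Ctx, "fpexcept.strict"));   // exception-behavior argument
llvm::CallInst *Call = Builder.CreateCall(Sqrt, {Op, Rounding, Except});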
//===------------------------- Expect Intrinsics --------------------------===//
diff --git a/include/llvm/IR/IntrinsicsAMDGPU.td b/include/llvm/IR/IntrinsicsAMDGPU.td
index d7413fe9e56f..e1928546607a 100644
--- a/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -566,6 +566,16 @@ def int_amdgcn_s_getreg :
[IntrReadMem, IntrSpeculatable]
>;
+// int_amdgcn_s_getpc is provided to allow a specific style of
+// position-independent code to determine the high part of its address when
+// it is known (through convention) that the code and any data of interest
+// do not cross a 4GB address boundary. Use for any other purpose may not
+// produce the desired results, as optimizations may cause code movement,
+// especially since we explicitly use IntrNoMem to allow optimizations.
+def int_amdgcn_s_getpc :
+ GCCBuiltin<"__builtin_amdgcn_s_getpc">,
+ Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
+
// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
def int_amdgcn_interp_mov :
diff --git a/include/llvm/IR/Metadata.h b/include/llvm/IR/Metadata.h
index 92f701e01ff3..3c753260190e 100644
--- a/include/llvm/IR/Metadata.h
+++ b/include/llvm/IR/Metadata.h
@@ -1223,6 +1223,7 @@ public:
// FIXME: Fix callers and remove condition on N.
unsigned size() const { return N ? N->getNumOperands() : 0u; }
+ bool empty() const { return N ? N->getNumOperands() == 0 : true; }
T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); }
// FIXME: Fix callers and remove condition on N.
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index 3024d9e27a2f..5e1f680c5b36 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -139,9 +139,12 @@ public:
/// during the append operation.
AppendUnique = 6,
+ /// Takes the max of the two values, which are required to be integers.
+ Max = 7,
+
// Markers:
ModFlagBehaviorFirstVal = Error,
- ModFlagBehaviorLastVal = AppendUnique
+ ModFlagBehaviorLastVal = Max
};
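A small sketch of the new behavior in use; the module M and the flag name here are hypothetical:

// When two modules carrying this flag are linked, the larger integer value wins.
M.addModuleFlag(llvm::Module::Max, "example-max-flag", 7);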
/// Checks if Metadata represents a valid ModFlagBehavior, and stores the
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index 3df5244a0bd6..3ca21c15577b 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -144,6 +144,7 @@ void initializeGCMachineCodeAnalysisPass(PassRegistry&);
void initializeGCModuleInfoPass(PassRegistry&);
void initializeGCOVProfilerLegacyPassPass(PassRegistry&);
void initializeGVNHoistLegacyPassPass(PassRegistry&);
+void initializeGVNSinkLegacyPassPass(PassRegistry&);
void initializeGVNLegacyPassPass(PassRegistry&);
void initializeGlobalDCELegacyPassPass(PassRegistry&);
void initializeGlobalMergePass(PassRegistry&);
@@ -193,6 +194,7 @@ void initializeLiveVariablesPass(PassRegistry&);
void initializeLoadCombinePass(PassRegistry&);
void initializeLoadStoreVectorizerPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
+void initializeLocalizerPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLoopAccessLegacyAnalysisPass(PassRegistry&);
void initializeLoopDataPrefetchLegacyPassPass(PassRegistry&);
diff --git a/include/llvm/LTO/Config.h b/include/llvm/LTO/Config.h
index ede6637dfa4d..5ba8492db8f5 100644
--- a/include/llvm/LTO/Config.h
+++ b/include/llvm/LTO/Config.h
@@ -39,7 +39,7 @@ struct Config {
std::string CPU;
TargetOptions Options;
std::vector<std::string> MAttrs;
- Reloc::Model RelocModel = Reloc::PIC_;
+ Optional<Reloc::Model> RelocModel = Reloc::PIC_;
CodeModel::Model CodeModel = CodeModel::Default;
CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
TargetMachine::CodeGenFileType CGFileType = TargetMachine::CGFT_ObjectFile;
diff --git a/include/llvm/Object/Binary.h b/include/llvm/Object/Binary.h
index cf5d93ee9ed7..3f5a233c1ee1 100644
--- a/include/llvm/Object/Binary.h
+++ b/include/llvm/Object/Binary.h
@@ -95,9 +95,7 @@ public:
return TypeID > ID_StartObjects && TypeID < ID_EndObjects;
}
- bool isSymbolic() const {
- return isIR() || isObject();
- }
+ bool isSymbolic() const { return isIR() || isObject() || isCOFFImportFile(); }
bool isArchive() const {
return TypeID == ID_Archive;
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index 8b9b49737170..dafd1a43cb59 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -782,6 +782,7 @@ protected:
std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionIndex(DataRefImpl Sec) const override;
uint64_t getSectionSize(DataRefImpl Sec) const override;
std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const override;
diff --git a/include/llvm/Object/ELFObjectFile.h b/include/llvm/Object/ELFObjectFile.h
index d8b58b8079fa..ef2abd8c52ce 100644
--- a/include/llvm/Object/ELFObjectFile.h
+++ b/include/llvm/Object/ELFObjectFile.h
@@ -235,6 +235,7 @@ protected:
std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionIndex(DataRefImpl Sec) const override;
uint64_t getSectionSize(DataRefImpl Sec) const override;
std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const override;
@@ -646,6 +647,17 @@ uint64_t ELFObjectFile<ELFT>::getSectionAddress(DataRefImpl Sec) const {
}
template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionIndex(DataRefImpl Sec) const {
+ auto SectionsOrErr = EF.sections();
+ handleAllErrors(std::move(SectionsOrErr.takeError()),
+ [](const ErrorInfoBase &) {
+ llvm_unreachable("unable to get section index");
+ });
+ const Elf_Shdr *First = SectionsOrErr->begin();
+ return getSection(Sec) - First;
+}
+
+template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionSize(DataRefImpl Sec) const {
return getSection(Sec)->sh_size;
}
diff --git a/include/llvm/Object/MachO.h b/include/llvm/Object/MachO.h
index 29553558f72f..a4356d5977b2 100644
--- a/include/llvm/Object/MachO.h
+++ b/include/llvm/Object/MachO.h
@@ -290,6 +290,7 @@ public:
std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionIndex(DataRefImpl Sec) const override;
uint64_t getSectionSize(DataRefImpl Sec) const override;
std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const override;
diff --git a/include/llvm/Object/ObjectFile.h b/include/llvm/Object/ObjectFile.h
index 9a7bc618ffd0..ea6a9049bc1b 100644
--- a/include/llvm/Object/ObjectFile.h
+++ b/include/llvm/Object/ObjectFile.h
@@ -95,6 +95,7 @@ public:
std::error_code getName(StringRef &Result) const;
uint64_t getAddress() const;
+ uint64_t getIndex() const;
uint64_t getSize() const;
std::error_code getContents(StringRef &Result) const;
@@ -222,6 +223,7 @@ protected:
virtual std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const = 0;
virtual uint64_t getSectionAddress(DataRefImpl Sec) const = 0;
+ virtual uint64_t getSectionIndex(DataRefImpl Sec) const = 0;
virtual uint64_t getSectionSize(DataRefImpl Sec) const = 0;
virtual std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const = 0;
@@ -393,6 +395,10 @@ inline uint64_t SectionRef::getAddress() const {
return OwningObject->getSectionAddress(SectionPimpl);
}
+inline uint64_t SectionRef::getIndex() const {
+ return OwningObject->getSectionIndex(SectionPimpl);
+}
+
inline uint64_t SectionRef::getSize() const {
return OwningObject->getSectionSize(SectionPimpl);
}
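A minimal sketch of the new accessor alongside the existing ones; Obj is an assumed object::ObjectFile reference:

for (const llvm::object::SectionRef &Sec : Obj.sections()) {
  llvm::StringRef Name;
  if (!Sec.getName(Name))                      // existing accessor, returns std::error_code
    llvm::errs() << Sec.getIndex() << ": " << Name << "\n";
}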
diff --git a/include/llvm/Object/RelocVisitor.h b/include/llvm/Object/RelocVisitor.h
index 86579b7c3e3a..348179860f3e 100644
--- a/include/llvm/Object/RelocVisitor.h
+++ b/include/llvm/Object/RelocVisitor.h
@@ -40,13 +40,13 @@ public:
// TODO: Should handle multiple applied relocations via either passing in the
// previously computed value or just count paired relocations as a single
// visit.
- uint64_t visit(uint32_t RelocType, RelocationRef R, uint64_t Value = 0) {
+ uint64_t visit(uint32_t Rel, RelocationRef R, uint64_t Value = 0) {
if (isa<ELFObjectFileBase>(ObjToVisit))
- return visitELF(RelocType, R, Value);
+ return visitELF(Rel, R, Value);
if (isa<COFFObjectFile>(ObjToVisit))
- return visitCOFF(RelocType, R, Value);
+ return visitCOFF(Rel, R, Value);
if (isa<MachOObjectFile>(ObjToVisit))
- return visitMachO(RelocType, R, Value);
+ return visitMachO(Rel, R, Value);
HasError = true;
return 0;
@@ -58,214 +58,60 @@ private:
const ObjectFile &ObjToVisit;
bool HasError = false;
- uint64_t visitELF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
+ uint64_t visitELF(uint32_t Rel, RelocationRef R, uint64_t Value) {
if (ObjToVisit.getBytesInAddress() == 8) { // 64-bit object file
switch (ObjToVisit.getArch()) {
case Triple::x86_64:
- switch (RelocType) {
- case ELF::R_X86_64_NONE:
- return visitELF_X86_64_NONE(R);
- case ELF::R_X86_64_64:
- return visitELF_X86_64_64(R, Value);
- case ELF::R_X86_64_PC32:
- return visitELF_X86_64_PC32(R, Value);
- case ELF::R_X86_64_32:
- return visitELF_X86_64_32(R, Value);
- case ELF::R_X86_64_32S:
- return visitELF_X86_64_32S(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitX86_64(Rel, R, Value);
case Triple::aarch64:
case Triple::aarch64_be:
- switch (RelocType) {
- case ELF::R_AARCH64_ABS32:
- return visitELF_AARCH64_ABS32(R, Value);
- case ELF::R_AARCH64_ABS64:
- return visitELF_AARCH64_ABS64(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitAarch64(Rel, R, Value);
case Triple::bpfel:
case Triple::bpfeb:
- switch (RelocType) {
- case ELF::R_BPF_64_64:
- return visitELF_BPF_64_64(R, Value);
- case ELF::R_BPF_64_32:
- return visitELF_BPF_64_32(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitBpf(Rel, R, Value);
case Triple::mips64el:
case Triple::mips64:
- switch (RelocType) {
- case ELF::R_MIPS_32:
- return visitELF_MIPS64_32(R, Value);
- case ELF::R_MIPS_64:
- return visitELF_MIPS64_64(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitMips64(Rel, R, Value);
case Triple::ppc64le:
case Triple::ppc64:
- switch (RelocType) {
- case ELF::R_PPC64_ADDR32:
- return visitELF_PPC64_ADDR32(R, Value);
- case ELF::R_PPC64_ADDR64:
- return visitELF_PPC64_ADDR64(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitPPC64(Rel, R, Value);
case Triple::systemz:
- switch (RelocType) {
- case ELF::R_390_32:
- return visitELF_390_32(R, Value);
- case ELF::R_390_64:
- return visitELF_390_64(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitSystemz(Rel, R, Value);
case Triple::sparcv9:
- switch (RelocType) {
- case ELF::R_SPARC_32:
- case ELF::R_SPARC_UA32:
- return visitELF_SPARCV9_32(R, Value);
- case ELF::R_SPARC_64:
- case ELF::R_SPARC_UA64:
- return visitELF_SPARCV9_64(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitSparc64(Rel, R, Value);
case Triple::amdgcn:
- switch (RelocType) {
- case ELF::R_AMDGPU_ABS32:
- return visitELF_AMDGPU_ABS32(R, Value);
- case ELF::R_AMDGPU_ABS64:
- return visitELF_AMDGPU_ABS64(R, Value);
- default:
- HasError = true;
- return 0;
- }
+ return visitAmdgpu(Rel, R, Value);
default:
HasError = true;
return 0;
}
- } else if (ObjToVisit.getBytesInAddress() == 4) { // 32-bit object file
- switch (ObjToVisit.getArch()) {
- case Triple::x86:
- switch (RelocType) {
- case ELF::R_386_NONE:
- return visitELF_386_NONE(R);
- case ELF::R_386_32:
- return visitELF_386_32(R, Value);
- case ELF::R_386_PC32:
- return visitELF_386_PC32(R, Value);
- default:
- HasError = true;
- return 0;
- }
- case Triple::ppc:
- switch (RelocType) {
- case ELF::R_PPC_ADDR32:
- return visitELF_PPC_ADDR32(R, Value);
- default:
- HasError = true;
- return 0;
- }
- case Triple::arm:
- case Triple::armeb:
- switch (RelocType) {
- default:
- HasError = true;
- return 0;
- case ELF::R_ARM_ABS32:
- return visitELF_ARM_ABS32(R, Value);
- }
- case Triple::lanai:
- switch (RelocType) {
- case ELF::R_LANAI_32:
- return visitELF_Lanai_32(R, Value);
- default:
- HasError = true;
- return 0;
- }
- case Triple::mipsel:
- case Triple::mips:
- switch (RelocType) {
- case ELF::R_MIPS_32:
- return visitELF_MIPS_32(R, Value);
- default:
- HasError = true;
- return 0;
- }
- case Triple::sparc:
- switch (RelocType) {
- case ELF::R_SPARC_32:
- case ELF::R_SPARC_UA32:
- return visitELF_SPARC_32(R, Value);
- default:
- HasError = true;
- return 0;
- }
- case Triple::hexagon:
- switch (RelocType) {
- case ELF::R_HEX_32:
- return visitELF_HEX_32(R, Value);
- default:
- HasError = true;
- return 0;
- }
- default:
- HasError = true;
- return 0;
- }
- } else {
- report_fatal_error("Invalid word size in object file");
}
- }
- uint64_t visitCOFF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
- switch (ObjToVisit.getArch()) {
- case Triple::x86:
- switch (RelocType) {
- case COFF::IMAGE_REL_I386_SECREL:
- return visitCOFF_I386_SECREL(R, Value);
- case COFF::IMAGE_REL_I386_DIR32:
- return visitCOFF_I386_DIR32(R, Value);
- }
- break;
- case Triple::x86_64:
- switch (RelocType) {
- case COFF::IMAGE_REL_AMD64_SECREL:
- return visitCOFF_AMD64_SECREL(R, Value);
- case COFF::IMAGE_REL_AMD64_ADDR64:
- return visitCOFF_AMD64_ADDR64(R, Value);
- }
- break;
- }
- HasError = true;
- return 0;
- }
+ // 32-bit object file
+ assert(ObjToVisit.getBytesInAddress() == 4 &&
+ "Invalid word size in object file");
- uint64_t visitMachO(uint32_t RelocType, RelocationRef R, uint64_t Value) {
switch (ObjToVisit.getArch()) {
- default: break;
- case Triple::x86_64:
- switch (RelocType) {
- default: break;
- case MachO::X86_64_RELOC_UNSIGNED:
- return visitMACHO_X86_64_UNSIGNED(R, Value);
- }
+ case Triple::x86:
+ return visitX86(Rel, R, Value);
+ case Triple::ppc:
+ return visitPPC32(Rel, R, Value);
+ case Triple::arm:
+ case Triple::armeb:
+ return visitARM(Rel, R, Value);
+ case Triple::lanai:
+ return visitLanai(Rel, R, Value);
+ case Triple::mipsel:
+ case Triple::mips:
+ return visitMips32(Rel, R, Value);
+ case Triple::sparc:
+ return visitSparc32(Rel, R, Value);
+ case Triple::hexagon:
+ return visitHexagon(Rel, R, Value);
+ default:
+ HasError = true;
+ return 0;
}
- HasError = true;
- return 0;
}
int64_t getELFAddend(RelocationRef R) {
@@ -275,176 +121,193 @@ private:
return *AddendOrErr;
}
- /// Operations
-
- /// 386-ELF
- uint64_t visitELF_386_NONE(RelocationRef R) {
+ uint64_t visitX86_64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_X86_64_NONE:
+ return 0;
+ case ELF::R_X86_64_64:
+ return Value + getELFAddend(R);
+ case ELF::R_X86_64_PC32:
+ return Value + getELFAddend(R) - R.getOffset();
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S:
+ return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+ }
+ HasError = true;
return 0;
}
- // Ideally the Addend here will be the addend in the data for
- // the relocation. It's not actually the case for Rel relocations.
- uint64_t visitELF_386_32(RelocationRef R, uint64_t Value) {
- return Value;
- }
-
- uint64_t visitELF_386_PC32(RelocationRef R, uint64_t Value) {
- return Value - R.getOffset();
- }
-
- /// X86-64 ELF
- uint64_t visitELF_X86_64_NONE(RelocationRef R) {
+ uint64_t visitAarch64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_AARCH64_ABS32: {
+ int64_t Res = Value + getELFAddend(R);
+ if (Res < INT32_MIN || Res > UINT32_MAX)
+ HasError = true;
+ return static_cast<uint32_t>(Res);
+ }
+ case ELF::R_AARCH64_ABS64:
+ return Value + getELFAddend(R);
+ }
+ HasError = true;
return 0;
}
- uint64_t visitELF_X86_64_64(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R);
- }
-
- uint64_t visitELF_X86_64_PC32(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R) - R.getOffset();
- }
-
- uint64_t visitELF_X86_64_32(RelocationRef R, uint64_t Value) {
- return (Value + getELFAddend(R)) & 0xFFFFFFFF;
- }
-
- uint64_t visitELF_X86_64_32S(RelocationRef R, uint64_t Value) {
- return (Value + getELFAddend(R)) & 0xFFFFFFFF;
- }
-
- /// BPF ELF
- uint64_t visitELF_BPF_64_32(RelocationRef R, uint64_t Value) {
- return Value & 0xFFFFFFFF;
- }
-
- uint64_t visitELF_BPF_64_64(RelocationRef R, uint64_t Value) {
- return Value;
- }
-
- /// PPC64 ELF
- uint64_t visitELF_PPC64_ADDR32(RelocationRef R, uint64_t Value) {
- return (Value + getELFAddend(R)) & 0xFFFFFFFF;
- }
-
- uint64_t visitELF_PPC64_ADDR64(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R);
- }
-
- /// PPC32 ELF
- uint64_t visitELF_PPC_ADDR32(RelocationRef R, uint64_t Value) {
- return (Value + getELFAddend(R)) & 0xFFFFFFFF;
- }
-
- /// Lanai ELF
- uint64_t visitELF_Lanai_32(RelocationRef R, uint64_t Value) {
- return (Value + getELFAddend(R)) & 0xFFFFFFFF;
- }
-
- /// MIPS ELF
- uint64_t visitELF_MIPS_32(RelocationRef R, uint64_t Value) {
- return Value & 0xFFFFFFFF;
- }
-
- /// MIPS64 ELF
- uint64_t visitELF_MIPS64_32(RelocationRef R, uint64_t Value) {
- return (Value + getELFAddend(R)) & 0xFFFFFFFF;
- }
-
- uint64_t visitELF_MIPS64_64(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R);
- }
-
- // AArch64 ELF
- uint64_t visitELF_AARCH64_ABS32(RelocationRef R, uint64_t Value) {
- int64_t Addend = getELFAddend(R);
- int64_t Res = Value + Addend;
-
- // Overflow check allows for both signed and unsigned interpretation.
- if (Res < INT32_MIN || Res > UINT32_MAX)
- HasError = true;
-
- return static_cast<uint32_t>(Res);
- }
-
- uint64_t visitELF_AARCH64_ABS64(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R);
+ uint64_t visitBpf(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_BPF_64_32:
+ return Value & 0xFFFFFFFF;
+ case ELF::R_BPF_64_64:
+ return Value;
+ }
+ HasError = true;
+ return 0;
}
- // SystemZ ELF
- uint64_t visitELF_390_32(RelocationRef R, uint64_t Value) {
- int64_t Addend = getELFAddend(R);
- int64_t Res = Value + Addend;
-
- // Overflow check allows for both signed and unsigned interpretation.
- if (Res < INT32_MIN || Res > UINT32_MAX)
- HasError = true;
-
- return static_cast<uint32_t>(Res);
+ uint64_t visitMips64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_MIPS_32:
+ return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+ case ELF::R_MIPS_64:
+ return Value + getELFAddend(R);
+ }
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_390_64(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R);
+ uint64_t visitPPC64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_PPC64_ADDR32:
+ return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+ case ELF::R_PPC64_ADDR64:
+ return Value + getELFAddend(R);
+ }
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_SPARC_32(RelocationRef R, uint32_t Value) {
- return Value + getELFAddend(R);
+ uint64_t visitSystemz(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_390_32: {
+ int64_t Res = Value + getELFAddend(R);
+ if (Res < INT32_MIN || Res > UINT32_MAX)
+ HasError = true;
+ return static_cast<uint32_t>(Res);
+ }
+ case ELF::R_390_64:
+ return Value + getELFAddend(R);
+ }
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_SPARCV9_32(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R);
+ uint64_t visitSparc64(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_SPARC_32:
+ case ELF::R_SPARC_64:
+ case ELF::R_SPARC_UA32:
+ case ELF::R_SPARC_UA64:
+ return Value + getELFAddend(R);
+ }
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_SPARCV9_64(RelocationRef R, uint64_t Value) {
- return Value + getELFAddend(R);
+ uint64_t visitAmdgpu(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_AMDGPU_ABS32:
+ case ELF::R_AMDGPU_ABS64:
+ return Value + getELFAddend(R);
+ }
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_ARM_ABS32(RelocationRef R, uint64_t Value) {
- int64_t Res = Value;
-
- // Overflow check allows for both signed and unsigned interpretation.
- if (Res < INT32_MIN || Res > UINT32_MAX)
- HasError = true;
-
- return static_cast<uint32_t>(Res);
+ uint64_t visitX86(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (Rel) {
+ case ELF::R_386_NONE:
+ return 0;
+ case ELF::R_386_32:
+ return Value;
+ case ELF::R_386_PC32:
+ return Value - R.getOffset();
+ }
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_HEX_32(RelocationRef R, uint64_t Value) {
- int64_t Addend = getELFAddend(R);
- return Value + Addend;
+ uint64_t visitPPC32(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ if (Rel == ELF::R_PPC_ADDR32)
+ return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_AMDGPU_ABS32(RelocationRef R, uint64_t Value) {
- int64_t Addend = getELFAddend(R);
- return Value + Addend;
+ uint64_t visitARM(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ if (Rel == ELF::R_ARM_ABS32) {
+ if ((int64_t)Value < INT32_MIN || (int64_t)Value > UINT32_MAX)
+ HasError = true;
+ return static_cast<uint32_t>(Value);
+ }
+ HasError = true;
+ return 0;
}
- uint64_t visitELF_AMDGPU_ABS64(RelocationRef R, uint64_t Value) {
- int64_t Addend = getELFAddend(R);
- return Value + Addend;
+ uint64_t visitLanai(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ if (Rel == ELF::R_LANAI_32)
+ return (Value + getELFAddend(R)) & 0xFFFFFFFF;
+ HasError = true;
+ return 0;
}
- /// I386 COFF
- uint64_t visitCOFF_I386_SECREL(RelocationRef R, uint64_t Value) {
- return static_cast<uint32_t>(Value);
+ uint64_t visitMips32(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ if (Rel == ELF::R_MIPS_32)
+ return Value & 0xFFFFFFFF;
+ HasError = true;
+ return 0;
}
- uint64_t visitCOFF_I386_DIR32(RelocationRef R, uint64_t Value) {
- return static_cast<uint32_t>(Value);
+ uint64_t visitSparc32(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ if (Rel == ELF::R_SPARC_32 || Rel == ELF::R_SPARC_UA32)
+ return Value + getELFAddend(R);
+ HasError = true;
+ return 0;
}
- /// AMD64 COFF
- uint64_t visitCOFF_AMD64_SECREL(RelocationRef R, uint64_t Value) {
- return static_cast<uint32_t>(Value);
+ uint64_t visitHexagon(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ if (Rel == ELF::R_HEX_32)
+ return Value + getELFAddend(R);
+ HasError = true;
+ return 0;
}
- uint64_t visitCOFF_AMD64_ADDR64(RelocationRef R, uint64_t Value) {
- return Value;
+ uint64_t visitCOFF(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ switch (ObjToVisit.getArch()) {
+ case Triple::x86:
+ switch (Rel) {
+ case COFF::IMAGE_REL_I386_SECREL:
+ case COFF::IMAGE_REL_I386_DIR32:
+ return static_cast<uint32_t>(Value);
+ }
+ break;
+ case Triple::x86_64:
+ switch (Rel) {
+ case COFF::IMAGE_REL_AMD64_SECREL:
+ return static_cast<uint32_t>(Value);
+ case COFF::IMAGE_REL_AMD64_ADDR64:
+ return Value;
+ }
+ break;
+ }
+ HasError = true;
+ return 0;
}
- // X86_64 MachO
- uint64_t visitMACHO_X86_64_UNSIGNED(RelocationRef R, uint64_t Value) {
- return Value;
+ uint64_t visitMachO(uint32_t Rel, RelocationRef R, uint64_t Value) {
+ if (ObjToVisit.getArch() == Triple::x86_64 &&
+ Rel == MachO::X86_64_RELOC_UNSIGNED)
+ return Value;
+ HasError = true;
+ return 0;
}
};
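The rewrite above only regroups the per-architecture switches into helpers; callers keep the same pattern. A hedged usage sketch, with Obj, R, and SymValue assumed to be an ObjectFile, a RelocationRef, and the resolved symbol value:

llvm::object::RelocVisitor V(Obj);
uint64_t Relocated = V.visit(R.getType(), R, SymValue);
if (V.error())                                 // set when a relocation type is unsupported
  llvm::errs() << "unsupported relocation\n";
else
  llvm::errs() << "relocated value: " << Relocated << "\n";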
diff --git a/include/llvm/Object/Wasm.h b/include/llvm/Object/Wasm.h
index d200d4a148e3..de54a4928cce 100644
--- a/include/llvm/Object/Wasm.h
+++ b/include/llvm/Object/Wasm.h
@@ -119,6 +119,7 @@ public:
std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionIndex(DataRefImpl Sec) const override;
uint64_t getSectionSize(DataRefImpl Sec) const override;
std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const override;
diff --git a/include/llvm/Option/OptTable.h b/include/llvm/Option/OptTable.h
index 390e52774fea..8a323a255ca1 100644
--- a/include/llvm/Option/OptTable.h
+++ b/include/llvm/Option/OptTable.h
@@ -113,6 +113,14 @@ public:
return getInfo(id).MetaVar;
}
+ /// Find flags from OptTable which starts with Cur.
+ ///
+ /// \param [in] Cur - String prefix that all returned flags need
+ // to start with.
+ ///
+ /// \return The vector of flags which start with Cur.
+ std::vector<std::string> findByPrefix(StringRef Cur) const;
+
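A short usage sketch; Opts is an assumed OptTable reference and the prefix is illustrative:

for (const std::string &Flag : Opts.findByPrefix("-fvect"))
  llvm::outs() << Flag << "\n";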
/// \brief Parse a single argument; returning the new argument and
/// updating Index.
///
diff --git a/include/llvm/ProfileData/InstrProf.h b/include/llvm/ProfileData/InstrProf.h
index 1b07c33746e7..0dbb2cf9f269 100644
--- a/include/llvm/ProfileData/InstrProf.h
+++ b/include/llvm/ProfileData/InstrProf.h
@@ -212,12 +212,12 @@ StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName,
/// third field is the uncompressed strings; otherwise it is the
/// compressed string. When the string compression is off, the
/// second field will have value zero.
-Error collectPGOFuncNameStrings(const std::vector<std::string> &NameStrs,
+Error collectPGOFuncNameStrings(ArrayRef<std::string> NameStrs,
bool doCompression, std::string &Result);
/// Produce \c Result string with the same format described above. The input
/// is vector of PGO function name variables that are referenced.
-Error collectPGOFuncNameStrings(const std::vector<GlobalVariable *> &NameVars,
+Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
std::string &Result, bool doCompression = true);
/// \c NameStrings is a string composed of one of more sub-strings encoded in
@@ -967,7 +967,7 @@ struct Header {
} // end namespace RawInstrProf
// Parse MemOP Size range option.
-void getMemOPSizeRangeFromOption(std::string Str, int64_t &RangeStart,
+void getMemOPSizeRangeFromOption(StringRef Str, int64_t &RangeStart,
int64_t &RangeLast);
} // end namespace llvm
diff --git a/include/llvm/TableGen/Record.h b/include/llvm/TableGen/Record.h
index fef5bf304566..d14a56cb87e0 100644
--- a/include/llvm/TableGen/Record.h
+++ b/include/llvm/TableGen/Record.h
@@ -671,7 +671,7 @@ public:
/// [AL, AH, CL] - Represent a list of defs
///
class ListInit final : public TypedInit, public FoldingSetNode,
- public TrailingObjects<BitsInit, Init *> {
+ public TrailingObjects<ListInit, Init *> {
unsigned NumValues;
public:
@@ -1137,17 +1137,19 @@ public:
/// to have at least one value then a (possibly empty) list of arguments. Each
/// argument can have a name associated with it.
///
-class DagInit : public TypedInit, public FoldingSetNode {
+class DagInit final : public TypedInit, public FoldingSetNode,
+ public TrailingObjects<DagInit, Init *, StringInit *> {
Init *Val;
StringInit *ValName;
- SmallVector<Init*, 4> Args;
- SmallVector<StringInit*, 4> ArgNames;
+ unsigned NumArgs;
+ unsigned NumArgNames;
- DagInit(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
- ArrayRef<StringInit *> NameRange)
+ DagInit(Init *V, StringInit *VN, unsigned NumArgs, unsigned NumArgNames)
: TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
- Args(ArgRange.begin(), ArgRange.end()),
- ArgNames(NameRange.begin(), NameRange.end()) {}
+ NumArgs(NumArgs), NumArgNames(NumArgNames) {}
+
+ friend TrailingObjects;
+ size_t numTrailingObjects(OverloadToken<Init *>) const { return NumArgs; }
public:
DagInit(const DagInit &Other) = delete;
@@ -1173,20 +1175,24 @@ public:
return ValName ? ValName->getValue() : StringRef();
}
- unsigned getNumArgs() const { return Args.size(); }
+ unsigned getNumArgs() const { return NumArgs; }
Init *getArg(unsigned Num) const {
- assert(Num < Args.size() && "Arg number out of range!");
- return Args[Num];
+ assert(Num < NumArgs && "Arg number out of range!");
+ return getTrailingObjects<Init *>()[Num];
}
StringInit *getArgName(unsigned Num) const {
- assert(Num < ArgNames.size() && "Arg number out of range!");
- return ArgNames[Num];
+ assert(Num < NumArgNames && "Arg number out of range!");
+ return getTrailingObjects<StringInit *>()[Num];
}
StringRef getArgNameStr(unsigned Num) const {
StringInit *Init = getArgName(Num);
return Init ? Init->getValue() : StringRef();
}
+ ArrayRef<StringInit *> getArgNames() const {
+ return makeArrayRef(getTrailingObjects<StringInit *>(), NumArgNames);
+ }
+
Init *resolveReferences(Record &R, const RecordVal *RV) const override;
std::string getAsString() const override;
@@ -1194,20 +1200,20 @@ public:
typedef SmallVectorImpl<Init*>::const_iterator const_arg_iterator;
typedef SmallVectorImpl<StringInit*>::const_iterator const_name_iterator;
- inline const_arg_iterator arg_begin() const { return Args.begin(); }
- inline const_arg_iterator arg_end () const { return Args.end(); }
+ inline const_arg_iterator arg_begin() const { return getTrailingObjects<Init *>(); }
+ inline const_arg_iterator arg_end () const { return arg_begin() + NumArgs; }
inline iterator_range<const_arg_iterator> args() const {
return llvm::make_range(arg_begin(), arg_end());
}
- inline size_t arg_size () const { return Args.size(); }
- inline bool arg_empty() const { return Args.empty(); }
+ inline size_t arg_size () const { return NumArgs; }
+ inline bool arg_empty() const { return NumArgs == 0; }
- inline const_name_iterator name_begin() const { return ArgNames.begin(); }
- inline const_name_iterator name_end () const { return ArgNames.end(); }
+ inline const_name_iterator name_begin() const { return getTrailingObjects<StringInit *>(); }
+ inline const_name_iterator name_end () const { return name_begin() + NumArgNames; }
- inline size_t name_size () const { return ArgNames.size(); }
- inline bool name_empty() const { return ArgNames.empty(); }
+ inline size_t name_size () const { return NumArgNames; }
+ inline bool name_empty() const { return NumArgNames == 0; }
Init *getBit(unsigned Bit) const override {
llvm_unreachable("Illegal bit reference off dag");
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 1ca32d4c3589..17182b958ecb 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -405,7 +405,9 @@ public:
}
/// Returns if it's reasonable to merge stores to MemVT size.
- virtual bool canMergeStoresTo(EVT MemVT) const { return true; }
+ virtual bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT) const {
+ return true;
+ }
/// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
virtual bool isCheapToSpeculateCttz() const {
@@ -736,7 +738,7 @@ public:
if (VT.isExtended()) return Expand;
// If a target-specific SDNode requires legalization, require the target
// to provide custom legalization for it.
- if (Op > array_lengthof(OpActions[0])) return Custom;
+ if (Op >= array_lengthof(OpActions[0])) return Custom;
return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
}
diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h
index ba0a3ee1287a..856c288a071f 100644
--- a/include/llvm/Transforms/Scalar.h
+++ b/include/llvm/Transforms/Scalar.h
@@ -356,6 +356,13 @@ FunctionPass *createGVNHoistPass();
//===----------------------------------------------------------------------===//
//
+// GVNSink - This pass uses an "inverted" value numbering to decide the
+// similarity of expressions and sinks similar expressions into successors.
+//
+FunctionPass *createGVNSinkPass();
+
+//===----------------------------------------------------------------------===//
+//
// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
// are hoisted into the header, while stores sink into the footer.
//
diff --git a/include/llvm/Transforms/Scalar/GVN.h b/include/llvm/Transforms/Scalar/GVN.h
index 8f05e8cdb233..3f97789cabbc 100644
--- a/include/llvm/Transforms/Scalar/GVN.h
+++ b/include/llvm/Transforms/Scalar/GVN.h
@@ -68,6 +68,24 @@ public:
class ValueTable {
DenseMap<Value *, uint32_t> valueNumbering;
DenseMap<Expression, uint32_t> expressionNumbering;
+
+ // Expressions is the vector of Expression objects. ExprIdx maps each value
+ // number to the index of its Expression in Expressions. We use a vector
+ // instead of a DenseMap because filling such a mapping is faster than
+ // filling a DenseMap, and the compile time is a little better.
+ uint32_t nextExprNumber;
+ std::vector<Expression> Expressions;
+ std::vector<uint32_t> ExprIdx;
+ // Value number to PHINode mapping. Used for phi translation in scalar PRE.
+ DenseMap<uint32_t, PHINode *> NumberingPhi;
+ // Cache for phi translation in scalar PRE.
+ typedef DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>
+ PhiTranslateMap;
+ PhiTranslateMap PhiTranslateTable;
+ // Map each block to its reverse postorder traversal number. It is used to
+ // find back edges easily.
+ DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
+
AliasAnalysis *AA;
MemoryDependenceResults *MD;
DominatorTree *DT;
@@ -79,6 +97,10 @@ public:
Value *LHS, Value *RHS);
Expression createExtractvalueExpr(ExtractValueInst *EI);
uint32_t lookupOrAddCall(CallInst *C);
+ uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
+ uint32_t Num, GVN &Gvn);
+ std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
+ bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
public:
ValueTable();
@@ -87,9 +109,12 @@ public:
~ValueTable();
uint32_t lookupOrAdd(Value *V);
- uint32_t lookup(Value *V) const;
+ uint32_t lookup(Value *V, bool Verify = true) const;
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
Value *LHS, Value *RHS);
+ uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
+ uint32_t Num, GVN &Gvn);
+ void assignBlockRPONumber(Function &F);
bool exists(Value *V) const;
void add(Value *V, uint32_t num);
void clear();
@@ -238,7 +263,12 @@ struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
/// \brief Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-
+/// \brief Uses an "inverted" value numbering to decide the similarity of
+/// expressions and sinks similar expressions into successors.
+struct GVNSinkPass : PassInfoMixin<GVNSinkPass> {
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
}
#endif
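The ValueTable members added above describe a vector-backed value-number-to-expression map; the standalone sketch below only illustrates that layout with a placeholder Expr type and is not the pass's code:

#include <cstdint>
#include <vector>

struct Expr { unsigned Opcode = 0; };   // placeholder, not GVN's Expression
std::vector<Expr> Expressions;          // dense storage, filled in numbering order
std::vector<uint32_t> ExprIdx;          // value number -> index into Expressions

const Expr &exprForValueNum(uint32_t Num) { return Expressions[ExprIdx[Num]]; }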
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index b5a5f4c2704c..8942111307ff 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -356,6 +356,10 @@ void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> Kn
/// Unknown metadata is removed.
void combineMetadataForCSE(Instruction *K, const Instruction *J);
+// Replace each use of 'From' with 'To', if that use does not belong to the
+// basic block where 'From' is defined. Returns the number of replacements made.
+unsigned replaceNonLocalUsesWith(Instruction *From, Value *To);
+
/// Replace each use of 'From' with 'To' if that use is dominated by
/// the given edge. Returns the number of replacements made.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
@@ -406,6 +410,14 @@ bool recognizeBSwapOrBitReverseIdiom(
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
const TargetLibraryInfo *TLI);
+//===----------------------------------------------------------------------===//
+// Transform predicates
+//
+
+/// Given an instruction, is it legal to set operand OpIdx to a non-constant
+/// value?
+bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);
+
} // End llvm namespace
#endif
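A brief sketch of the two new Local.h helpers in use; Def, Repl, Inst, and OpIdx are assumed to be a defining Instruction*, a replacement Value*, an Instruction*, and an operand index:

// Rewrite uses of Def that live outside its defining basic block.
unsigned NumReplaced = llvm::replaceNonLocalUsesWith(Def, Repl);  // number of uses rewritten

// Only rewrite the operand when the instruction tolerates a non-constant there.
if (llvm::canReplaceOperandWithVariable(Inst, OpIdx))
  Inst->setOperand(OpIdx, Repl);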