path: root/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp
author     Dimitry Andric <dim@FreeBSD.org>  2021-06-13 19:31:46 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2021-06-13 19:37:19 +0000
commit     e8d8bef961a50d4dc22501cde4fb9fb0be1b2532 (patch)
tree       94f04805f47bb7c59ae29690d8952b6074fff602 /contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp
parent     bb130ff39747b94592cb26d71b7cb097b9a4ea6b (diff)
parent     b60736ec1405bb0a8dd40989f67ef4c93da068ab (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp | 176
1 file changed, 117 insertions(+), 59 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp
index d4181591deab..59d98054e3a2 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp
@@ -34,6 +34,7 @@
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
@@ -116,7 +117,7 @@ void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
DebugLoc dl, bool NoImp)
- : MCID(&tid), debugLoc(std::move(dl)) {
+ : MCID(&tid), debugLoc(std::move(dl)), DebugInstrNum(0) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
// Reserve space for the expected number of operands.
@@ -130,10 +131,12 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
addImplicitDefUseOperands(MF);
}
-/// MachineInstr ctor - Copies MachineInstr arg exactly
-///
+/// MachineInstr ctor - Copies MachineInstr arg exactly.
+/// Does not copy the number from debug instruction numbering, to preserve
+/// uniqueness.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
- : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
+ : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()),
+ DebugInstrNum(0) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
CapOperands = OperandCapacity::get(MI.getNumOperands());
@@ -147,6 +150,10 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
setFlags(MI.Flags);
}
+void MachineInstr::moveBefore(MachineInstr *MovePos) {
+ MovePos->getParent()->splice(MovePos, getParent(), getIterator());
+}
+
/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
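
Illustrative sketch (not part of the diff): a caller of the newly added MachineInstr::moveBefore, which splices the instruction in front of another one, possibly across basic blocks. The helper name below is hypothetical.

    #include "llvm/CodeGen/MachineInstr.h"

    // Both instructions must already be inserted into (possibly different)
    // basic blocks; splice() updates the parent blocks' instruction lists
    // while leaving MI's operands and flags untouched.
    static void hoistBefore(llvm::MachineInstr &MI, llvm::MachineInstr &InsertPt) {
      MI.moveBefore(&InsertPt);
    }
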
@@ -701,11 +708,10 @@ bool MachineInstr::isCandidateForCallSiteEntry(QueryType Type) const {
if (!isCall(Type))
return false;
switch (getOpcode()) {
- case TargetOpcode::PATCHABLE_EVENT_CALL:
- case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
case TargetOpcode::PATCHPOINT:
case TargetOpcode::STACKMAP:
case TargetOpcode::STATEPOINT:
+ case TargetOpcode::FENTRY_CALL:
return false;
}
return true;
@@ -835,27 +841,27 @@ const DILabel *MachineInstr::getDebugLabel() const {
}
const MachineOperand &MachineInstr::getDebugVariableOp() const {
- assert(isDebugValue() && "not a DBG_VALUE");
+ assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
return getOperand(2);
}
MachineOperand &MachineInstr::getDebugVariableOp() {
- assert(isDebugValue() && "not a DBG_VALUE");
+ assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
return getOperand(2);
}
const DILocalVariable *MachineInstr::getDebugVariable() const {
- assert(isDebugValue() && "not a DBG_VALUE");
+ assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
return cast<DILocalVariable>(getOperand(2).getMetadata());
}
MachineOperand &MachineInstr::getDebugExpressionOp() {
- assert(isDebugValue() && "not a DBG_VALUE");
+ assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
return getOperand(3);
}
const DIExpression *MachineInstr::getDebugExpression() const {
- assert(isDebugValue() && "not a DBG_VALUE");
+ assert((isDebugValue() || isDebugRef()) && "not a DBG_VALUE");
return cast<DIExpression>(getOperand(3).getMetadata());
}
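
Illustrative sketch (not part of the diff): with the relaxed asserts above, the debug variable accessor can also be used on DBG_INSTR_REF instructions (isDebugRef()), not only DBG_VALUE. The helper name is hypothetical.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/IR/DebugInfoMetadata.h"

    // Returns the variable described by a DBG_VALUE or DBG_INSTR_REF,
    // or nullptr for any other instruction.
    static const llvm::DILocalVariable *getVarIfDebug(const llvm::MachineInstr &MI) {
      if (MI.isDebugValue() || MI.isDebugRef())
        return MI.getDebugVariable();
      return nullptr;
    }
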
@@ -1094,10 +1100,12 @@ void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
if (DefIdx < TiedMax)
UseMO.TiedTo = DefIdx + 1;
else {
- // Inline asm can use the group descriptors to find tied operands, but on
- // normal instruction, the tied def must be within the first TiedMax
+ // Inline asm can use the group descriptors to find tied operands,
+ // statepoint tied operands are trivial to match (1-1 reg def with reg use),
+ // but on normal instruction, the tied def must be within the first TiedMax
// operands.
- assert(isInlineAsm() && "DefIdx out of range");
+ assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
+ "DefIdx out of range");
UseMO.TiedTo = TiedMax;
}
@@ -1117,7 +1125,7 @@ unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
return MO.TiedTo - 1;
// Uses on normal instructions can be out of range.
- if (!isInlineAsm()) {
+ if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
// Normal tied defs must be in the 0..TiedMax-1 range.
if (MO.isUse())
return TiedMax - 1;
@@ -1130,6 +1138,25 @@ unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
llvm_unreachable("Can't find tied use");
}
+ if (getOpcode() == TargetOpcode::STATEPOINT) {
+ // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
+ // on registers.
+ StatepointOpers SO(this);
+ unsigned CurUseIdx = SO.getFirstGCPtrIdx();
+ assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
+ unsigned NumDefs = getNumDefs();
+ for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
+ while (!getOperand(CurUseIdx).isReg())
+ CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
+ if (OpIdx == CurDefIdx)
+ return CurUseIdx;
+ if (OpIdx == CurUseIdx)
+ return CurDefIdx;
+ CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
+ }
+ llvm_unreachable("Can't find tied use");
+ }
+
// Now deal with inline asm by parsing the operand group descriptor flags.
// Find the beginning of each operand group.
SmallVector<unsigned, 8> GroupIdx;
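
Illustrative sketch (not part of the diff): since STATEPOINT register defs are now tied 1-1 to GC pointer register uses, a pass can recover that pairing through the generic tied-operand API. Names are hypothetical, and this assumes the defs were tied via tieOperands as in the hunk above.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include <utility>

    // Collect (def index, tied GC-pointer use index) pairs of a STATEPOINT.
    static void collectTiedGCPairs(
        const llvm::MachineInstr &Statepoint,
        llvm::SmallVectorImpl<std::pair<unsigned, unsigned>> &Pairs) {
      for (unsigned DefIdx = 0, E = Statepoint.getNumDefs(); DefIdx != E; ++DefIdx)
        if (Statepoint.getOperand(DefIdx).isTied())
          Pairs.emplace_back(DefIdx, Statepoint.findTiedOperandIdx(DefIdx));
    }
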
@@ -1213,7 +1240,7 @@ bool MachineInstr::isSafeToMove(AAResults *AA, bool &SawStore) const {
// See if this instruction does a load. If so, we have to guarantee that the
// loaded value doesn't change between the load and the its intended
- // destination. The check for isInvariantLoad gives the targe the chance to
+ // destination. The check for isInvariantLoad gives the target the chance to
// classify the load as always returning a constant, e.g. a constant pool
// load.
if (mayLoad() && !isDereferenceableInvariantLoad(AA))
@@ -1224,47 +1251,21 @@ bool MachineInstr::isSafeToMove(AAResults *AA, bool &SawStore) const {
return true;
}
-bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
- bool UseTBAA) const {
- const MachineFunction *MF = getMF();
- const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
- const MachineFrameInfo &MFI = MF->getFrameInfo();
-
- // If neither instruction stores to memory, they can't alias in any
- // meaningful way, even if they read from the same address.
- if (!mayStore() && !Other.mayStore())
- return false;
-
- // Both instructions must be memory operations to be able to alias.
- if (!mayLoadOrStore() || !Other.mayLoadOrStore())
- return false;
-
- // Let the target decide if memory accesses cannot possibly overlap.
- if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
- return false;
-
- // FIXME: Need to handle multiple memory operands to support all targets.
- if (!hasOneMemOperand() || !Other.hasOneMemOperand())
- return true;
-
- MachineMemOperand *MMOa = *memoperands_begin();
- MachineMemOperand *MMOb = *Other.memoperands_begin();
-
- // The following interface to AA is fashioned after DAGCombiner::isAlias
- // and operates with MachineMemOperand offset with some important
- // assumptions:
+static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
+ bool UseTBAA, const MachineMemOperand *MMOa,
+ const MachineMemOperand *MMOb) {
+ // The following interface to AA is fashioned after DAGCombiner::isAlias and
+ // operates with MachineMemOperand offset with some important assumptions:
// - LLVM fundamentally assumes flat address spaces.
- // - MachineOperand offset can *only* result from legalization and
- // cannot affect queries other than the trivial case of overlap
- // checking.
- // - These offsets never wrap and never step outside
- // of allocated objects.
+ // - MachineOperand offset can *only* result from legalization and cannot
+ // affect queries other than the trivial case of overlap checking.
+ // - These offsets never wrap and never step outside of allocated objects.
// - There should never be any negative offsets here.
//
// FIXME: Modify API to hide this math from "user"
- // Even before we go to AA we can reason locally about some
- // memory objects. It can save compile time, and possibly catch some
- // corner cases not currently covered.
+ // Even before we go to AA we can reason locally about some memory objects. It
+ // can save compile time, and possibly catch some corner cases not currently
+ // covered.
int64_t OffsetA = MMOa->getOffset();
int64_t OffsetB = MMOb->getOffset();
@@ -1306,20 +1307,63 @@ bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
- int64_t OverlapA = KnownWidthA ? WidthA + OffsetA - MinOffset
- : MemoryLocation::UnknownSize;
- int64_t OverlapB = KnownWidthB ? WidthB + OffsetB - MinOffset
- : MemoryLocation::UnknownSize;
+ int64_t OverlapA =
+ KnownWidthA ? WidthA + OffsetA - MinOffset : MemoryLocation::UnknownSize;
+ int64_t OverlapB =
+ KnownWidthB ? WidthB + OffsetB - MinOffset : MemoryLocation::UnknownSize;
AliasResult AAResult = AA->alias(
- MemoryLocation(ValA, OverlapA,
- UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+ MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
MemoryLocation(ValB, OverlapB,
UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
return (AAResult != NoAlias);
}
+bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
+ bool UseTBAA) const {
+ const MachineFunction *MF = getMF();
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+ const MachineFrameInfo &MFI = MF->getFrameInfo();
+
+ // Exclude call instruction which may alter the memory but can not be handled
+ // by this function.
+ if (isCall() || Other.isCall())
+ return true;
+
+ // If neither instruction stores to memory, they can't alias in any
+ // meaningful way, even if they read from the same address.
+ if (!mayStore() && !Other.mayStore())
+ return false;
+
+ // Both instructions must be memory operations to be able to alias.
+ if (!mayLoadOrStore() || !Other.mayLoadOrStore())
+ return false;
+
+ // Let the target decide if memory accesses cannot possibly overlap.
+ if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
+ return false;
+
+ // Memory operations without memory operands may access anything. Be
+ // conservative and assume `MayAlias`.
+ if (memoperands_empty() || Other.memoperands_empty())
+ return true;
+
+ // Skip if there are too many memory operands.
+ auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
+ if (NumChecks > TII->getMemOperandAACheckLimit())
+ return true;
+
+ // Check each pair of memory operands from both instructions, which can't
+ // alias only if all pairs won't alias.
+ for (auto *MMOa : memoperands())
+ for (auto *MMOb : Other.memoperands())
+ if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
+ return true;
+
+ return false;
+}
+
/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no ordered
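
Illustrative sketch (not part of the diff): a conservative caller of the refactored MachineInstr::mayAlias above, which now checks every pair of memory operands (up to TII->getMemOperandAACheckLimit()) via the extracted MemOperandsHaveAlias helper. The wrapper name is hypothetical.

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/CodeGen/MachineInstr.h"

    // Refuse to reorder two instructions if their memory accesses may overlap;
    // UseTBAA = true lets AA refine the answer with TBAA metadata.
    static bool canReorderMemOps(llvm::AAResults *AA, const llvm::MachineInstr &A,
                                 const llvm::MachineInstr &B) {
      return !A.mayAlias(AA, B, /*UseTBAA=*/true);
    }
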
@@ -1447,6 +1491,8 @@ void MachineInstr::copyImplicitOps(MachineFunction &MF,
bool MachineInstr::hasComplexRegisterTies() const {
const MCInstrDesc &MCID = getDesc();
+ if (MCID.Opcode == TargetOpcode::STATEPOINT)
+ return true;
for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
const auto &Operand = getOperand(I);
if (!Operand.isReg() || Operand.isDef())
@@ -1753,6 +1799,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
HeapAllocMarker->printAsOperand(OS, MST);
}
+ if (DebugInstrNum) {
+ if (!FirstOp)
+ OS << ",";
+ OS << " debug-instr-number " << DebugInstrNum;
+ }
+
if (!SkipDebugLoc) {
if (const DebugLoc &DL = getDebugLoc()) {
if (!FirstOp)
@@ -2227,3 +2279,9 @@ MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
return None;
}
+
+unsigned MachineInstr::getDebugInstrNum() {
+ if (DebugInstrNum == 0)
+ DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
+ return DebugInstrNum;
+}
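
Illustrative sketch (not part of the diff): getDebugInstrNum() lazily assigns a function-unique number on first use, so repeated queries on the same instruction return the same id. The helper name is hypothetical.

    #include "llvm/CodeGen/MachineInstr.h"

    // First call allocates a fresh number from the parent MachineFunction;
    // later calls return the cached DebugInstrNum.
    static unsigned labelForInstrRef(llvm::MachineInstr &DefMI) {
      return DefMI.getDebugInstrNum();
    }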