author     Dimitry Andric <dim@FreeBSD.org>  2017-12-18 20:10:56 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2017-12-18 20:10:56 +0000
commit     044eb2f6afba375a914ac9d8024f8f5142bb912e (patch)
tree       1475247dc9f9fe5be155ebd4c9069c75aadf8c20 /lib/CodeGen/TargetInstrInfo.cpp
parent     eb70dddbd77e120e5d490bd8fbe7ff3f8fa81c6b (diff)
Diffstat (limited to 'lib/CodeGen/TargetInstrInfo.cpp')
-rw-r--r--  lib/CodeGen/TargetInstrInfo.cpp  187
1 file changed, 101 insertions, 86 deletions
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index 14c5adc0d898..db925f803db6 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
@@ -19,6 +19,9 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -26,10 +29,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
using namespace llvm;
@@ -67,6 +67,11 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
llvm_unreachable("Target didn't implement insertNoop!");
}
+static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
+ return strncmp(Str, MAI.getCommentString().data(),
+ MAI.getCommentString().size()) == 0;
+}
+
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
@@ -75,29 +80,46 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
+/// We implement a special case of the .space directive which takes only a
+/// single integer argument in base 10 that is the size in bytes. This is a
+/// restricted form of the GAS directive in that we only interpret
+/// simple--i.e. not a logical or arithmetic expression--size values without
+/// the optional fill value. This is primarily used for creating arbitrary
+/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const {
// Count the number of instructions in the asm.
- bool atInsnStart = true;
- unsigned InstCount = 0;
+ bool AtInsnStart = true;
+ unsigned Length = 0;
for (; *Str; ++Str) {
if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
strlen(MAI.getSeparatorString())) == 0) {
- atInsnStart = true;
- } else if (strncmp(Str, MAI.getCommentString().data(),
- MAI.getCommentString().size()) == 0) {
+ AtInsnStart = true;
+ } else if (isAsmComment(Str, MAI)) {
// Stop counting as an instruction after a comment until the next
// separator.
- atInsnStart = false;
+ AtInsnStart = false;
}
- if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
- ++InstCount;
- atInsnStart = false;
+ if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
+ unsigned AddLength = MAI.getMaxInstLength();
+ if (strncmp(Str, ".space", 6) == 0) {
+ char *EStr;
+ int SpaceSize;
+ SpaceSize = strtol(Str + 6, &EStr, 10);
+ SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
+ while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
+ ++EStr;
+ if (*EStr == '\0' || *EStr == '\n' ||
+ isAsmComment(EStr, MAI)) // Successfully parsed .space argument
+ AddLength = SpaceSize;
+ }
+ Length += AddLength;
+ AtInsnStart = false;
}
}
- return InstCount * MAI.getMaxInstLength();
+ return Length;
}
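
[Illustrative aside, not part of the commit] The new ".space" handling above is easiest to see with a concrete input. Below is a minimal standalone C++ sketch of the same counting scheme, with the MCAsmInfo parameters hard-coded as assumptions (max instruction length 4, separator ";", comment string "#"); it is not LLVM code, it only demonstrates that ".space N" now contributes N bytes instead of one max-instruction-length unit.

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Rough stand-in for TargetInstrInfo::getInlineAsmLength() with fixed,
// assumed assembly-dialect parameters.
static unsigned estimateInlineAsmLength(const char *Str) {
  const unsigned MaxInstLength = 4; // assumed; the real value comes from MCAsmInfo
  const char *Sep = ";";            // assumed separator string
  const char *Comment = "#";        // assumed comment string
  bool AtInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || std::strncmp(Str, Sep, std::strlen(Sep)) == 0) {
      AtInsnStart = true;
    } else if (std::strncmp(Str, Comment, std::strlen(Comment)) == 0) {
      AtInsnStart = false; // comments do not count as instructions
    }
    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (std::strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        long SpaceSize = std::strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            std::strncmp(EStr, Comment, std::strlen(Comment)) == 0)
          AddLength = SpaceSize; // ".space N" contributes exactly N bytes
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }
  return Length;
}

int main() {
  // Two instructions (2 * 4 bytes) plus a 100-byte .space block: prints 108.
  std::printf("%u\n", estimateInlineAsmLength("nop\n.space 100\nnop\n"));
}
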
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
@@ -169,7 +191,7 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
MachineInstr *CommutedMI = nullptr;
if (NewMI) {
// Create a new instruction.
- MachineFunction &MF = *MI.getParent()->getParent();
+ MachineFunction &MF = *MI.getMF();
CommutedMI = MF.CloneMachineInstr(&MI);
} else {
CommutedMI = &MI;
@@ -388,10 +410,11 @@ bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
-MachineInstr *TargetInstrInfo::duplicate(MachineInstr &Orig,
- MachineFunction &MF) const {
+MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
- return MF.CloneMachineInstr(&Orig);
+ MachineFunction &MF = *MBB.getParent();
+ return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
@@ -415,7 +438,7 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
"Cannot fold physregs");
- const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
@@ -495,21 +518,13 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
return NewMI;
}
-/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
-/// slot into the specified machine instruction for the specified operand(s).
-/// If this is possible, a new instruction is returned with the specified
-/// operand folded, otherwise NULL is returned. The client is responsible for
-/// removing the old instruction and adding the new one in the instruction
-/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
ArrayRef<unsigned> Ops, int FI,
LiveIntervals *LIS) const {
auto Flags = MachineMemOperand::MONone;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (MI.getOperand(Ops[i]).isDef())
- Flags |= MachineMemOperand::MOStore;
- else
- Flags |= MachineMemOperand::MOLoad;
+ for (unsigned OpIdx : Ops)
+ Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
+ : MachineMemOperand::MOLoad;
MachineBasicBlock *MBB = MI.getParent();
assert(MBB && "foldMemoryOperand needs an inserted instruction");
@@ -525,10 +540,10 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
if (Flags & MachineMemOperand::MOStore) {
MemSize = MFI.getObjectSize(FI);
} else {
- for (unsigned Idx : Ops) {
+ for (unsigned OpIdx : Ops) {
int64_t OpSize = MFI.getObjectSize(FI);
- if (auto SubReg = MI.getOperand(Idx).getSubReg()) {
+ if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
if (SubRegSize > 0 && !(SubRegSize % 8))
OpSize = SubRegSize / 8;
@@ -590,6 +605,54 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
return &*--Pos;
}
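
[Illustrative aside, not part of the commit] For context, this stack-slot variant of foldMemoryOperand() is typically driven by a spiller-style client: on success the folded instruction has already been inserted next to MI, and the caller is responsible for erasing the original. A hedged usage sketch follows; the helper name and the surrounding MI/OpIdx/FI/LIS objects are assumed for illustration only.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

// Try to fold a load/store of stack slot FI into operand OpIdx of MI,
// erasing the original instruction when the target succeeds.
static bool tryFoldStackSlot(const llvm::TargetInstrInfo &TII,
                             llvm::MachineInstr &MI, unsigned OpIdx, int FI,
                             llvm::LiveIntervals *LIS) {
  llvm::MachineInstr *FoldedMI = TII.foldMemoryOperand(MI, {OpIdx}, FI, LIS);
  if (!FoldedMI)
    return false;         // target could not fold this operand
  // FoldedMI is already in the basic block; drop the now-redundant original.
  MI.eraseFromParent();
  return true;
}
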
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineInstr &LoadMI,
+ LiveIntervals *LIS) const {
+ assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
+#ifndef NDEBUG
+ for (unsigned OpIdx : Ops)
+ assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
+#endif
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+
+ // Ask the target to do the actual folding.
+ MachineInstr *NewMI = nullptr;
+ int FrameIndex = 0;
+
+ if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
+ MI.getOpcode() == TargetOpcode::PATCHPOINT ||
+ MI.getOpcode() == TargetOpcode::STATEPOINT) &&
+ isLoadFromStackSlot(LoadMI, FrameIndex)) {
+ // Fold stackmap/patchpoint.
+ NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+ if (NewMI)
+ NewMI = &*MBB.insert(MI, NewMI);
+ } else {
+ // Ask the target to do the actual folding.
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
+ }
+
+ if (!NewMI)
+ return nullptr;
+
+ // Copy the memoperands from the load to the folded instruction.
+ if (MI.memoperands_empty()) {
+ NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
+ } else {
+ // Handle the rare case of folding multiple loads.
+ NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
+ E = LoadMI.memoperands_end();
+ I != E; ++I) {
+ NewMI->addMemOperand(MF, *I);
+ }
+ }
+ return NewMI;
+}
+
bool TargetInstrInfo::hasReassociableOperands(
const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
const MachineOperand &Op1 = Inst.getOperand(1);
@@ -685,11 +748,13 @@ bool TargetInstrInfo::getMachineCombinerPatterns(
return false;
}
+
/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
return false;
}
+
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
@@ -698,7 +763,7 @@ void TargetInstrInfo::reassociateOps(
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
- MachineFunction *MF = Root.getParent()->getParent();
+ MachineFunction *MF = Root.getMF();
MachineRegisterInfo &MRI = MF->getRegInfo();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
@@ -781,7 +846,7 @@ void TargetInstrInfo::genAlternativeCodeSequence(
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
- MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();
+ MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
// Select the previous instruction in the sequence based on the input pattern.
MachineInstr *Prev = nullptr;
@@ -803,59 +868,9 @@ void TargetInstrInfo::genAlternativeCodeSequence(
reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}
-/// foldMemoryOperand - Same as the previous version except it allows folding
-/// of any load and store from / to any address, not just from a specific
-/// stack slot.
-MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
- ArrayRef<unsigned> Ops,
- MachineInstr &LoadMI,
- LiveIntervals *LIS) const {
- assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
-#ifndef NDEBUG
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
-#endif
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
-
- // Ask the target to do the actual folding.
- MachineInstr *NewMI = nullptr;
- int FrameIndex = 0;
-
- if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
- MI.getOpcode() == TargetOpcode::PATCHPOINT ||
- MI.getOpcode() == TargetOpcode::STATEPOINT) &&
- isLoadFromStackSlot(LoadMI, FrameIndex)) {
- // Fold stackmap/patchpoint.
- NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
- if (NewMI)
- NewMI = &*MBB.insert(MI, NewMI);
- } else {
- // Ask the target to do the actual folding.
- NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
- }
-
- if (!NewMI) return nullptr;
-
- // Copy the memoperands from the load to the folded instruction.
- if (MI.memoperands_empty()) {
- NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
- }
- else {
- // Handle the rare case of folding multiple loads.
- NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
- E = LoadMI.memoperands_end();
- I != E; ++I) {
- NewMI->addMemOperand(MF, *I);
- }
- }
- return NewMI;
-}
-
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
const MachineInstr &MI, AliasAnalysis *AA) const {
- const MachineFunction &MF = *MI.getParent()->getParent();
+ const MachineFunction &MF = *MI.getMF();
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Remat clients assume operand 0 is the defined register.
@@ -933,7 +948,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
}
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
- const MachineFunction *MF = MI.getParent()->getParent();
+ const MachineFunction *MF = MI.getMF();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;