aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2017-12-20 14:16:56 +0000
committerDimitry Andric <dim@FreeBSD.org>2017-12-20 14:16:56 +0000
commit2cab237b5dbfe1b3e9c7aa7a3c02d2b98fcf7462 (patch)
tree524fe828571f81358bba62fdb6d04c6e5e96a2a4 /contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
parent6c7828a2807ea5e50c79ca42dbedf2b589ce63b2 (diff)
parent044eb2f6afba375a914ac9d8024f8f5142bb912e (diff)
Notes
Diffstat (limited to 'contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp')
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp156
1 files changed, 153 insertions, 3 deletions
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index d14a0fb0b0b2b..856505e00a109 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -10,9 +10,12 @@
#include "SystemZRegisterInfo.h"
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/VirtRegMap.h"
using namespace llvm;
@@ -22,10 +25,91 @@ using namespace llvm;
SystemZRegisterInfo::SystemZRegisterInfo()
: SystemZGenRegisterInfo(SystemZ::R14D) {}
+// Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO
+// somehow belongs in it. Otherwise, return GRX32.
+static const TargetRegisterClass *getRC32(MachineOperand &MO,
+ const VirtRegMap *VRM,
+ const MachineRegisterInfo *MRI) {
+ const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());
+
+ if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
+ MO.getSubReg() == SystemZ::subreg_l32 ||
+ MO.getSubReg() == SystemZ::subreg_hl32)
+ return &SystemZ::GR32BitRegClass;
+ if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
+ MO.getSubReg() == SystemZ::subreg_h32 ||
+ MO.getSubReg() == SystemZ::subreg_hh32)
+ return &SystemZ::GRH32BitRegClass;
+
+ if (VRM && VRM->hasPhys(MO.getReg())) {
+ unsigned PhysReg = VRM->getPhys(MO.getReg());
+ if (SystemZ::GR32BitRegClass.contains(PhysReg))
+ return &SystemZ::GR32BitRegClass;
+ assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
+ "Phys reg not in GR32 or GRH32?");
+ return &SystemZ::GRH32BitRegClass;
+ }
+
+ assert (RC == &SystemZ::GRX32BitRegClass);
+ return RC;
+}
+
+bool
+SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
+ ArrayRef<MCPhysReg> Order,
+ SmallVectorImpl<MCPhysReg> &Hints,
+ const MachineFunction &MF,
+ const VirtRegMap *VRM,
+ const LiveRegMatrix *Matrix) const {
+ const MachineRegisterInfo *MRI = &MF.getRegInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
+ SmallVector<unsigned, 8> Worklist;
+ SmallSet<unsigned, 4> DoneRegs;
+ Worklist.push_back(VirtReg);
+ while (Worklist.size()) {
+ unsigned Reg = Worklist.pop_back_val();
+ if (!DoneRegs.insert(Reg).second)
+ continue;
+
+ for (auto &Use : MRI->use_instructions(Reg))
+ // For LOCRMux, see if the other operand is already a high or low
+ // register, and in that case give the correpsonding hints for
+ // VirtReg. LOCR instructions need both operands in either high or
+ // low parts.
+ if (Use.getOpcode() == SystemZ::LOCRMux) {
+ MachineOperand &TrueMO = Use.getOperand(1);
+ MachineOperand &FalseMO = Use.getOperand(2);
+ const TargetRegisterClass *RC =
+ TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
+ getRC32(TrueMO, VRM, MRI));
+ if (RC && RC != &SystemZ::GRX32BitRegClass) {
+ for (MCPhysReg Reg : Order)
+ if (RC->contains(Reg) && !MRI->isReserved(Reg))
+ Hints.push_back(Reg);
+ // Return true to make these hints the only regs available to
+ // RA. This may mean extra spilling but since the alternative is
+ // a jump sequence expansion of the LOCRMux, it is preferred.
+ return true;
+ }
+
+ // Add the other operand of the LOCRMux to the worklist.
+ unsigned OtherReg =
+ (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
+ if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
+ Worklist.push_back(OtherReg);
+ }
+ }
+ }
+
+ return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
+ VRM, Matrix);
+}
+
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
- MF->getFunction()->getAttributes().hasAttrSomewhere(
+ MF->getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_SystemZ_SwiftError_SaveList;
return CSR_SystemZ_SaveList;
@@ -35,7 +119,7 @@ const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(
+ MF.getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_SystemZ_SwiftError_RegMask;
return CSR_SystemZ_RegMask;
@@ -152,6 +236,72 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
+bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
+ const TargetRegisterClass *SrcRC,
+ unsigned SubReg,
+ const TargetRegisterClass *DstRC,
+ unsigned DstSubReg,
+ const TargetRegisterClass *NewRC,
+ LiveIntervals &LIS) const {
+ assert (MI->isCopy() && "Only expecting COPY instructions");
+
+ // Coalesce anything which is not a COPY involving a subreg to/from GR128.
+ if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
+ (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
+ return true;
+
+ // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
+ // and local to one MBB with not too many interfering registers. Otherwise
+ // regalloc may run out of registers.
+
+ unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
+ unsigned GR128Reg = MI->getOperand(WideOpNo).getReg();
+ unsigned GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
+ LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
+ LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);
+
+ // Check that the two virtual registers are local to MBB.
+ MachineBasicBlock *MBB = MI->getParent();
+ if (LIS.isLiveInToMBB(IntGR128, MBB) || LIS.isLiveOutOfMBB(IntGR128, MBB) ||
+ LIS.isLiveInToMBB(IntGRNar, MBB) || LIS.isLiveOutOfMBB(IntGRNar, MBB))
+ return false;
+
+ // Find the first and last MIs of the registers.
+ MachineInstr *FirstMI = nullptr, *LastMI = nullptr;
+ if (WideOpNo == 1) {
+ FirstMI = LIS.getInstructionFromIndex(IntGR128.beginIndex());
+ LastMI = LIS.getInstructionFromIndex(IntGRNar.endIndex());
+ } else {
+ FirstMI = LIS.getInstructionFromIndex(IntGRNar.beginIndex());
+ LastMI = LIS.getInstructionFromIndex(IntGR128.endIndex());
+ }
+ assert (FirstMI && LastMI && "No instruction from index?");
+
+ // Check if coalescing seems safe by finding the set of clobbered physreg
+ // pairs in the region.
+ BitVector PhysClobbered(getNumRegs());
+ MachineBasicBlock::iterator MII = FirstMI, MEE = LastMI;
+ MEE++;
+ for (; MII != MEE; ++MII) {
+ for (const MachineOperand &MO : MII->operands())
+ if (MO.isReg() && isPhysicalRegister(MO.getReg())) {
+ for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
+ SI.isValid(); ++SI)
+ if (NewRC->contains(*SI)) {
+ PhysClobbered.set(*SI);
+ break;
+ }
+ }
+ }
+
+ // Demand an arbitrary margin of free regs.
+ unsigned const DemandedFreeGR128 = 3;
+ if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
+ return false;
+
+ return true;
+}
+
unsigned
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const SystemZFrameLowering *TFI = getFrameLowering(MF);