Diffstat (limited to 'lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp')
 lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp | 160 ++++++++++++++---------
 1 file changed, 113 insertions(+), 47 deletions(-)
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 65b5f906e3f6..a0c4a25bb5b9 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -201,8 +201,22 @@ static bool isNarrowStore(unsigned Opc) {
}
}
+// These instructions set the memory tag and either keep the memory contents
+// unchanged or set them to zero, ignoring the address part of the source register.
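+// (The tag is taken from bits 59:56 of the source register; the STZ* variants
+// additionally zero the tagged memory.)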
+static bool isTagStore(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case AArch64::STGOffset:
+ case AArch64::STZGOffset:
+ case AArch64::ST2GOffset:
+ case AArch64::STZ2GOffset:
+ return true;
+ }
+}
+
// Scaling factor for unscaled load or store.
-static int getMemScale(MachineInstr &MI) {
+static int getMemScale(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
llvm_unreachable("Opcode has unknown scale!");
@@ -255,6 +269,11 @@ static int getMemScale(MachineInstr &MI) {
case AArch64::STURQi:
case AArch64::LDPQi:
case AArch64::STPQi:
+ case AArch64::STGOffset:
+ case AArch64::STZGOffset:
+ case AArch64::ST2GOffset:
+ case AArch64::STZ2GOffset:
+ case AArch64::STGPi:
return 16;
}
}
@@ -449,6 +468,16 @@ static unsigned getPreIndexedOpcode(unsigned Opc) {
return AArch64::STPWpre;
case AArch64::STPXi:
return AArch64::STPXpre;
+ case AArch64::STGOffset:
+ return AArch64::STGPreIndex;
+ case AArch64::STZGOffset:
+ return AArch64::STZGPreIndex;
+ case AArch64::ST2GOffset:
+ return AArch64::ST2GPreIndex;
+ case AArch64::STZ2GOffset:
+ return AArch64::STZ2GPreIndex;
+ case AArch64::STGPi:
+ return AArch64::STGPpre;
}
}
@@ -518,6 +547,16 @@ static unsigned getPostIndexedOpcode(unsigned Opc) {
return AArch64::STPWpost;
case AArch64::STPXi:
return AArch64::STPXpost;
+ case AArch64::STGOffset:
+ return AArch64::STGPostIndex;
+ case AArch64::STZGOffset:
+ return AArch64::STZGPostIndex;
+ case AArch64::ST2GOffset:
+ return AArch64::ST2GPostIndex;
+ case AArch64::STZ2GOffset:
+ return AArch64::STZ2GPostIndex;
+ case AArch64::STGPi:
+ return AArch64::STGPpost;
}
}
@@ -536,10 +575,30 @@ static bool isPairedLdSt(const MachineInstr &MI) {
case AArch64::STPQi:
case AArch64::STPWi:
case AArch64::STPXi:
+ case AArch64::STGPi:
return true;
}
}
+// Returns the scale and offset range of pre/post indexed variants of MI.
+static void getPrePostIndexedMemOpInfo(const MachineInstr &MI, int &Scale,
+ int &MinOffset, int &MaxOffset) {
+ bool IsPaired = isPairedLdSt(MI);
+ bool IsTagStore = isTagStore(MI);
+ // ST*G and all paired ldst have the same scale in pre/post-indexed variants
+ // as in the "unsigned offset" variant.
+ // All other pre/post indexed ldst instructions are unscaled.
+ Scale = (IsTagStore || IsPaired) ? getMemScale(MI) : 1;
+
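+ // Paired instructions take a signed 7-bit scaled immediate; everything else,
+ // including the tag stores (scaled by 16 above), takes a signed 9-bit
+ // immediate.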
+ if (IsPaired) {
+ MinOffset = -64;
+ MaxOffset = 63;
+ } else {
+ MinOffset = -256;
+ MaxOffset = 255;
+ }
+}
+
static const MachineOperand &getLdStRegOp(const MachineInstr &MI,
unsigned PairedRegOp = 0) {
assert(PairedRegOp < 2 && "Unexpected register operand idx.");
@@ -618,6 +677,11 @@ static bool isMergeableLdStUpdate(MachineInstr &MI) {
case AArch64::LDRWui:
case AArch64::LDRHHui:
case AArch64::LDRBBui:
+ case AArch64::STGOffset:
+ case AArch64::STZGOffset:
+ case AArch64::ST2GOffset:
+ case AArch64::STZ2GOffset:
+ case AArch64::STGPi:
// Unscaled instructions.
case AArch64::STURSi:
case AArch64::STURDi:
@@ -808,7 +872,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
// STRWui %w1, ...
// USE kill %w1 ; need to clear kill flag when moving STRWui downwards
// STRW %w0
- unsigned Reg = getLdStRegOp(*I).getReg();
+ Register Reg = getLdStRegOp(*I).getReg();
for (MachineInstr &MI : make_range(std::next(I), Paired))
MI.clearRegisterKills(Reg, TRI);
}
@@ -837,9 +901,9 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
MachineOperand &DstMO = MIB->getOperand(SExtIdx);
// Right now, DstMO has the extended register, since it comes from an
// extended opcode.
- unsigned DstRegX = DstMO.getReg();
+ Register DstRegX = DstMO.getReg();
// Get the W variant of that register.
- unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
+ Register DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
// Update the result of LDP to use the W instead of the X variant.
DstMO.setReg(DstRegW);
LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
@@ -882,9 +946,9 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
int LoadSize = getMemScale(*LoadI);
int StoreSize = getMemScale(*StoreI);
- unsigned LdRt = getLdStRegOp(*LoadI).getReg();
+ Register LdRt = getLdStRegOp(*LoadI).getReg();
const MachineOperand &StMO = getLdStRegOp(*StoreI);
- unsigned StRt = getLdStRegOp(*StoreI).getReg();
+ Register StRt = getLdStRegOp(*StoreI).getReg();
bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);
assert((IsStoreXReg ||
@@ -933,10 +997,10 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
? getLdStOffsetOp(*StoreI).getImm()
: getLdStOffsetOp(*StoreI).getImm() * StoreSize;
int Width = LoadSize * 8;
- unsigned DestReg = IsStoreXReg
- ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
- &AArch64::GPR64RegClass)
- : LdRt;
+ unsigned DestReg =
+ IsStoreXReg ? Register(TRI->getMatchingSuperReg(
+ LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
+ : LdRt;
assert((UnscaledLdOffset >= UnscaledStOffset &&
(UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
@@ -1042,7 +1106,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
MachineBasicBlock::iterator B = I->getParent()->begin();
MachineBasicBlock::iterator MBBI = I;
MachineInstr &LoadMI = *I;
- unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();
+ Register BaseReg = getLdStBaseOp(LoadMI).getReg();
// If the load is the first instruction in the block, there's obviously
// not any matching store.
@@ -1156,8 +1220,8 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
bool MayLoad = FirstMI.mayLoad();
bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
- unsigned Reg = getLdStRegOp(FirstMI).getReg();
- unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
+ Register Reg = getLdStRegOp(FirstMI).getReg();
+ Register BaseReg = getLdStBaseOp(FirstMI).getReg();
int Offset = getLdStOffsetOp(FirstMI).getImm();
int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
@@ -1188,7 +1252,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// check for +1/-1. Make sure to check the new instruction offset is
// actually an immediate and not a symbolic reference destined for
// a relocation.
- unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
+ Register MIBaseReg = getLdStBaseOp(MI).getReg();
int MIOffset = getLdStOffsetOp(MI).getImm();
bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
if (IsUnscaled != MIIsUnscaled) {
@@ -1328,18 +1392,19 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
: getPostIndexedOpcode(I->getOpcode());
MachineInstrBuilder MIB;
+ int Scale, MinOffset, MaxOffset;
+ getPrePostIndexedMemOpInfo(*I, Scale, MinOffset, MaxOffset);
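+ // Value is the update amount in bytes; the indexed opcodes encode a scaled
+ // immediate, so divide by Scale (which is 1 for ordinary non-paired ldst).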
if (!isPairedLdSt(*I)) {
// Non-paired instruction.
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
.add(getLdStRegOp(*Update))
.add(getLdStRegOp(*I))
.add(getLdStBaseOp(*I))
- .addImm(Value)
+ .addImm(Value / Scale)
.setMemRefs(I->memoperands())
.setMIFlags(I->mergeFlagsWith(*Update));
} else {
// Paired instruction.
- int Scale = getMemScale(*I);
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
.add(getLdStRegOp(*Update))
.add(getLdStRegOp(*I, 0))
@@ -1395,28 +1460,21 @@ bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
MI.getOperand(1).getReg() != BaseReg)
break;
- bool IsPairedInsn = isPairedLdSt(MemMI);
int UpdateOffset = MI.getOperand(2).getImm();
if (MI.getOpcode() == AArch64::SUBXri)
UpdateOffset = -UpdateOffset;
- // For non-paired load/store instructions, the immediate must fit in a
- // signed 9-bit integer.
- if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
+ // The immediate must be a multiple of the scaling factor of the pre/post
+ // indexed instruction.
+ int Scale, MinOffset, MaxOffset;
+ getPrePostIndexedMemOpInfo(MemMI, Scale, MinOffset, MaxOffset);
+ if (UpdateOffset % Scale != 0)
break;
- // For paired load/store instructions, the immediate must be a multiple of
- // the scaling factor. The scaled offset must also fit into a signed 7-bit
- // integer.
- if (IsPairedInsn) {
- int Scale = getMemScale(MemMI);
- if (UpdateOffset % Scale != 0)
- break;
-
- int ScaledOffset = UpdateOffset / Scale;
- if (ScaledOffset > 63 || ScaledOffset < -64)
- break;
- }
+ // Scaled offset must fit in the instruction immediate.
+ int ScaledOffset = UpdateOffset / Scale;
+ if (ScaledOffset > MaxOffset || ScaledOffset < MinOffset)
+ break;
// If we have a non-zero Offset, we check that it matches the amount
// we're adding to the register.
@@ -1433,7 +1491,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
MachineInstr &MemMI = *I;
MachineBasicBlock::iterator MBBI = I;
- unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
+ Register BaseReg = getLdStBaseOp(MemMI).getReg();
int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
// Scan forward looking for post-index opportunities. Updating instructions
@@ -1442,13 +1500,19 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
if (MIUnscaledOffset != UnscaledOffset)
return E;
- // If the base register overlaps a destination register, we can't
- // merge the update.
- bool IsPairedInsn = isPairedLdSt(MemMI);
- for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
- unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
- if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
- return E;
+ // If the base register overlaps a source/destination register, we can't
+ // merge the update. This does not apply to tag store instructions, which
+ // ignore the address part of the source register.
+ // Nor does it apply to STGPi, which, unlike normal stores, has no
+ // unpredictable behavior in this case and always performs writeback after
+ // reading the source register value.
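+ // For example, "stg x0, [x0], #16" reads only the tag bits of x0 and then
+ // writes back x0 + 16.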
+ if (!isTagStore(MemMI) && MemMI.getOpcode() != AArch64::STGPi) {
+ bool IsPairedInsn = isPairedLdSt(MemMI);
+ for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
+ Register DestReg = getLdStRegOp(MemMI, i).getReg();
+ if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
+ return E;
+ }
}
// Track which register units have been modified and used between the first
@@ -1487,7 +1551,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
MachineInstr &MemMI = *I;
MachineBasicBlock::iterator MBBI = I;
- unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
+ Register BaseReg = getLdStBaseOp(MemMI).getReg();
int Offset = getLdStOffsetOp(MemMI).getImm();
// If the load/store is the first instruction in the block, there's obviously
@@ -1496,11 +1560,13 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
return E;
// If the base register overlaps a destination register, we can't
// merge the update.
- bool IsPairedInsn = isPairedLdSt(MemMI);
- for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
- unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
- if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
- return E;
+ if (!isTagStore(MemMI)) {
+ bool IsPairedInsn = isPairedLdSt(MemMI);
+ for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
+ Register DestReg = getLdStRegOp(MemMI, i).getReg();
+ if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
+ return E;
+ }
}
// Track which register units have been modified and used between the first
@@ -1659,7 +1725,7 @@ bool AArch64LoadStoreOpt::tryToMergeLdStUpdate
// however, is not, so adjust here.
int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
- // Look forward to try to find a post-index instruction. For example,
+ // Look forward to try to find a pre-index instruction. For example,
// ldr x1, [x0, #64]
// add x0, x0, #64
// merged into: