Diffstat (limited to 'llvm/lib/Target/PowerPC/PPCInstrInfo.cpp')
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 549
1 file changed, 381 insertions(+), 168 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 6b10672965c9..30906a32b00c 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -108,7 +108,7 @@ ScheduleHazardRecognizer *
PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
const ScheduleDAG *DAG) const {
unsigned Directive =
- static_cast<const PPCSubtarget *>(STI)->getDarwinDirective();
+ static_cast<const PPCSubtarget *>(STI)->getCPUDirective();
if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
const InstrItineraryData *II =
@@ -125,7 +125,7 @@ ScheduleHazardRecognizer *
PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const {
unsigned Directive =
- DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ DAG->MF.getSubtarget<PPCSubtarget>().getCPUDirective();
// FIXME: Leaving this as-is until we have POWER9 scheduling info
if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
@@ -202,7 +202,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// On some cores, there is an additional delay between writing to a condition
// register, and using it from a branch.
- unsigned Directive = Subtarget.getDarwinDirective();
+ unsigned Directive = Subtarget.getCPUDirective();
switch (Directive) {
default: break;
case PPC::DIR_7400:
@@ -371,7 +371,7 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
MachineFunction &MF = *MI.getParent()->getParent();
// Normal instructions can be commuted the obvious way.
- if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMIo)
+ if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMI_rec)
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
// Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
// 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
@@ -391,7 +391,7 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
// Swap op1/op2
assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
- "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
+ "Only the operands 1 and 2 can be swapped in RLWIMI/RLWIMI_rec.");
Register Reg0 = MI.getOperand(0).getReg();
Register Reg1 = MI.getOperand(1).getReg();
Register Reg2 = MI.getOperand(2).getReg();
@@ -469,7 +469,7 @@ void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
// This function is used for scheduling, and the nop wanted here is the type
// that terminates dispatch groups on the POWER cores.
- unsigned Directive = Subtarget.getDarwinDirective();
+ unsigned Directive = Subtarget.getCPUDirective();
unsigned Opcode;
switch (Directive) {
default: Opcode = PPC::NOP; break;
@@ -903,15 +903,15 @@ static unsigned getCRBitValue(unsigned CRBit) {
void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
- const DebugLoc &DL, unsigned DestReg,
- unsigned SrcReg, bool KillSrc) const {
+ const DebugLoc &DL, MCRegister DestReg,
+ MCRegister SrcReg, bool KillSrc) const {
// We can end up with self copies and similar things as a result of VSX copy
// legalization. Promote them here.
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (PPC::F8RCRegClass.contains(DestReg) &&
PPC::VSRCRegClass.contains(SrcReg)) {
- unsigned SuperReg =
- TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
+ MCRegister SuperReg =
+ TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
if (VSXSelfCopyCrash && SrcReg == SuperReg)
llvm_unreachable("nop VSX copy");
@@ -919,8 +919,8 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
DestReg = SuperReg;
} else if (PPC::F8RCRegClass.contains(SrcReg) &&
PPC::VSRCRegClass.contains(DestReg)) {
- unsigned SuperReg =
- TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
+ MCRegister SuperReg =
+ TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
if (VSXSelfCopyCrash && DestReg == SuperReg)
llvm_unreachable("nop VSX copy");
@@ -931,7 +931,7 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// Different class register copy
if (PPC::CRBITRCRegClass.contains(SrcReg) &&
PPC::GPRCRegClass.contains(DestReg)) {
- unsigned CRReg = getCRFromCRBit(SrcReg);
+ MCRegister CRReg = getCRFromCRBit(SrcReg);
BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(CRReg);
getKillRegState(KillSrc);
// Rotate the CR bit in the CR fields to be the least significant bit and
@@ -1587,22 +1587,6 @@ bool PPCInstrInfo::DefinesPredicate(MachineInstr &MI,
return Found;
}
-bool PPCInstrInfo::isPredicable(const MachineInstr &MI) const {
- unsigned OpC = MI.getOpcode();
- switch (OpC) {
- default:
- return false;
- case PPC::B:
- case PPC::BLR:
- case PPC::BLR8:
- case PPC::BCTR:
- case PPC::BCTR8:
- case PPC::BCTRL:
- case PPC::BCTRL8:
- return true;
- }
-}
-
bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask,
int &Value) const {
@@ -1836,8 +1820,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
int NewOpC = -1;
int MIOpC = MI->getOpcode();
- if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8 ||
- MIOpC == PPC::ANDISo || MIOpC == PPC::ANDISo8)
+ if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
+ MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)
NewOpC = MIOpC;
else {
NewOpC = PPC::getRecordFormOpcode(MIOpC);
@@ -1943,9 +1927,9 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
// The mask value needs to shift right 16 if we're emitting andis.
Mask >>= MBInLoHWord ? 0 : 16;
- NewOpC = MIOpC == PPC::RLWINM ?
- (MBInLoHWord ? PPC::ANDIo : PPC::ANDISo) :
- (MBInLoHWord ? PPC::ANDIo8 :PPC::ANDISo8);
+ NewOpC = MIOpC == PPC::RLWINM
+ ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
+ : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
} else if (MRI->use_empty(GPRRes) && (ME == 31) &&
(ME - MB + 1 == SH) && (MB >= 16)) {
// If we are rotating by the exact number of bits as are in the mask
@@ -1953,7 +1937,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
// that's just an andis. (as long as the GPR result has no uses).
Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
Mask >>= 16;
- NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDISo :PPC::ANDISo8;
+ NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;
}
// If we've set the mask, we can transform.
if (Mask != ~0LLU) {
@@ -1966,7 +1950,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
int64_t MB = MI->getOperand(3).getImm();
if (MB >= 48) {
uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
- NewOpC = PPC::ANDIo8;
+ NewOpC = PPC::ANDI8_rec;
MI->RemoveOperand(3);
MI->getOperand(2).setImm(Mask);
NumRcRotatesConvertedToRcAnd++;
@@ -2306,7 +2290,7 @@ void PPCInstrInfo::replaceInstrWithLI(MachineInstr &MI,
// Replace the instruction.
if (LII.SetCR) {
- MI.setDesc(get(LII.Is64Bit ? PPC::ANDIo8 : PPC::ANDIo));
+ MI.setDesc(get(LII.Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
// Set the immediate.
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addImm(LII.Imm).addReg(PPC::CR0, RegState::ImplicitDefine);
@@ -2370,15 +2354,13 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI(
ImmInstrInfo III;
unsigned Opc = MI.getOpcode();
bool ConvertibleImmForm =
- Opc == PPC::CMPWI || Opc == PPC::CMPLWI ||
- Opc == PPC::CMPDI || Opc == PPC::CMPLDI ||
- Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
- Opc == PPC::ORI || Opc == PPC::ORI8 ||
- Opc == PPC::XORI || Opc == PPC::XORI8 ||
- Opc == PPC::RLDICL || Opc == PPC::RLDICLo ||
- Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
- Opc == PPC::RLWINM || Opc == PPC::RLWINMo ||
- Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o;
+ Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
+ Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
+ Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
+ Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
+ Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
+ Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
+ Opc == PPC::RLWINM8_rec;
bool IsVFReg = (MI.getNumOperands() && MI.getOperand(0).isReg())
? isVFRegister(MI.getOperand(0).getReg())
: false;
@@ -2527,6 +2509,225 @@ void PPCInstrInfo::fixupIsDeadOrKill(MachineInstr &StartMI, MachineInstr &EndMI,
"RegNo should be killed or dead");
}
+// This opt tries to convert the following imm form to an index form to save an
+// add for stack variables.
+// Return false if no such pattern found.
+//
+// ADDI instr: ToBeChangedReg = ADDI FrameBaseReg, OffsetAddi
+// ADD instr: ToBeDeletedReg = ADD ToBeChangedReg(killed), ScaleReg
+// Imm instr: Reg = op OffsetImm, ToBeDeletedReg(killed)
+//
+// can be converted to:
+//
+// new ADDI instr: ToBeChangedReg = ADDI FrameBaseReg, (OffsetAddi + OffsetImm)
+// Index instr: Reg = opx ScaleReg, ToBeChangedReg(killed)
+//
+// In order to eliminate ADD instr, make sure that:
+// 1: (OffsetAddi + OffsetImm) must be int16 since this offset will be used in
+// new ADDI instr and ADDI can only take int16 Imm.
+// 2: ToBeChangedReg must be killed in ADD instr and there is no other use
+// between ADDI and ADD instr since its original def in ADDI will be changed
+// in new ADDI instr. And also there should be no new def for it between
+// ADD and Imm instr as ToBeChangedReg will be used in Index instr.
+// 3: ToBeDeletedReg must be killed in Imm instr and there is no other use
+// between ADD and Imm instr since ADD instr will be eliminated.
+// 4: ScaleReg must not be redefined between ADD and Imm instr since it will be
+// moved to Index instr.
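+//
+// As an illustrative sketch (with hypothetical registers and offsets, and
+// assuming the imm-form load LWZ maps to the index-form LWZX through
+// ImmToIdxMap), a stack access such as:
+//
+//   R4 = ADDI R1, 32            (ToBeChangedReg = FrameBaseReg + OffsetAddi)
+//   R5 = ADD R4(killed), R6     (ToBeDeletedReg = ToBeChangedReg + ScaleReg)
+//   R3 = LWZ 8, R5(killed)      (Imm instr, OffsetImm = 8)
+//
+// would become:
+//
+//   R4 = ADDI R1, 40            (OffsetAddi + OffsetImm = 32 + 8)
+//   R3 = LWZX R6, R4(killed)    (Index instr)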
+bool PPCInstrInfo::foldFrameOffset(MachineInstr &MI) const {
+ MachineFunction *MF = MI.getParent()->getParent();
+ MachineRegisterInfo *MRI = &MF->getRegInfo();
+ bool PostRA = !MRI->isSSA();
+  // Do this opt after PEI, which runs after RA. The reason is that stack slot
+  // expansion in PEI may expose such opportunities, since that is when stack
+  // slot offsets to the frame base (OffsetAddi) are determined.
+ if (!PostRA)
+ return false;
+ unsigned ToBeDeletedReg = 0;
+ int64_t OffsetImm = 0;
+ unsigned XFormOpcode = 0;
+ ImmInstrInfo III;
+
+ // Check if Imm instr meets requirement.
+ if (!isImmInstrEligibleForFolding(MI, ToBeDeletedReg, XFormOpcode, OffsetImm,
+ III))
+ return false;
+
+ bool OtherIntermediateUse = false;
+ MachineInstr *ADDMI = getDefMIPostRA(ToBeDeletedReg, MI, OtherIntermediateUse);
+
+  // Exit if there is another use between the ADD and Imm instr, or if no def
+  // was found.
+ if (OtherIntermediateUse || !ADDMI)
+ return false;
+
+ // Check if ADD instr meets requirement.
+ if (!isADDInstrEligibleForFolding(*ADDMI))
+ return false;
+
+ unsigned ScaleRegIdx = 0;
+ int64_t OffsetAddi = 0;
+ MachineInstr *ADDIMI = nullptr;
+
+ // Check if there is a valid ToBeChangedReg in ADDMI.
+ // 1: It must be killed.
+ // 2: Its definition must be a valid ADDIMI.
+  // 3: It must satisfy the int16 offset requirement.
+ if (isValidToBeChangedReg(ADDMI, 1, ADDIMI, OffsetAddi, OffsetImm))
+ ScaleRegIdx = 2;
+ else if (isValidToBeChangedReg(ADDMI, 2, ADDIMI, OffsetAddi, OffsetImm))
+ ScaleRegIdx = 1;
+ else
+ return false;
+
+ assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");
+ unsigned ToBeChangedReg = ADDIMI->getOperand(0).getReg();
+ unsigned ScaleReg = ADDMI->getOperand(ScaleRegIdx).getReg();
+ auto NewDefFor = [&](unsigned Reg, MachineBasicBlock::iterator Start,
+ MachineBasicBlock::iterator End) {
+ for (auto It = ++Start; It != End; It++)
+ if (It->modifiesRegister(Reg, &getRegisterInfo()))
+ return true;
+ return false;
+ };
+  // Make sure there is no other def of ToBeChangedReg or ScaleReg between the
+  // ADD instr and the Imm instr.
+ if (NewDefFor(ToBeChangedReg, *ADDMI, MI) || NewDefFor(ScaleReg, *ADDMI, MI))
+ return false;
+
+ // Now start to do the transformation.
+ LLVM_DEBUG(dbgs() << "Replace instruction: "
+ << "\n");
+ LLVM_DEBUG(ADDIMI->dump());
+ LLVM_DEBUG(ADDMI->dump());
+ LLVM_DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "with: "
+ << "\n");
+
+ // Update ADDI instr.
+ ADDIMI->getOperand(2).setImm(OffsetAddi + OffsetImm);
+
+ // Update Imm instr.
+ MI.setDesc(get(XFormOpcode));
+ MI.getOperand(III.ImmOpNo)
+ .ChangeToRegister(ScaleReg, false, false,
+ ADDMI->getOperand(ScaleRegIdx).isKill());
+
+ MI.getOperand(III.OpNoForForwarding)
+ .ChangeToRegister(ToBeChangedReg, false, false, true);
+
+ // Eliminate ADD instr.
+ ADDMI->eraseFromParent();
+
+ LLVM_DEBUG(ADDIMI->dump());
+ LLVM_DEBUG(MI.dump());
+
+ return true;
+}
+
+bool PPCInstrInfo::isADDIInstrEligibleForFolding(MachineInstr &ADDIMI,
+ int64_t &Imm) const {
+ unsigned Opc = ADDIMI.getOpcode();
+
+ // Exit if the instruction is not ADDI.
+ if (Opc != PPC::ADDI && Opc != PPC::ADDI8)
+ return false;
+
+ Imm = ADDIMI.getOperand(2).getImm();
+
+ return true;
+}
+
+bool PPCInstrInfo::isADDInstrEligibleForFolding(MachineInstr &ADDMI) const {
+ unsigned Opc = ADDMI.getOpcode();
+
+ // Exit if the instruction is not ADD.
+ return Opc == PPC::ADD4 || Opc == PPC::ADD8;
+}
+
+bool PPCInstrInfo::isImmInstrEligibleForFolding(MachineInstr &MI,
+ unsigned &ToBeDeletedReg,
+ unsigned &XFormOpcode,
+ int64_t &OffsetImm,
+ ImmInstrInfo &III) const {
+ // Only handle load/store.
+ if (!MI.mayLoadOrStore())
+ return false;
+
+ unsigned Opc = MI.getOpcode();
+
+ XFormOpcode = RI.getMappedIdxOpcForImmOpc(Opc);
+
+ // Exit if instruction has no index form.
+ if (XFormOpcode == PPC::INSTRUCTION_LIST_END)
+ return false;
+
+ // TODO: sync the logic between instrHasImmForm() and ImmToIdxMap.
+ if (!instrHasImmForm(XFormOpcode, isVFRegister(MI.getOperand(0).getReg()),
+ III, true))
+ return false;
+
+ if (!III.IsSummingOperands)
+ return false;
+
+ MachineOperand ImmOperand = MI.getOperand(III.ImmOpNo);
+ MachineOperand RegOperand = MI.getOperand(III.OpNoForForwarding);
+ // Only support imm operands, not relocation slots or others.
+ if (!ImmOperand.isImm())
+ return false;
+
+ assert(RegOperand.isReg() && "Instruction format is not right");
+
+  // If ToBeDeletedReg has other uses after the Imm instr, we cannot delete it.
+ if (!RegOperand.isKill())
+ return false;
+
+ ToBeDeletedReg = RegOperand.getReg();
+ OffsetImm = ImmOperand.getImm();
+
+ return true;
+}
+
+bool PPCInstrInfo::isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index,
+ MachineInstr *&ADDIMI,
+ int64_t &OffsetAddi,
+ int64_t OffsetImm) const {
+ assert((Index == 1 || Index == 2) && "Invalid operand index for add.");
+ MachineOperand &MO = ADDMI->getOperand(Index);
+
+ if (!MO.isKill())
+ return false;
+
+ bool OtherIntermediateUse = false;
+
+ ADDIMI = getDefMIPostRA(MO.getReg(), *ADDMI, OtherIntermediateUse);
+  // Currently we handle only the single "add + Imm instr" pair case; exit if
+  // another intermediate use of ToBeChangedReg is found.
+  // TODO: handle cases where there are other "add + Imm instr" pairs with the
+  // same offset in the Imm instr, such as:
+ //
+ // ADDI instr: ToBeChangedReg = ADDI FrameBaseReg, OffsetAddi
+ // ADD instr1: ToBeDeletedReg1 = ADD ToBeChangedReg, ScaleReg1
+ // Imm instr1: Reg1 = op1 OffsetImm, ToBeDeletedReg1(killed)
+ // ADD instr2: ToBeDeletedReg2 = ADD ToBeChangedReg(killed), ScaleReg2
+ // Imm instr2: Reg2 = op2 OffsetImm, ToBeDeletedReg2(killed)
+ //
+ // can be converted to:
+ //
+ // new ADDI instr: ToBeChangedReg = ADDI FrameBaseReg,
+ // (OffsetAddi + OffsetImm)
+ // Index instr1: Reg1 = opx1 ScaleReg1, ToBeChangedReg
+ // Index instr2: Reg2 = opx2 ScaleReg2, ToBeChangedReg(killed)
+
+ if (OtherIntermediateUse || !ADDIMI)
+ return false;
+ // Check if ADDI instr meets requirement.
+ if (!isADDIInstrEligibleForFolding(*ADDIMI, OffsetAddi))
+ return false;
+
+ if (isInt<16>(OffsetAddi + OffsetImm))
+ return true;
+ return false;
+}
+
// If this instruction has an immediate form and one of its operands is a
// result of a load-immediate or an add-immediate, convert it to
// the immediate form if the constant is in range.
@@ -2660,34 +2861,34 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
return false;
}
case PPC::RLDICL:
- case PPC::RLDICLo:
+ case PPC::RLDICL_rec:
case PPC::RLDICL_32:
case PPC::RLDICL_32_64: {
// Use APInt's rotate function.
int64_t SH = MI.getOperand(2).getImm();
int64_t MB = MI.getOperand(3).getImm();
- APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICLo) ?
- 64 : 32, SExtImm, true);
+ APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,
+ SExtImm, true);
InVal = InVal.rotl(SH);
uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
InVal &= Mask;
// Can't replace negative values with an LI as that will sign-extend
// and not clear the left bits. If we're setting the CR bit, we will use
- // ANDIo which won't sign extend, so that's safe.
+ // ANDI_rec which won't sign extend, so that's safe.
if (isUInt<15>(InVal.getSExtValue()) ||
- (Opc == PPC::RLDICLo && isUInt<16>(InVal.getSExtValue()))) {
+ (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) {
ReplaceWithLI = true;
Is64BitLI = Opc != PPC::RLDICL_32;
NewImm = InVal.getSExtValue();
- SetCR = Opc == PPC::RLDICLo;
+ SetCR = Opc == PPC::RLDICL_rec;
break;
}
return false;
}
case PPC::RLWINM:
case PPC::RLWINM8:
- case PPC::RLWINMo:
- case PPC::RLWINM8o: {
+ case PPC::RLWINM_rec:
+ case PPC::RLWINM8_rec: {
int64_t SH = MI.getOperand(2).getImm();
int64_t MB = MI.getOperand(3).getImm();
int64_t ME = MI.getOperand(4).getImm();
@@ -2698,15 +2899,15 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
InVal &= Mask;
// Can't replace negative values with an LI as that will sign-extend
// and not clear the left bits. If we're setting the CR bit, we will use
- // ANDIo which won't sign extend, so that's safe.
+ // ANDI_rec which won't sign extend, so that's safe.
bool ValueFits = isUInt<15>(InVal.getSExtValue());
- ValueFits |= ((Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o) &&
+ ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
isUInt<16>(InVal.getSExtValue()));
if (ValueFits) {
ReplaceWithLI = true;
- Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o;
+ Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
NewImm = InVal.getSExtValue();
- SetCR = Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o;
+ SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;
break;
}
return false;
@@ -2768,7 +2969,7 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
LII.Is64Bit = Is64BitLI;
LII.SetCR = SetCR;
// If we're setting the CR, the original load-immediate must be kept (as an
- // operand to ANDIo/ANDI8o).
+ // operand to ANDI_rec/ANDI8_rec).
if (KilledDef && SetCR)
*KilledDef = nullptr;
replaceInstrWithLI(MI, LII);
@@ -2819,13 +3020,13 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
III.IsSummingOperands = true;
III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
break;
- case PPC::ADDCo:
+ case PPC::ADDC_rec:
III.SignedImm = true;
III.ZeroIsSpecialOrig = 0;
III.ZeroIsSpecialNew = 0;
III.IsCommutative = true;
III.IsSummingOperands = true;
- III.ImmOpcode = PPC::ADDICo;
+ III.ImmOpcode = PPC::ADDIC_rec;
break;
case PPC::SUBFC:
case PPC::SUBFC8:
@@ -2851,8 +3052,8 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
III.IsCommutative = false;
III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
break;
- case PPC::ANDo:
- case PPC::AND8o:
+ case PPC::AND_rec:
+ case PPC::AND8_rec:
case PPC::OR:
case PPC::OR8:
case PPC::XOR:
@@ -2863,8 +3064,12 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
III.IsCommutative = true;
switch(Opc) {
default: llvm_unreachable("Unknown opcode");
- case PPC::ANDo: III.ImmOpcode = PPC::ANDIo; break;
- case PPC::AND8o: III.ImmOpcode = PPC::ANDIo8; break;
+ case PPC::AND_rec:
+ III.ImmOpcode = PPC::ANDI_rec;
+ break;
+ case PPC::AND8_rec:
+ III.ImmOpcode = PPC::ANDI8_rec;
+ break;
case PPC::OR: III.ImmOpcode = PPC::ORI; break;
case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
@@ -2873,18 +3078,18 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
break;
case PPC::RLWNM:
case PPC::RLWNM8:
- case PPC::RLWNMo:
- case PPC::RLWNM8o:
+ case PPC::RLWNM_rec:
+ case PPC::RLWNM8_rec:
case PPC::SLW:
case PPC::SLW8:
- case PPC::SLWo:
- case PPC::SLW8o:
+ case PPC::SLW_rec:
+ case PPC::SLW8_rec:
case PPC::SRW:
case PPC::SRW8:
- case PPC::SRWo:
- case PPC::SRW8o:
+ case PPC::SRW_rec:
+ case PPC::SRW8_rec:
case PPC::SRAW:
- case PPC::SRAWo:
+ case PPC::SRAW_rec:
III.SignedImm = false;
III.ZeroIsSpecialOrig = 0;
III.ZeroIsSpecialNew = 0;
@@ -2894,8 +3099,8 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
// This does not apply to shift right algebraic because a value
// out of range will produce a -1/0.
III.ImmWidth = 16;
- if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 ||
- Opc == PPC::RLWNMo || Opc == PPC::RLWNM8o)
+ if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
+ Opc == PPC::RLWNM8_rec)
III.TruncateImmTo = 5;
else
III.TruncateImmTo = 6;
@@ -2903,38 +3108,50 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
default: llvm_unreachable("Unknown opcode");
case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
- case PPC::RLWNMo: III.ImmOpcode = PPC::RLWINMo; break;
- case PPC::RLWNM8o: III.ImmOpcode = PPC::RLWINM8o; break;
+ case PPC::RLWNM_rec:
+ III.ImmOpcode = PPC::RLWINM_rec;
+ break;
+ case PPC::RLWNM8_rec:
+ III.ImmOpcode = PPC::RLWINM8_rec;
+ break;
case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;
- case PPC::SLWo: III.ImmOpcode = PPC::RLWINMo; break;
- case PPC::SLW8o: III.ImmOpcode = PPC::RLWINM8o; break;
+ case PPC::SLW_rec:
+ III.ImmOpcode = PPC::RLWINM_rec;
+ break;
+ case PPC::SLW8_rec:
+ III.ImmOpcode = PPC::RLWINM8_rec;
+ break;
case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;
- case PPC::SRWo: III.ImmOpcode = PPC::RLWINMo; break;
- case PPC::SRW8o: III.ImmOpcode = PPC::RLWINM8o; break;
+ case PPC::SRW_rec:
+ III.ImmOpcode = PPC::RLWINM_rec;
+ break;
+ case PPC::SRW8_rec:
+ III.ImmOpcode = PPC::RLWINM8_rec;
+ break;
case PPC::SRAW:
III.ImmWidth = 5;
III.TruncateImmTo = 0;
III.ImmOpcode = PPC::SRAWI;
break;
- case PPC::SRAWo:
+ case PPC::SRAW_rec:
III.ImmWidth = 5;
III.TruncateImmTo = 0;
- III.ImmOpcode = PPC::SRAWIo;
+ III.ImmOpcode = PPC::SRAWI_rec;
break;
}
break;
case PPC::RLDCL:
- case PPC::RLDCLo:
+ case PPC::RLDCL_rec:
case PPC::RLDCR:
- case PPC::RLDCRo:
+ case PPC::RLDCR_rec:
case PPC::SLD:
- case PPC::SLDo:
+ case PPC::SLD_rec:
case PPC::SRD:
- case PPC::SRDo:
+ case PPC::SRD_rec:
case PPC::SRAD:
- case PPC::SRADo:
+ case PPC::SRAD_rec:
III.SignedImm = false;
III.ZeroIsSpecialOrig = 0;
III.ZeroIsSpecialNew = 0;
@@ -2944,30 +3161,38 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
// This does not apply to shift right algebraic because a value
// out of range will produce a -1/0.
III.ImmWidth = 16;
- if (Opc == PPC::RLDCL || Opc == PPC::RLDCLo ||
- Opc == PPC::RLDCR || Opc == PPC::RLDCRo)
+ if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
+ Opc == PPC::RLDCR_rec)
III.TruncateImmTo = 6;
else
III.TruncateImmTo = 7;
switch(Opc) {
default: llvm_unreachable("Unknown opcode");
case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
- case PPC::RLDCLo: III.ImmOpcode = PPC::RLDICLo; break;
+ case PPC::RLDCL_rec:
+ III.ImmOpcode = PPC::RLDICL_rec;
+ break;
case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
- case PPC::RLDCRo: III.ImmOpcode = PPC::RLDICRo; break;
+ case PPC::RLDCR_rec:
+ III.ImmOpcode = PPC::RLDICR_rec;
+ break;
case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;
- case PPC::SLDo: III.ImmOpcode = PPC::RLDICRo; break;
+ case PPC::SLD_rec:
+ III.ImmOpcode = PPC::RLDICR_rec;
+ break;
case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
- case PPC::SRDo: III.ImmOpcode = PPC::RLDICLo; break;
+ case PPC::SRD_rec:
+ III.ImmOpcode = PPC::RLDICL_rec;
+ break;
case PPC::SRAD:
III.ImmWidth = 6;
III.TruncateImmTo = 0;
III.ImmOpcode = PPC::SRADI;
break;
- case PPC::SRADo:
+ case PPC::SRAD_rec:
III.ImmWidth = 6;
III.TruncateImmTo = 0;
- III.ImmOpcode = PPC::SRADIo;
+ III.ImmOpcode = PPC::SRADI_rec;
break;
}
break;
@@ -3538,14 +3763,16 @@ bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI,
ForwardKilledOperandReg = MI.getOperand(ConstantOpNo).getReg();
unsigned Opc = MI.getOpcode();
- bool SpecialShift32 =
- Opc == PPC::SLW || Opc == PPC::SLWo || Opc == PPC::SRW || Opc == PPC::SRWo;
- bool SpecialShift64 =
- Opc == PPC::SLD || Opc == PPC::SLDo || Opc == PPC::SRD || Opc == PPC::SRDo;
- bool SetCR = Opc == PPC::SLWo || Opc == PPC::SRWo ||
- Opc == PPC::SLDo || Opc == PPC::SRDo;
- bool RightShift =
- Opc == PPC::SRW || Opc == PPC::SRWo || Opc == PPC::SRD || Opc == PPC::SRDo;
+ bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
+ Opc == PPC::SRW || Opc == PPC::SRW_rec ||
+ Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
+ Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
+ bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
+ Opc == PPC::SRD || Opc == PPC::SRD_rec;
+ bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
+ Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
+ bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD ||
+ Opc == PPC::SRD_rec;
MI.setDesc(get(III.ImmOpcode));
if (ConstantOpNo == III.OpNoForForwarding) {
@@ -3649,27 +3876,21 @@ int PPCInstrInfo::getRecordFormOpcode(unsigned Opcode) {
// i.e. bits 0 to 31 are the same as bit 32.
static bool isSignExtendingOp(const MachineInstr &MI) {
int Opcode = MI.getOpcode();
- if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
- Opcode == PPC::LIS || Opcode == PPC::LIS8 ||
- Opcode == PPC::SRAW || Opcode == PPC::SRAWo ||
- Opcode == PPC::SRAWI || Opcode == PPC::SRAWIo ||
- Opcode == PPC::LWA || Opcode == PPC::LWAX ||
- Opcode == PPC::LWA_32 || Opcode == PPC::LWAX_32 ||
- Opcode == PPC::LHA || Opcode == PPC::LHAX ||
- Opcode == PPC::LHA8 || Opcode == PPC::LHAX8 ||
- Opcode == PPC::LBZ || Opcode == PPC::LBZX ||
- Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 ||
- Opcode == PPC::LBZU || Opcode == PPC::LBZUX ||
- Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 ||
- Opcode == PPC::LHZ || Opcode == PPC::LHZX ||
- Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 ||
- Opcode == PPC::LHZU || Opcode == PPC::LHZUX ||
- Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 ||
- Opcode == PPC::EXTSB || Opcode == PPC::EXTSBo ||
- Opcode == PPC::EXTSH || Opcode == PPC::EXTSHo ||
- Opcode == PPC::EXTSB8 || Opcode == PPC::EXTSH8 ||
- Opcode == PPC::EXTSW || Opcode == PPC::EXTSWo ||
- Opcode == PPC::SETB || Opcode == PPC::SETB8 ||
+ if (Opcode == PPC::LI || Opcode == PPC::LI8 || Opcode == PPC::LIS ||
+ Opcode == PPC::LIS8 || Opcode == PPC::SRAW || Opcode == PPC::SRAW_rec ||
+ Opcode == PPC::SRAWI || Opcode == PPC::SRAWI_rec || Opcode == PPC::LWA ||
+ Opcode == PPC::LWAX || Opcode == PPC::LWA_32 || Opcode == PPC::LWAX_32 ||
+ Opcode == PPC::LHA || Opcode == PPC::LHAX || Opcode == PPC::LHA8 ||
+ Opcode == PPC::LHAX8 || Opcode == PPC::LBZ || Opcode == PPC::LBZX ||
+ Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 || Opcode == PPC::LBZU ||
+ Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 ||
+ Opcode == PPC::LHZ || Opcode == PPC::LHZX || Opcode == PPC::LHZ8 ||
+ Opcode == PPC::LHZX8 || Opcode == PPC::LHZU || Opcode == PPC::LHZUX ||
+ Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 || Opcode == PPC::EXTSB ||
+ Opcode == PPC::EXTSB_rec || Opcode == PPC::EXTSH ||
+ Opcode == PPC::EXTSH_rec || Opcode == PPC::EXTSB8 ||
+ Opcode == PPC::EXTSH8 || Opcode == PPC::EXTSW ||
+ Opcode == PPC::EXTSW_rec || Opcode == PPC::SETB || Opcode == PPC::SETB8 ||
Opcode == PPC::EXTSH8_32_64 || Opcode == PPC::EXTSW_32_64 ||
Opcode == PPC::EXTSB8_32_64)
return true;
@@ -3677,8 +3898,8 @@ static bool isSignExtendingOp(const MachineInstr &MI) {
if (Opcode == PPC::RLDICL && MI.getOperand(3).getImm() >= 33)
return true;
- if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo ||
- Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo) &&
+ if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
+ Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
MI.getOperand(3).getImm() > 0 &&
MI.getOperand(3).getImm() <= MI.getOperand(4).getImm())
return true;
@@ -3701,52 +3922,46 @@ static bool isZeroExtendingOp(const MachineInstr &MI) {
// We have some variations of rotate-and-mask instructions
// that clear higher 32-bits.
- if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICLo ||
- Opcode == PPC::RLDCL || Opcode == PPC::RLDCLo ||
+ if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
+ Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
Opcode == PPC::RLDICL_32_64) &&
MI.getOperand(3).getImm() >= 32)
return true;
- if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDICo) &&
+ if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
MI.getOperand(3).getImm() >= 32 &&
MI.getOperand(3).getImm() <= 63 - MI.getOperand(2).getImm())
return true;
- if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo ||
- Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo ||
+ if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
+ Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
MI.getOperand(3).getImm() <= MI.getOperand(4).getImm())
return true;
// There are other instructions that clear higher 32-bits.
- if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZWo ||
- Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZWo ||
+ if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZW_rec ||
+ Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZW_rec ||
Opcode == PPC::CNTLZW8 || Opcode == PPC::CNTTZW8 ||
- Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZDo ||
- Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZDo ||
- Opcode == PPC::POPCNTD || Opcode == PPC::POPCNTW ||
- Opcode == PPC::SLW || Opcode == PPC::SLWo ||
- Opcode == PPC::SRW || Opcode == PPC::SRWo ||
- Opcode == PPC::SLW8 || Opcode == PPC::SRW8 ||
- Opcode == PPC::SLWI || Opcode == PPC::SLWIo ||
- Opcode == PPC::SRWI || Opcode == PPC::SRWIo ||
- Opcode == PPC::LWZ || Opcode == PPC::LWZX ||
- Opcode == PPC::LWZU || Opcode == PPC::LWZUX ||
- Opcode == PPC::LWBRX || Opcode == PPC::LHBRX ||
- Opcode == PPC::LHZ || Opcode == PPC::LHZX ||
- Opcode == PPC::LHZU || Opcode == PPC::LHZUX ||
- Opcode == PPC::LBZ || Opcode == PPC::LBZX ||
- Opcode == PPC::LBZU || Opcode == PPC::LBZUX ||
- Opcode == PPC::LWZ8 || Opcode == PPC::LWZX8 ||
- Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8 ||
- Opcode == PPC::LWBRX8 || Opcode == PPC::LHBRX8 ||
- Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 ||
- Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 ||
- Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 ||
- Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 ||
- Opcode == PPC::ANDIo || Opcode == PPC::ANDISo ||
- Opcode == PPC::ROTRWI || Opcode == PPC::ROTRWIo ||
- Opcode == PPC::EXTLWI || Opcode == PPC::EXTLWIo ||
+ Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZD_rec ||
+ Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZD_rec ||
+ Opcode == PPC::POPCNTD || Opcode == PPC::POPCNTW || Opcode == PPC::SLW ||
+ Opcode == PPC::SLW_rec || Opcode == PPC::SRW || Opcode == PPC::SRW_rec ||
+ Opcode == PPC::SLW8 || Opcode == PPC::SRW8 || Opcode == PPC::SLWI ||
+ Opcode == PPC::SLWI_rec || Opcode == PPC::SRWI ||
+ Opcode == PPC::SRWI_rec || Opcode == PPC::LWZ || Opcode == PPC::LWZX ||
+ Opcode == PPC::LWZU || Opcode == PPC::LWZUX || Opcode == PPC::LWBRX ||
+ Opcode == PPC::LHBRX || Opcode == PPC::LHZ || Opcode == PPC::LHZX ||
+ Opcode == PPC::LHZU || Opcode == PPC::LHZUX || Opcode == PPC::LBZ ||
+ Opcode == PPC::LBZX || Opcode == PPC::LBZU || Opcode == PPC::LBZUX ||
+ Opcode == PPC::LWZ8 || Opcode == PPC::LWZX8 || Opcode == PPC::LWZU8 ||
+ Opcode == PPC::LWZUX8 || Opcode == PPC::LWBRX8 || Opcode == PPC::LHBRX8 ||
+ Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 || Opcode == PPC::LHZU8 ||
+ Opcode == PPC::LHZUX8 || Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 ||
+ Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 ||
+ Opcode == PPC::ANDI_rec || Opcode == PPC::ANDIS_rec ||
+ Opcode == PPC::ROTRWI || Opcode == PPC::ROTRWI_rec ||
+ Opcode == PPC::EXTLWI || Opcode == PPC::EXTLWI_rec ||
Opcode == PPC::MFVSRWZ)
return true;
@@ -3840,14 +4055,14 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
return false;
}
- case PPC::ANDIo:
- case PPC::ANDISo:
+ case PPC::ANDI_rec:
+ case PPC::ANDIS_rec:
case PPC::ORI:
case PPC::ORIS:
case PPC::XORI:
case PPC::XORIS:
- case PPC::ANDIo8:
- case PPC::ANDISo8:
+ case PPC::ANDI8_rec:
+ case PPC::ANDIS8_rec:
case PPC::ORI8:
case PPC::ORIS8:
case PPC::XORI8:
@@ -4042,12 +4257,10 @@ MachineInstr *PPCInstrInfo::findLoopInstr(
// Return true if we get the base operand and byte offset of an instruction and
// the memory width. Width is the size of memory that is being loaded/stored.
bool PPCInstrInfo::getMemOperandWithOffsetWidth(
- const MachineInstr &LdSt,
- const MachineOperand *&BaseReg,
- int64_t &Offset,
- unsigned &Width,
- const TargetRegisterInfo *TRI) const {
- assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
+ const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
+ unsigned &Width, const TargetRegisterInfo *TRI) const {
+ if (!LdSt.mayLoadOrStore())
+ return false;
// Handle only loads/stores with base register followed by immediate offset.
if (LdSt.getNumExplicitOperands() != 3)