Diffstat (limited to 'lib/Target/X86/X86InstrInfo.cpp')
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp  106
1 file changed, 52 insertions(+), 54 deletions(-)
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index b92ba99fb100..786150760b93 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -269,14 +269,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::XOR8rr,          X86::XOR8mr, 0 }
   };
 
-  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable2Addr); i != e; ++i) {
-    unsigned RegOp = MemoryFoldTable2Addr[i].RegOp;
-    unsigned MemOp = MemoryFoldTable2Addr[i].MemOp;
-    unsigned Flags = MemoryFoldTable2Addr[i].Flags;
+  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable2Addr) {
     AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable,
-                  RegOp, MemOp,
+                  Entry.RegOp, Entry.MemOp,
                   // Index 0, folded load and store, no alignment requirement.
-                  Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
+                  Entry.Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
   }
 
   static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
@@ -424,12 +421,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VCVTPS2PHYrr,    X86::VCVTPS2PHYmr,  TB_FOLDED_STORE }
   };
 
-  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable0); i != e; ++i) {
-    unsigned RegOp = MemoryFoldTable0[i].RegOp;
-    unsigned MemOp = MemoryFoldTable0[i].MemOp;
-    unsigned Flags = MemoryFoldTable0[i].Flags;
+  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable0) {
     AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
-                  RegOp, MemOp, TB_INDEX_0 | Flags);
+                  Entry.RegOp, Entry.MemOp, TB_INDEX_0 | Entry.Flags);
   }
 
   static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
@@ -862,14 +856,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VAESKEYGENASSIST128rr, X86::VAESKEYGENASSIST128rm, 0 }
   };
 
-  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable1); i != e; ++i) {
-    unsigned RegOp = MemoryFoldTable1[i].RegOp;
-    unsigned MemOp = MemoryFoldTable1[i].MemOp;
-    unsigned Flags = MemoryFoldTable1[i].Flags;
+  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable1) {
     AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable,
-                  RegOp, MemOp,
+                  Entry.RegOp, Entry.MemOp,
                   // Index 1, folded load
-                  Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
+                  Entry.Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
   }
 
   static const X86MemoryFoldTableEntry MemoryFoldTable2[] = {
@@ -1116,6 +1107,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::PUNPCKLQDQrr,    X86::PUNPCKLQDQrm,  TB_ALIGN_16 },
     { X86::PUNPCKLWDrr,     X86::PUNPCKLWDrm,   TB_ALIGN_16 },
     { X86::PXORrr,          X86::PXORrm,        TB_ALIGN_16 },
+    { X86::ROUNDSDr,        X86::ROUNDSDm,      0 },
+    { X86::ROUNDSSr,        X86::ROUNDSSm,      0 },
     { X86::SBB32rr,         X86::SBB32rm,       0 },
     { X86::SBB64rr,         X86::SBB64rm,       0 },
     { X86::SHUFPDrri,       X86::SHUFPDrmi,     TB_ALIGN_16 },
@@ -1412,6 +1405,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPUNPCKLQDQrr,   X86::VPUNPCKLQDQrm, 0 },
     { X86::VPUNPCKLWDrr,    X86::VPUNPCKLWDrm,  0 },
     { X86::VPXORrr,         X86::VPXORrm,       0 },
+    { X86::VROUNDSDr,       X86::VROUNDSDm,     0 },
+    { X86::VROUNDSSr,       X86::VROUNDSSm,     0 },
     { X86::VSHUFPDrri,      X86::VSHUFPDrmi,    0 },
     { X86::VSHUFPSrri,      X86::VSHUFPSrmi,    0 },
     { X86::VSUBPDrr,        X86::VSUBPDrm,      0 },
@@ -1733,14 +1728,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::SHA256RNDS2rr,   X86::SHA256RNDS2rm, TB_ALIGN_16 }
   };
 
-  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable2); i != e; ++i) {
-    unsigned RegOp = MemoryFoldTable2[i].RegOp;
-    unsigned MemOp = MemoryFoldTable2[i].MemOp;
-    unsigned Flags = MemoryFoldTable2[i].Flags;
+  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable2) {
     AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable,
-                  RegOp, MemOp,
+                  Entry.RegOp, Entry.MemOp,
                   // Index 2, folded load
-                  Flags | TB_INDEX_2 | TB_FOLDED_LOAD);
+                  Entry.Flags | TB_INDEX_2 | TB_FOLDED_LOAD);
   }
 
   static const X86MemoryFoldTableEntry MemoryFoldTable3[] = {
@@ -1949,14 +1941,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMAXPDZ128rrkz,  X86::VMAXPDZ128rmkz, 0 }
   };
 
-  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable3); i != e; ++i) {
-    unsigned RegOp = MemoryFoldTable3[i].RegOp;
-    unsigned MemOp = MemoryFoldTable3[i].MemOp;
-    unsigned Flags = MemoryFoldTable3[i].Flags;
+  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable3) {
     AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable,
-                  RegOp, MemOp,
+                  Entry.RegOp, Entry.MemOp,
                   // Index 3, folded load
-                  Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
+                  Entry.Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
   }
 
   static const X86MemoryFoldTableEntry MemoryFoldTable4[] = {
@@ -2001,14 +1990,11 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMAXPDZ128rrk,   X86::VMAXPDZ128rmk, 0 }
   };
 
-  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable4); i != e; ++i) {
-    unsigned RegOp = MemoryFoldTable4[i].RegOp;
-    unsigned MemOp = MemoryFoldTable4[i].MemOp;
-    unsigned Flags = MemoryFoldTable4[i].Flags;
+  for (X86MemoryFoldTableEntry Entry : MemoryFoldTable4) {
     AddTableEntry(RegOp2MemOpTable4, MemOp2RegOpTable,
-                  RegOp, MemOp,
+                  Entry.RegOp, Entry.MemOp,
                   // Index 4, folded load
-                  Flags | TB_INDEX_4 | TB_FOLDED_LOAD);
+                  Entry.Flags | TB_INDEX_4 | TB_FOLDED_LOAD);
   }
 }
 
@@ -3820,7 +3806,7 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
                                         X86::MOVPQIto64rr);
     if (X86::VR64RegClass.contains(SrcReg))
       // Copy from a VR64 register to a GR64 register.
-      return X86::MOVSDto64rr;
+      return X86::MMX_MOVD64from64rr;
  } else if (X86::GR64RegClass.contains(SrcReg)) {
     // Copy from a GR64 register to a VR128 register.
     if (X86::VR128XRegClass.contains(DestReg))
@@ -3828,7 +3814,7 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
                                         X86::MOV64toPQIrr);
     // Copy from a GR64 register to a VR64 register.
     if (X86::VR64RegClass.contains(DestReg))
-      return X86::MOV64toSDrr;
+      return X86::MMX_MOVD64to64rr;
   }
 
   // SrcReg(FR32) -> DestReg(GR32)
@@ -6413,22 +6399,40 @@ static bool hasReassocSibling(const MachineInstr &Inst, bool &Commuted) {
       hasVirtualRegDefsInBasicBlock(*MI1, MBB) &&
       MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg()))
     return true;
-  
+
   return false;
 }
 
+// TODO: There are many more machine instruction opcodes to match:
+//       1. Other data types (integer, vectors)
+//       2. Other math / logic operations (and, or)
+static bool isAssociativeAndCommutative(unsigned Opcode) {
+  switch (Opcode) {
+  case X86::ADDSDrr:
+  case X86::ADDSSrr:
+  case X86::VADDSDrr:
+  case X86::VADDSSrr:
+  case X86::MULSDrr:
+  case X86::MULSSrr:
+  case X86::VMULSDrr:
+  case X86::VMULSSrr:
+    return true;
+  default:
+    return false;
+  }
+}
+
 /// Return true if the input instruction is part of a chain of dependent ops
 /// that are suitable for reassociation, otherwise return false.
 /// If the instruction's operands must be commuted to have a previous
 /// instruction of the same type define the first source operand, Commuted will
 /// be set to true.
-static bool isReassocCandidate(const MachineInstr &Inst, unsigned AssocOpcode,
-                               bool &Commuted) {
-  // 1. The instruction must have the correct type.
+static bool isReassocCandidate(const MachineInstr &Inst, bool &Commuted) {
+  // 1. The operation must be associative and commutative.
   // 2. The instruction must have virtual register definitions for its
   //    operands in the same basic block.
-  // 3. The instruction must have a reassociatable sibling.
-  if (Inst.getOpcode() == AssocOpcode &&
+  // 3. The instruction must have a reassociable sibling.
+  if (isAssociativeAndCommutative(Inst.getOpcode()) &&
      hasVirtualRegDefsInBasicBlock(Inst, Inst.getParent()) &&
      hasReassocSibling(Inst, Commuted))
    return true;
@@ -6455,14 +6459,8 @@ bool X86InstrInfo::getMachineCombinerPatterns(MachineInstr &Root,
   // B = A op X   (Prev)
   // C = B op Y   (Root)
 
-  // TODO: There are many more associative instruction types to match:
-  //       1. Other forms of scalar FP add (non-AVX)
-  //       2. Other data types (double, integer, vectors)
-  //       3. Other math / logic operations (mul, and, or)
-  unsigned AssocOpcode = X86::VADDSSrr;
-
-  bool Commute = false;
-  if (isReassocCandidate(Root, AssocOpcode, Commute)) {
+  bool Commute;
+  if (isReassocCandidate(Root, Commute)) {
     // We found a sequence of instructions that may be suitable for a
     // reassociation of operands to increase ILP. Specify each commutation
     // possibility for the Prev instruction in the sequence and let the
@@ -6512,7 +6510,7 @@ static void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
   MachineOperand &OpX = Prev.getOperand(OpIdx[Pattern][2]);
   MachineOperand &OpY = Root.getOperand(OpIdx[Pattern][3]);
   MachineOperand &OpC = Root.getOperand(0);
-  
+
   unsigned RegA = OpA.getReg();
   unsigned RegB = OpB.getReg();
   unsigned RegX = OpX.getReg();
@@ -6547,7 +6545,7 @@ static void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
     .addReg(RegX, getKillRegState(KillX))
     .addReg(RegY, getKillRegState(KillY));
   InsInstrs.push_back(MIB1);
-  
+
   MachineInstrBuilder MIB2 =
     BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
       .addReg(RegA, getKillRegState(KillA))
@@ -6579,7 +6577,7 @@ void X86InstrInfo::genAlternativeCodeSequence(
     Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
   }
   assert(Prev && "Unknown pattern for machine combiner");
-  
+
   reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
   return;
 }
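
Note: the loop rewrites above all follow the same pattern, so a minimal standalone sketch of the idiom may help. This is illustrative C++ only, not LLVM code; FoldTableEntry, Table, and populate are hypothetical names standing in for X86MemoryFoldTableEntry, the MemoryFoldTable* arrays, and AddTableEntry.

    #include <map>
    #include <utility>

    // Hypothetical stand-in for X86MemoryFoldTableEntry.
    struct FoldTableEntry { unsigned RegOp, MemOp, Flags; };

    // Hypothetical stand-in for one of the static MemoryFoldTable* arrays.
    static const FoldTableEntry Table[] = {
      { 10, 11, 0x0 },
      { 20, 21, 0x1 },
    };

    // A range-based for loop walks the whole static array without an
    // explicit index or array_lengthof(); each Entry is a by-value copy of
    // a small POD, as in the patch's `for (X86MemoryFoldTableEntry Entry : ...)`.
    void populate(std::map<unsigned, std::pair<unsigned, unsigned>> &RegToMem) {
      for (FoldTableEntry Entry : Table)
        RegToMem[Entry.RegOp] = std::make_pair(Entry.MemOp, Entry.Flags);
    }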
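
For context on the isReassocCandidate/getMachineCombinerPatterns changes: reassociation rewrites the serial chain B = A op X; C = B op Y into T = X op Y; C = A op T, so the X op Y half no longer waits on A. Below is a minimal sketch of the effect in plain C++ (illustrative functions, not LLVM code); FP add/mul are not strictly associative, which is why such transforms are typically guarded by fast/unsafe FP-math settings.

    // Serial form: Root depends on Prev, so the critical path once A is
    // available is two dependent FP adds.
    float serialChain(float A, float X, float Y) {
      float B = A + X; // Prev: B = A op X
      return B + Y;    // Root: C = B op Y
    }

    // Reassociated form: X + Y is independent of A and can issue while A
    // is still in flight, shortening the dependency chain and raising ILP.
    float reassociated(float A, float X, float Y) {
      float T = X + Y; // no dependence on A
      return A + T;    // same value only if `op` is treated as associative
    }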