Diffstat (limited to 'lib/Target/Mips/MipsInstructionSelector.cpp')
-rw-r--r-- | lib/Target/Mips/MipsInstructionSelector.cpp | 447
1 file changed, 396 insertions, 51 deletions
diff --git a/lib/Target/Mips/MipsInstructionSelector.cpp b/lib/Target/Mips/MipsInstructionSelector.cpp
index b041590ee343..45a47ad3c087 100644
--- a/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -1,9 +1,8 @@
 //===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
 //
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
 /// \file
@@ -12,6 +11,8 @@
 /// \todo This should be generated by TableGen.
 //===----------------------------------------------------------------------===//
 
+#include "MCTargetDesc/MipsInstPrinter.h"
+#include "MipsMachineFunction.h"
 #include "MipsRegisterBankInfo.h"
 #include "MipsTargetMachine.h"
 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
@@ -37,6 +38,12 @@ public:
 
 private:
   bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
+  bool materialize32BitImm(Register DestReg, APInt Imm,
+                           MachineIRBuilder &B) const;
+  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  const TargetRegisterClass *
+  getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
+                           const RegisterBankInfo &RBI) const;
 
   const MipsTargetMachine &TM;
   const MipsSubtarget &STI;
@@ -74,15 +81,24 @@ MipsInstructionSelector::MipsInstructionSelector(
 {
 }
 
-static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
-                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
-                       const RegisterBankInfo &RBI) {
-  unsigned DstReg = I.getOperand(0).getReg();
+bool MipsInstructionSelector::selectCopy(MachineInstr &I,
+                                         MachineRegisterInfo &MRI) const {
+  Register DstReg = I.getOperand(0).getReg();
   if (TargetRegisterInfo::isPhysicalRegister(DstReg))
     return true;
 
-  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+  const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
+  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
+  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+  if (RegBank->getID() == Mips::FPRBRegBankID) {
+    if (DstSize == 32)
+      RC = &Mips::FGR32RegClass;
+    else if (DstSize == 64)
+      RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
+    else
+      llvm_unreachable("Unsupported destination size");
+  }
 
   if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
     LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                       << " operand\n");
@@ -91,6 +107,102 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
   return true;
 }
 
+const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
+    unsigned OpSize, const RegisterBank &RB,
+    const RegisterBankInfo &RBI) const {
+  if (RB.getID() == Mips::GPRBRegBankID)
+    return &Mips::GPR32RegClass;
+
+  if (RB.getID() == Mips::FPRBRegBankID)
+    return OpSize == 32
+               ? &Mips::FGR32RegClass
+               : STI.hasMips32r6() || STI.isFP64bit() ? &Mips::FGR64RegClass
+                                                      : &Mips::AFGR64RegClass;
+
+  llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
+  return nullptr;
+}
+
+bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
+                                                  MachineIRBuilder &B) const {
+  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
+  // Ori zero extends immediate. Used for values with zeros in high 16 bits.
+  if (Imm.getHiBits(16).isNullValue()) {
+    MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
+                             .addImm(Imm.getLoBits(16).getLimitedValue());
+    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
+  }
+  // Lui places immediate in high 16 bits and sets low 16 bits to zero.
+  if (Imm.getLoBits(16).isNullValue()) {
+    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
+                             .addImm(Imm.getHiBits(16).getLimitedValue());
+    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
+  }
+  // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
+  if (Imm.isSignedIntN(16)) {
+    MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
+                             .addImm(Imm.getLoBits(16).getLimitedValue());
+    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
+  }
+  // Values that cannot be materialized with single immediate instruction.
+  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
+  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
+                          .addImm(Imm.getHiBits(16).getLimitedValue());
+  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
+                          .addImm(Imm.getLoBits(16).getLimitedValue());
+  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
+    return false;
+  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
+    return false;
+  return true;
+}
+
+/// Returning Opc indicates that we failed to select MIPS instruction opcode.
+static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
+                                      unsigned RegBank, bool isFP64) {
+  bool isStore = Opc == TargetOpcode::G_STORE;
+  if (RegBank == Mips::GPRBRegBankID) {
+    if (isStore)
+      switch (MemSizeInBytes) {
+      case 4:
+        return Mips::SW;
+      case 2:
+        return Mips::SH;
+      case 1:
+        return Mips::SB;
+      default:
+        return Opc;
+      }
+    else
+      // Unspecified extending load is selected into zeroExtending load.
+      switch (MemSizeInBytes) {
+      case 4:
+        return Mips::LW;
+      case 2:
+        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
+      case 1:
+        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
+      default:
+        return Opc;
+      }
+  }
+
+  if (RegBank == Mips::FPRBRegBankID) {
+    switch (MemSizeInBytes) {
+    case 4:
+      return isStore ? Mips::SWC1 : Mips::LWC1;
+    case 8:
+      if (isFP64)
+        return isStore ? Mips::SDC164 : Mips::LDC164;
+      else
+        return isStore ? Mips::SDC1 : Mips::LDC1;
+    default:
+      return Opc;
+    }
+  }
+  return Opc;
+}
+
 bool MipsInstructionSelector::select(MachineInstr &I,
                                      CodeGenCoverage &CoverageInfo) const {
 
@@ -100,19 +212,52 @@ bool MipsInstructionSelector::select(MachineInstr &I,
 
   if (!isPreISelGenericOpcode(I.getOpcode())) {
     if (I.isCopy())
-      return selectCopy(I, TII, MRI, TRI, RBI);
+      return selectCopy(I, MRI);
 
     return true;
   }
 
-  if (selectImpl(I, CoverageInfo)) {
+  if (I.getOpcode() == Mips::G_MUL) {
+    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
+                            .add(I.getOperand(0))
+                            .add(I.getOperand(1))
+                            .add(I.getOperand(2));
+    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
+      return false;
+    Mul->getOperand(3).setIsDead(true);
+    Mul->getOperand(4).setIsDead(true);
+
+    I.eraseFromParent();
     return true;
   }
 
+  if (selectImpl(I, CoverageInfo))
+    return true;
+
   MachineInstr *MI = nullptr;
   using namespace TargetOpcode;
   switch (I.getOpcode()) {
+  case G_UMULH: {
+    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
+    MachineInstr *PseudoMULTu, *PseudoMove;
+
+    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
+                      .addDef(PseudoMULTuReg)
+                      .add(I.getOperand(1))
+                      .add(I.getOperand(2));
+    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
+      return false;
+
+    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
+                     .addDef(I.getOperand(0).getReg())
+                     .addUse(PseudoMULTuReg);
+    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
+      return false;
+
+    I.eraseFromParent();
+    return true;
+  }
   case G_GEP: {
     MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
              .add(I.getOperand(0))
@@ -127,16 +272,46 @@ bool MipsInstructionSelector::select(MachineInstr &I,
              .addImm(0);
     break;
   }
+  case G_BRCOND: {
+    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
+             .add(I.getOperand(0))
+             .addUse(Mips::ZERO)
+             .add(I.getOperand(1));
+    break;
+  }
+  case G_PHI: {
+    const Register DestReg = I.getOperand(0).getReg();
+    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
+
+    const TargetRegisterClass *DefRC = nullptr;
+    if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+      DefRC = TRI.getRegClass(DestReg);
+    else
+      DefRC = getRegClassForTypeOnBank(OpSize,
+                                       *RBI.getRegBank(DestReg, MRI, TRI), RBI);
+
+    I.setDesc(TII.get(TargetOpcode::PHI));
+    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
+  }
   case G_STORE:
-  case G_LOAD: {
-    const unsigned DestReg = I.getOperand(0).getReg();
+  case G_LOAD:
+  case G_ZEXTLOAD:
+  case G_SEXTLOAD: {
+    const Register DestReg = I.getOperand(0).getReg();
     const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
     const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
+    const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();
 
-    if (DestRegBank != Mips::GPRBRegBankID || OpSize != 32)
+    if (DestRegBank == Mips::GPRBRegBankID && OpSize != 32)
       return false;
 
-    const unsigned NewOpc = I.getOpcode() == G_STORE ? Mips::SW : Mips::LW;
+    if (DestRegBank == Mips::FPRBRegBankID && OpSize != 32 && OpSize != 64)
+      return false;
+
+    const unsigned NewOpc = selectLoadStoreOpCode(
+        I.getOpcode(), OpMemSizeInBytes, DestRegBank, STI.isFP64bit());
+    if (NewOpc == I.getOpcode())
+      return false;
 
     MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
              .add(I.getOperand(0))
@@ -149,7 +324,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
   case G_UREM:
   case G_SDIV:
   case G_SREM: {
-    unsigned HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
+    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
     bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
     bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;
 
@@ -182,58 +357,150 @@ bool MipsInstructionSelector::select(MachineInstr &I,
     break;
   }
   case G_CONSTANT: {
-    int Imm = I.getOperand(1).getCImm()->getValue().getLimitedValue();
-    unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
-    MachineInstr *LUi, *ORi;
+    MachineIRBuilder B(I);
+    if (!materialize32BitImm(I.getOperand(0).getReg(),
+                             I.getOperand(1).getCImm()->getValue(), B))
+      return false;
 
-    LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
-              .addDef(LUiReg)
-              .addImm(Imm >> 16);
+    I.eraseFromParent();
+    return true;
+  }
+  case G_FCONSTANT: {
+    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
+    APInt APImm = FPimm.bitcastToAPInt();
+    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
+
+    if (Size == 32) {
+      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+      MachineIRBuilder B(I);
+      if (!materialize32BitImm(GPRReg, APImm, B))
+        return false;
 
-    ORi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ORi))
-              .addDef(I.getOperand(0).getReg())
-              .addUse(LUiReg)
-              .addImm(Imm & 0xFFFF);
+      MachineInstrBuilder MTC1 =
+          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
+      if (!MTC1.constrainAllUses(TII, TRI, RBI))
+        return false;
+    }
+    if (Size == 64) {
+      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+      MachineIRBuilder B(I);
+      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
+        return false;
+      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
+        return false;
+
+      MachineInstrBuilder PairF64 = B.buildInstr(
+          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
+          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
+      if (!PairF64.constrainAllUses(TII, TRI, RBI))
+        return false;
+    }
 
-    if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
+    I.eraseFromParent();
+    return true;
+  }
+  case G_FABS: {
+    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
+    unsigned FABSOpcode =
+        Size == 32 ? Mips::FABS_S
+                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
+    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
+             .add(I.getOperand(0))
+             .add(I.getOperand(1));
+    break;
+  }
+  case G_FPTOSI: {
+    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
+    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
+    (void)ToSize;
+    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
+    assert((FromSize == 32 || FromSize == 64) &&
+           "Unsupported floating point size for G_FPTOSI");
+
+    unsigned Opcode;
+    if (FromSize == 32)
+      Opcode = Mips::TRUNC_W_S;
+    else
+      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
+    unsigned ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
+    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
+                              .addDef(ResultInFPR)
+                              .addUse(I.getOperand(1).getReg());
+    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
       return false;
-    if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
+
+    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
+                             .addDef(I.getOperand(0).getReg())
+                             .addUse(ResultInFPR);
+    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
       return false;
 
     I.eraseFromParent();
     return true;
   }
   case G_GLOBAL_VALUE: {
-    if (MF.getTarget().isPositionIndependent())
-      return false;
-
     const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
-    unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
-    MachineInstr *LUi, *ADDiu;
+    if (MF.getTarget().isPositionIndependent()) {
+      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
+                                .addDef(I.getOperand(0).getReg())
+                                .addReg(MF.getInfo<MipsFunctionInfo>()
+                                            ->getGlobalBaseRegForGlobalISel())
+                                .addGlobalAddress(GVal);
+      // Global Values that don't have local linkage are handled differently
+      // when they are part of call sequence. MipsCallLowering::lowerCall
+      // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
+      // MO_GOT_CALL flag when Callee doesn't have local linkage.
+      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
+        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
+      else
+        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
+      LWGOT->addMemOperand(
+          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
+                                      MachineMemOperand::MOLoad, 4, 4));
+      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
+        return false;
 
-    LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
-              .addDef(LUiReg)
-              .addGlobalAddress(GVal);
-    LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
+      if (GVal->hasLocalLinkage()) {
+        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+        LWGOT->getOperand(0).setReg(LWGOTDef);
 
-    ADDiu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
+        MachineInstr *ADDiu =
+            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                 .addDef(I.getOperand(0).getReg())
-              .addUse(LUiReg)
+                .addReg(LWGOTDef)
                 .addGlobalAddress(GVal);
-    ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
-
-    if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
-      return false;
-    if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
-      return false;
+        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
+        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
+          return false;
+      }
+    } else {
+      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+
+      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
+                              .addDef(LUiReg)
+                              .addGlobalAddress(GVal);
+      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
+      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
+        return false;
+
+      MachineInstr *ADDiu =
+          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
+              .addDef(I.getOperand(0).getReg())
+              .addUse(LUiReg)
+              .addGlobalAddress(GVal);
+      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
+      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
+        return false;
+    }
 
     I.eraseFromParent();
     return true;
   }
   case G_ICMP: {
     struct Instr {
-      unsigned Opcode, Def, LHS, RHS;
-      Instr(unsigned Opcode, unsigned Def, unsigned LHS, unsigned RHS)
+      unsigned Opcode;
+      Register Def, LHS, RHS;
+      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};
 
      bool hasImm() const {
@@ -244,10 +511,10 @@ bool MipsInstructionSelector::select(MachineInstr &I,
     };
 
     SmallVector<struct Instr, 2> Instructions;
-    unsigned ICMPReg = I.getOperand(0).getReg();
-    unsigned Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
-    unsigned LHS = I.getOperand(2).getReg();
-    unsigned RHS = I.getOperand(3).getReg();
+    Register ICMPReg = I.getOperand(0).getReg();
+    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+    Register LHS = I.getOperand(2).getReg();
+    Register RHS = I.getOperand(3).getReg();
     CmpInst::Predicate Cond =
         static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
 
@@ -309,6 +576,84 @@ bool MipsInstructionSelector::select(MachineInstr &I,
     I.eraseFromParent();
     return true;
   }
+  case G_FCMP: {
+    unsigned MipsFCMPCondCode;
+    bool isLogicallyNegated;
+    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
+                I.getOperand(1).getPredicate())) {
+    case CmpInst::FCMP_UNO: // Unordered
+    case CmpInst::FCMP_ORD: // Ordered (OR)
+      MipsFCMPCondCode = Mips::FCOND_UN;
+      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
+      break;
+    case CmpInst::FCMP_OEQ: // Equal
+    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
+      MipsFCMPCondCode = Mips::FCOND_OEQ;
+      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
+      break;
+    case CmpInst::FCMP_UEQ: // Unordered or Equal
+    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
+      MipsFCMPCondCode = Mips::FCOND_UEQ;
+      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
+      break;
+    case CmpInst::FCMP_OLT: // Ordered or Less Than
+    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
+      MipsFCMPCondCode = Mips::FCOND_OLT;
+      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
+      break;
+    case CmpInst::FCMP_ULT: // Unordered or Less Than
+    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
+      MipsFCMPCondCode = Mips::FCOND_ULT;
+      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
+      break;
+    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
+    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
+      MipsFCMPCondCode = Mips::FCOND_OLE;
+      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
+      break;
+    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
+    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
+      MipsFCMPCondCode = Mips::FCOND_ULE;
+      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
+      break;
+    default:
+      return false;
+    }
+
+    // Default compare result in gpr register will be `true`.
+    // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
+    // using MOVF_I. When orignal predicate (Cond) is logically negated
+    // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
+    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;
+
+    unsigned TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
+        .addDef(TrueInReg)
+        .addUse(Mips::ZERO)
+        .addImm(1);
+
+    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
+    unsigned FCMPOpcode =
+        Size == 32 ? Mips::FCMP_S32
+                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
+    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
+                             .addUse(I.getOperand(2).getReg())
+                             .addUse(I.getOperand(3).getReg())
+                             .addImm(MipsFCMPCondCode);
+    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
+      return false;
+
+    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
+                             .addDef(I.getOperand(0).getReg())
+                             .addUse(Mips::ZERO)
+                             .addUse(Mips::FCC0)
+                             .addUse(TrueInReg);
+    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
+      return false;
+
+    I.eraseFromParent();
+    return true;
+  }
   default:
     return false;
   }