summary | refs | log | tree | commit | diff
path: root/llvm/lib/Target/Mips/MipsISelLowering.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Target/Mips/MipsISelLowering.cpp')
-rw-r--r-- llvm/lib/Target/Mips/MipsISelLowering.cpp | 220
1 file changed, 176 insertions(+), 44 deletions(-)
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index bf1b4756b24fa..46b1f35a6fc7f 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -111,21 +111,19 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
- if (VT.isVector()) {
- if (Subtarget.isABI_O32()) {
- return MVT::i32;
- } else {
- return (VT.getSizeInBits() == 32) ? MVT::i32 : MVT::i64;
- }
- }
- return MipsTargetLowering::getRegisterType(Context, VT);
+ if (!VT.isVector())
+ return getRegisterType(Context, VT);
+
+ return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
+ : MVT::i64;
}
unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT.isVector())
- return std::max((VT.getSizeInBits() / (Subtarget.isABI_O32() ? 32 : 64)),
+ return std::max(((unsigned)VT.getSizeInBits() /
+ (Subtarget.isABI_O32() ? 32 : 64)),
1U);
return MipsTargetLowering::getNumRegisters(Context, VT);
}
@@ -528,8 +526,9 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
isMicroMips = Subtarget.inMicroMipsMode();
}
-const MipsTargetLowering *MipsTargetLowering::create(const MipsTargetMachine &TM,
- const MipsSubtarget &STI) {
+const MipsTargetLowering *
+MipsTargetLowering::create(const MipsTargetMachine &TM,
+ const MipsSubtarget &STI) {
if (STI.inMips16Mode())
return createMips16TargetLowering(TM, STI);
@@ -710,7 +709,8 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
SDValue True = N->getOperand(1);
SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
- SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
+ SetCC.getOperand(1),
+ ISD::getSetCCInverse(CC, SetCC.getValueType()));
return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
}
@@ -744,7 +744,8 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (Diff == -1) {
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
- SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
+ SetCC.getOperand(1),
+ ISD::getSetCCInverse(CC, SetCC.getValueType()));
return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
}
@@ -1367,6 +1368,43 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return emitAtomicCmpSwap(MI, BB);
case Mips::ATOMIC_CMP_SWAP_I64:
return emitAtomicCmpSwap(MI, BB);
+
+ case Mips::ATOMIC_LOAD_MIN_I8:
+ return emitAtomicBinaryPartword(MI, BB, 1);
+ case Mips::ATOMIC_LOAD_MIN_I16:
+ return emitAtomicBinaryPartword(MI, BB, 2);
+ case Mips::ATOMIC_LOAD_MIN_I32:
+ return emitAtomicBinary(MI, BB);
+ case Mips::ATOMIC_LOAD_MIN_I64:
+ return emitAtomicBinary(MI, BB);
+
+ case Mips::ATOMIC_LOAD_MAX_I8:
+ return emitAtomicBinaryPartword(MI, BB, 1);
+ case Mips::ATOMIC_LOAD_MAX_I16:
+ return emitAtomicBinaryPartword(MI, BB, 2);
+ case Mips::ATOMIC_LOAD_MAX_I32:
+ return emitAtomicBinary(MI, BB);
+ case Mips::ATOMIC_LOAD_MAX_I64:
+ return emitAtomicBinary(MI, BB);
+
+ case Mips::ATOMIC_LOAD_UMIN_I8:
+ return emitAtomicBinaryPartword(MI, BB, 1);
+ case Mips::ATOMIC_LOAD_UMIN_I16:
+ return emitAtomicBinaryPartword(MI, BB, 2);
+ case Mips::ATOMIC_LOAD_UMIN_I32:
+ return emitAtomicBinary(MI, BB);
+ case Mips::ATOMIC_LOAD_UMIN_I64:
+ return emitAtomicBinary(MI, BB);
+
+ case Mips::ATOMIC_LOAD_UMAX_I8:
+ return emitAtomicBinaryPartword(MI, BB, 1);
+ case Mips::ATOMIC_LOAD_UMAX_I16:
+ return emitAtomicBinaryPartword(MI, BB, 2);
+ case Mips::ATOMIC_LOAD_UMAX_I32:
+ return emitAtomicBinary(MI, BB);
+ case Mips::ATOMIC_LOAD_UMAX_I64:
+ return emitAtomicBinary(MI, BB);
+
case Mips::PseudoSDIV:
case Mips::PseudoUDIV:
case Mips::DIV:
@@ -1428,6 +1466,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
DebugLoc DL = MI.getDebugLoc();
unsigned AtomicOp;
+ bool NeedsAdditionalReg = false;
switch (MI.getOpcode()) {
case Mips::ATOMIC_LOAD_ADD_I32:
AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
@@ -1471,6 +1510,38 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
case Mips::ATOMIC_SWAP_I64:
AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
break;
+ case Mips::ATOMIC_LOAD_MIN_I32:
+ AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_MAX_I32:
+ AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMIN_I32:
+ AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMAX_I32:
+ AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_MIN_I64:
+ AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_MAX_I64:
+ AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMIN_I64:
+ AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMAX_I64:
+ AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
default:
llvm_unreachable("Unknown pseudo atomic for replacement!");
}
@@ -1523,12 +1594,19 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
- BuildMI(*BB, II, DL, TII->get(AtomicOp))
- .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
- .addReg(PtrCopy)
- .addReg(IncrCopy)
- .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
- RegState::Implicit | RegState::Dead);
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, II, DL, TII->get(AtomicOp))
+ .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
+ .addReg(PtrCopy)
+ .addReg(IncrCopy)
+ .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
+ RegState::Implicit | RegState::Dead);
+ if (NeedsAdditionalReg) {
+ Register Scratch2 =
+ RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
+ MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
+ RegState::Implicit | RegState::Dead);
+ }
MI.eraseFromParent();
@@ -1596,6 +1674,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
Register Scratch3 = RegInfo.createVirtualRegister(RC);
unsigned AtomicOp = 0;
+ bool NeedsAdditionalReg = false;
switch (MI.getOpcode()) {
case Mips::ATOMIC_LOAD_NAND_I8:
AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
@@ -1639,6 +1718,38 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
case Mips::ATOMIC_LOAD_XOR_I16:
AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
break;
+ case Mips::ATOMIC_LOAD_MIN_I8:
+ AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_MIN_I16:
+ AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_MAX_I8:
+ AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_MAX_I16:
+ AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMIN_I8:
+ AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMIN_I16:
+ AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMAX_I8:
+ AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
+ case Mips::ATOMIC_LOAD_UMAX_I16:
+ AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
+ NeedsAdditionalReg = true;
+ break;
default:
llvm_unreachable("Unknown subword atomic pseudo for expansion!");
}
@@ -1693,19 +1804,25 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
// emitAtomicBinary. In summary, we need a scratch register which is going to
// be undef, that is unique among registers chosen for the instruction.
- BuildMI(BB, DL, TII->get(AtomicOp))
- .addReg(Dest, RegState::Define | RegState::EarlyClobber)
- .addReg(AlignedAddr)
- .addReg(Incr2)
- .addReg(Mask)
- .addReg(Mask2)
- .addReg(ShiftAmt)
- .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
- RegState::Dead | RegState::Implicit)
- .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
- RegState::Dead | RegState::Implicit)
- .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
- RegState::Dead | RegState::Implicit);
+ MachineInstrBuilder MIB =
+ BuildMI(BB, DL, TII->get(AtomicOp))
+ .addReg(Dest, RegState::Define | RegState::EarlyClobber)
+ .addReg(AlignedAddr)
+ .addReg(Incr2)
+ .addReg(Mask)
+ .addReg(Mask2)
+ .addReg(ShiftAmt)
+ .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
+ RegState::Dead | RegState::Implicit)
+ .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
+ RegState::Dead | RegState::Implicit)
+ .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
+ RegState::Dead | RegState::Implicit);
+ if (NeedsAdditionalReg) {
+ Register Scratch4 = RegInfo.createVirtualRegister(RC);
+ MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
+ RegState::Dead | RegState::Implicit);
+ }
MI.eraseFromParent(); // The instruction is gone now.
@@ -2804,7 +2921,8 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
// allocate a register directly.
Reg = State.AllocateReg(IntRegs);
}
- } else if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
+ } else if (ValVT == MVT::i32 ||
+ (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
Reg = State.AllocateReg(IntRegs);
// If this is the first part of an i64 arg,
// the allocated register must be either A0 or A2.
@@ -2993,6 +3111,14 @@ void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
StringRef Sym;
if (const GlobalAddressSDNode *G =
dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
+ // We must not emit the R_MIPS_JALR relocation against data symbols
+ // since this will cause run-time crashes if the linker replaces the
+ // call instruction with a relative branch to the data symbol.
+ if (!isa<Function>(G->getGlobal())) {
+ LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
+ << G->getGlobal()->getName() << "\n");
+ return;
+ }
Sym = G->getGlobal()->getName();
}
else if (const ExternalSymbolSDNode *ES =
@@ -3005,6 +3131,7 @@ void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
MachineFunction *MF = MI.getParent()->getParent();
MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
+ LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
}
}
@@ -3625,8 +3752,8 @@ MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
return CCInfo.CheckReturn(Outs, RetCC_Mips);
}
-bool
-MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
+ bool IsSigned) const {
if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
return true;
@@ -4006,11 +4133,13 @@ MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
}
}
- std::pair<unsigned, const TargetRegisterClass *> R;
- R = parseRegForInlineAsmConstraint(Constraint, VT);
+ if (!Constraint.empty()) {
+ std::pair<unsigned, const TargetRegisterClass *> R;
+ R = parseRegForInlineAsmConstraint(Constraint, VT);
- if (R.second)
- return R;
+ if (R.second)
+ return R;
+ }
return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
@@ -4113,7 +4242,8 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
- unsigned AS, Instruction *I) const {
+ unsigned AS,
+ Instruction *I) const {
// No global is ever allowed as a base.
if (AM.BaseGV)
return false;
@@ -4489,8 +4619,9 @@ MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
return BB;
}
-MachineBasicBlock *MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
- MachineBasicBlock *BB) const {
+MachineBasicBlock *
+MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
"Subtarget already supports SELECT nodes with the use of"
"conditional-move instructions.");
@@ -4566,8 +4697,9 @@ MachineBasicBlock *MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
-Register MipsTargetLowering::getRegisterByName(const char* RegName, EVT VT,
- const MachineFunction &MF) const {
+Register
+MipsTargetLowering::getRegisterByName(const char *RegName, LLT VT,
+ const MachineFunction &MF) const {
// Named registers is expected to be fairly rare. For now, just support $28
// since the linux kernel uses it.
if (Subtarget.isGP64bit()) {