summaryrefslogtreecommitdiff
path: root/lib/Target/Mips
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/Mips')
-rw-r--r--lib/Target/Mips/AsmParser/MipsAsmParser.cpp696
-rw-r--r--lib/Target/Mips/Disassembler/MipsDisassembler.cpp16
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp1
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h6
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp4
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp7
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h5
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp11
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp4
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp96
-rw-r--r--lib/Target/Mips/MicroMipsDSPInstrInfo.td4
-rw-r--r--lib/Target/Mips/MicroMipsInstrInfo.td9
-rw-r--r--lib/Target/Mips/MicroMipsSizeReduction.cpp18
-rw-r--r--lib/Target/Mips/Mips.td12
-rw-r--r--lib/Target/Mips/Mips16ISelDAGToDAG.cpp2
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.cpp16
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.cpp2
-rw-r--r--lib/Target/Mips/Mips64InstrInfo.td36
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.cpp12
-rw-r--r--lib/Target/Mips/MipsCallLowering.cpp150
-rw-r--r--lib/Target/Mips/MipsCallLowering.h8
-rw-r--r--lib/Target/Mips/MipsConstantIslandPass.cpp63
-rw-r--r--lib/Target/Mips/MipsDSPInstrInfo.td19
-rw-r--r--lib/Target/Mips/MipsExpandPseudo.cpp54
-rw-r--r--lib/Target/Mips/MipsFastISel.cpp12
-rw-r--r--lib/Target/Mips/MipsFrameLowering.h5
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.cpp53
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.h5
-rw-r--r--lib/Target/Mips/MipsISelLowering.cpp164
-rw-r--r--lib/Target/Mips/MipsISelLowering.h13
-rw-r--r--lib/Target/Mips/MipsInstrInfo.cpp3
-rw-r--r--lib/Target/Mips/MipsInstrInfo.h2
-rw-r--r--lib/Target/Mips/MipsInstrInfo.td30
-rw-r--r--lib/Target/Mips/MipsInstructionSelector.cpp206
-rw-r--r--lib/Target/Mips/MipsLegalizerInfo.cpp244
-rw-r--r--lib/Target/Mips/MipsLegalizerInfo.h3
-rw-r--r--lib/Target/Mips/MipsMSAInstrInfo.td55
-rw-r--r--lib/Target/Mips/MipsOptimizePICCall.cpp5
-rw-r--r--lib/Target/Mips/MipsPfmCounters.td18
-rw-r--r--lib/Target/Mips/MipsPreLegalizerCombiner.cpp3
-rw-r--r--lib/Target/Mips/MipsRegisterBankInfo.cpp322
-rw-r--r--lib/Target/Mips/MipsRegisterBankInfo.h9
-rw-r--r--lib/Target/Mips/MipsRegisterBanks.td2
-rw-r--r--lib/Target/Mips/MipsSEFrameLowering.cpp55
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.cpp54
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.h6
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.cpp124
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.cpp20
-rw-r--r--lib/Target/Mips/MipsSERegisterInfo.cpp8
-rw-r--r--lib/Target/Mips/MipsSubtarget.cpp17
-rw-r--r--lib/Target/Mips/MipsSubtarget.h15
-rw-r--r--lib/Target/Mips/MipsTargetMachine.cpp18
-rw-r--r--lib/Target/Mips/MipsTargetStreamer.h14
54 files changed, 1772 insertions, 966 deletions
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 1f7d095bf49b..21d0df74d458 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -39,6 +39,7 @@
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
@@ -233,9 +234,14 @@ class MipsAsmParser : public MCTargetAsmParser {
bool expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
MCStreamer &Out, const MCSubtargetInfo *STI);
- bool expandLoadImmReal(MCInst &Inst, bool IsSingle, bool IsGPR, bool Is64FPU,
- SMLoc IDLoc, MCStreamer &Out,
- const MCSubtargetInfo *STI);
+ bool expandLoadSingleImmToGPR(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandLoadSingleImmToFPR(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandLoadDoubleImmToGPR(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandLoadDoubleImmToFPR(MCInst &Inst, bool Is64FPU, SMLoc IDLoc,
+ MCStreamer &Out, const MCSubtargetInfo *STI);
bool expandLoadAddress(unsigned DstReg, unsigned BaseReg,
const MCOperand &Offset, bool Is32BitAddress,
@@ -512,11 +518,11 @@ public:
// Remember the initial assembler options. The user can not modify these.
AssemblerOptions.push_back(
- llvm::make_unique<MipsAssemblerOptions>(getSTI().getFeatureBits()));
+ std::make_unique<MipsAssemblerOptions>(getSTI().getFeatureBits()));
// Create an assembler options environment for the user to modify.
AssemblerOptions.push_back(
- llvm::make_unique<MipsAssemblerOptions>(getSTI().getFeatureBits()));
+ std::make_unique<MipsAssemblerOptions>(getSTI().getFeatureBits()));
getTargetStreamer().updateABIInfo(*this);
@@ -844,7 +850,7 @@ private:
const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E,
MipsAsmParser &Parser) {
- auto Op = llvm::make_unique<MipsOperand>(k_RegisterIndex, Parser);
+ auto Op = std::make_unique<MipsOperand>(k_RegisterIndex, Parser);
Op->RegIdx.Index = Index;
Op->RegIdx.RegInfo = RegInfo;
Op->RegIdx.Kind = RegKind;
@@ -1446,7 +1452,7 @@ public:
static std::unique_ptr<MipsOperand> CreateToken(StringRef Str, SMLoc S,
MipsAsmParser &Parser) {
- auto Op = llvm::make_unique<MipsOperand>(k_Token, Parser);
+ auto Op = std::make_unique<MipsOperand>(k_Token, Parser);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
@@ -1521,7 +1527,7 @@ public:
static std::unique_ptr<MipsOperand>
CreateImm(const MCExpr *Val, SMLoc S, SMLoc E, MipsAsmParser &Parser) {
- auto Op = llvm::make_unique<MipsOperand>(k_Immediate, Parser);
+ auto Op = std::make_unique<MipsOperand>(k_Immediate, Parser);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -1531,7 +1537,7 @@ public:
static std::unique_ptr<MipsOperand>
CreateMem(std::unique_ptr<MipsOperand> Base, const MCExpr *Off, SMLoc S,
SMLoc E, MipsAsmParser &Parser) {
- auto Op = llvm::make_unique<MipsOperand>(k_Memory, Parser);
+ auto Op = std::make_unique<MipsOperand>(k_Memory, Parser);
Op->Mem.Base = Base.release();
Op->Mem.Off = Off;
Op->StartLoc = S;
@@ -1544,7 +1550,7 @@ public:
MipsAsmParser &Parser) {
assert(Regs.size() > 0 && "Empty list not allowed");
- auto Op = llvm::make_unique<MipsOperand>(k_RegList, Parser);
+ auto Op = std::make_unique<MipsOperand>(k_RegList, Parser);
Op->RegList.List = new SmallVector<unsigned, 10>(Regs.begin(), Regs.end());
Op->StartLoc = StartLoc;
Op->EndLoc = EndLoc;
@@ -1804,8 +1810,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (OffsetToAlignment(Offset.getImm(),
- 1LL << (inMicroMipsMode() ? 1 : 2)))
+ if (offsetToAlignment(Offset.getImm(),
+ (inMicroMipsMode() ? Align(2) : Align(4))))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEZ:
@@ -1834,8 +1840,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (OffsetToAlignment(Offset.getImm(),
- 1LL << (inMicroMipsMode() ? 1 : 2)))
+ if (offsetToAlignment(Offset.getImm(),
+ (inMicroMipsMode() ? Align(2) : Align(4))))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEC: case Mips::BGEC_MMR6:
@@ -1850,7 +1856,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (OffsetToAlignment(Offset.getImm(), 1LL << 2))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BLEZC: case Mips::BLEZC_MMR6:
@@ -1863,7 +1869,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (OffsetToAlignment(Offset.getImm(), 1LL << 2))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZC: case Mips::BEQZC_MMR6:
@@ -1874,7 +1880,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(23, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (OffsetToAlignment(Offset.getImm(), 1LL << 2))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZ16_MM:
@@ -1887,7 +1893,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
break; // We'll deal with this situation later on when applying fixups.
if (!isInt<8>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (OffsetToAlignment(Offset.getImm(), 2LL))
+ if (offsetToAlignment(Offset.getImm(), Align(2)))
return Error(IDLoc, "branch to misaligned address");
break;
}
@@ -2454,25 +2460,21 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
: MER_Success;
case Mips::LoadImmSingleGPR:
- return expandLoadImmReal(Inst, true, true, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadSingleImmToGPR(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmSingleFGR:
- return expandLoadImmReal(Inst, true, false, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadSingleImmToFPR(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmDoubleGPR:
- return expandLoadImmReal(Inst, false, true, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadDoubleImmToGPR(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmDoubleFGR:
- return expandLoadImmReal(Inst, false, false, true, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadDoubleImmToFPR(Inst, true, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::LoadImmDoubleFGR_32:
- return expandLoadImmReal(Inst, false, false, false, IDLoc, Out, STI)
- ? MER_Fail
- : MER_Success;
+ return expandLoadDoubleImmToFPR(Inst, false, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
+
case Mips::Ulh:
return expandUlh(Inst, true, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::Ulhu:
@@ -2868,12 +2870,12 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr,
bool Is32BitSym, SMLoc IDLoc,
MCStreamer &Out,
const MCSubtargetInfo *STI) {
- // FIXME: These expansions do not respect -mxgot.
MipsTargetStreamer &TOut = getTargetStreamer();
- bool UseSrcReg = SrcReg != Mips::NoRegister;
+ bool UseSrcReg = SrcReg != Mips::NoRegister && SrcReg != Mips::ZERO &&
+ SrcReg != Mips::ZERO_64;
warnIfNoMacro(IDLoc);
- if (inPicMode() && ABI.IsO32()) {
+ if (inPicMode()) {
MCValue Res;
if (!SymExpr->evaluateAsRelocatable(Res, nullptr, nullptr)) {
Error(IDLoc, "expected relocatable expression");
@@ -2884,46 +2886,41 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr,
return true;
}
+ bool IsPtr64 = ABI.ArePtrs64bit();
+ bool IsLocalSym =
+ Res.getSymA()->getSymbol().isInSection() ||
+ Res.getSymA()->getSymbol().isTemporary() ||
+ (Res.getSymA()->getSymbol().isELF() &&
+ cast<MCSymbolELF>(Res.getSymA()->getSymbol()).getBinding() ==
+ ELF::STB_LOCAL);
+ bool UseXGOT = STI->getFeatureBits()[Mips::FeatureXGOT] && !IsLocalSym;
+
// The case where the result register is $25 is somewhat special. If the
// symbol in the final relocation is external and not modified with a
- // constant then we must use R_MIPS_CALL16 instead of R_MIPS_GOT16.
+ // constant then we must use R_MIPS_CALL16 instead of R_MIPS_GOT16
+ // or R_MIPS_CALL16 instead of R_MIPS_GOT_DISP in 64-bit case.
if ((DstReg == Mips::T9 || DstReg == Mips::T9_64) && !UseSrcReg &&
- Res.getConstant() == 0 &&
- !(Res.getSymA()->getSymbol().isInSection() ||
- Res.getSymA()->getSymbol().isTemporary() ||
- (Res.getSymA()->getSymbol().isELF() &&
- cast<MCSymbolELF>(Res.getSymA()->getSymbol()).getBinding() ==
- ELF::STB_LOCAL))) {
- const MCExpr *CallExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
- TOut.emitRRX(Mips::LW, DstReg, GPReg, MCOperand::createExpr(CallExpr),
- IDLoc, STI);
+ Res.getConstant() == 0 && !IsLocalSym) {
+ if (UseXGOT) {
+ const MCExpr *CallHiExpr = MipsMCExpr::create(MipsMCExpr::MEK_CALL_HI16,
+ SymExpr, getContext());
+ const MCExpr *CallLoExpr = MipsMCExpr::create(MipsMCExpr::MEK_CALL_LO16,
+ SymExpr, getContext());
+ TOut.emitRX(Mips::LUi, DstReg, MCOperand::createExpr(CallHiExpr), IDLoc,
+ STI);
+ TOut.emitRRR(IsPtr64 ? Mips::DADDu : Mips::ADDu, DstReg, DstReg, GPReg,
+ IDLoc, STI);
+ TOut.emitRRX(IsPtr64 ? Mips::LD : Mips::LW, DstReg, DstReg,
+ MCOperand::createExpr(CallLoExpr), IDLoc, STI);
+ } else {
+ const MCExpr *CallExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
+ TOut.emitRRX(IsPtr64 ? Mips::LD : Mips::LW, DstReg, GPReg,
+ MCOperand::createExpr(CallExpr), IDLoc, STI);
+ }
return false;
}
- // The remaining cases are:
- // External GOT: lw $tmp, %got(symbol+offset)($gp)
- // >addiu $tmp, $tmp, %lo(offset)
- // >addiu $rd, $tmp, $rs
- // Local GOT: lw $tmp, %got(symbol+offset)($gp)
- // addiu $tmp, $tmp, %lo(symbol+offset)($gp)
- // >addiu $rd, $tmp, $rs
- // The addiu's marked with a '>' may be omitted if they are redundant. If
- // this happens then the last instruction must use $rd as the result
- // register.
- const MipsMCExpr *GotExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_GOT, SymExpr, getContext());
- const MCExpr *LoExpr = nullptr;
- if (Res.getSymA()->getSymbol().isInSection() ||
- Res.getSymA()->getSymbol().isTemporary())
- LoExpr = MipsMCExpr::create(MipsMCExpr::MEK_LO, SymExpr, getContext());
- else if (Res.getConstant() != 0) {
- // External symbols fully resolve the symbol with just the %got(symbol)
- // but we must still account for any offset to the symbol for expressions
- // like symbol+8.
- LoExpr = MCConstantExpr::create(Res.getConstant(), getContext());
- }
-
unsigned TmpReg = DstReg;
if (UseSrcReg &&
getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg,
@@ -2936,94 +2933,102 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr,
TmpReg = ATReg;
}
- TOut.emitRRX(Mips::LW, TmpReg, GPReg, MCOperand::createExpr(GotExpr), IDLoc,
- STI);
-
- if (LoExpr)
- TOut.emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
+ if (UseXGOT) {
+ // Loading address from XGOT
+ // External GOT: lui $tmp, %got_hi(symbol)($gp)
+ // addu $tmp, $tmp, $gp
+ // lw $tmp, %got_lo(symbol)($tmp)
+ // >addiu $tmp, $tmp, offset
+ // >addiu $rd, $tmp, $rs
+ // The addiu's marked with a '>' may be omitted if they are redundant. If
+ // this happens then the last instruction must use $rd as the result
+ // register.
+ const MCExpr *CallHiExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_GOT_HI16, SymExpr, getContext());
+ const MCExpr *CallLoExpr = MipsMCExpr::create(
+ MipsMCExpr::MEK_GOT_LO16, Res.getSymA(), getContext());
+
+ TOut.emitRX(Mips::LUi, TmpReg, MCOperand::createExpr(CallHiExpr), IDLoc,
+ STI);
+ TOut.emitRRR(IsPtr64 ? Mips::DADDu : Mips::ADDu, TmpReg, TmpReg, GPReg,
IDLoc, STI);
+ TOut.emitRRX(IsPtr64 ? Mips::LD : Mips::LW, TmpReg, TmpReg,
+ MCOperand::createExpr(CallLoExpr), IDLoc, STI);
- if (UseSrcReg)
- TOut.emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, STI);
-
- return false;
- }
-
- if (inPicMode() && ABI.ArePtrs64bit()) {
- MCValue Res;
- if (!SymExpr->evaluateAsRelocatable(Res, nullptr, nullptr)) {
- Error(IDLoc, "expected relocatable expression");
- return true;
- }
- if (Res.getSymB() != nullptr) {
- Error(IDLoc, "expected relocatable expression with only one symbol");
- return true;
- }
+ if (Res.getConstant() != 0)
+ TOut.emitRRX(IsPtr64 ? Mips::DADDiu : Mips::ADDiu, TmpReg, TmpReg,
+ MCOperand::createExpr(MCConstantExpr::create(
+ Res.getConstant(), getContext())),
+ IDLoc, STI);
- // The case where the result register is $25 is somewhat special. If the
- // symbol in the final relocation is external and not modified with a
- // constant then we must use R_MIPS_CALL16 instead of R_MIPS_GOT_DISP.
- if ((DstReg == Mips::T9 || DstReg == Mips::T9_64) && !UseSrcReg &&
- Res.getConstant() == 0 &&
- !(Res.getSymA()->getSymbol().isInSection() ||
- Res.getSymA()->getSymbol().isTemporary() ||
- (Res.getSymA()->getSymbol().isELF() &&
- cast<MCSymbolELF>(Res.getSymA()->getSymbol()).getBinding() ==
- ELF::STB_LOCAL))) {
- const MCExpr *CallExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
- TOut.emitRRX(Mips::LD, DstReg, GPReg, MCOperand::createExpr(CallExpr),
- IDLoc, STI);
+ if (UseSrcReg)
+ TOut.emitRRR(IsPtr64 ? Mips::DADDu : Mips::ADDu, DstReg, TmpReg, SrcReg,
+ IDLoc, STI);
return false;
}
- // The remaining cases are:
- // Small offset: ld $tmp, %got_disp(symbol)($gp)
- // >daddiu $tmp, $tmp, offset
- // >daddu $rd, $tmp, $rs
- // The daddiu's marked with a '>' may be omitted if they are redundant. If
- // this happens then the last instruction must use $rd as the result
- // register.
- const MipsMCExpr *GotExpr = MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP,
- Res.getSymA(),
- getContext());
+ const MipsMCExpr *GotExpr = nullptr;
const MCExpr *LoExpr = nullptr;
- if (Res.getConstant() != 0) {
- // Symbols fully resolve with just the %got_disp(symbol) but we
- // must still account for any offset to the symbol for
- // expressions like symbol+8.
- LoExpr = MCConstantExpr::create(Res.getConstant(), getContext());
-
- // FIXME: Offsets greater than 16 bits are not yet implemented.
- // FIXME: The correct range is a 32-bit sign-extended number.
- if (Res.getConstant() < -0x8000 || Res.getConstant() > 0x7fff) {
- Error(IDLoc, "macro instruction uses large offset, which is not "
- "currently supported");
- return true;
+ if (IsPtr64) {
+ // The remaining cases are:
+ // Small offset: ld $tmp, %got_disp(symbol)($gp)
+ // >daddiu $tmp, $tmp, offset
+ // >daddu $rd, $tmp, $rs
+ // The daddiu's marked with a '>' may be omitted if they are redundant. If
+ // this happens then the last instruction must use $rd as the result
+ // register.
+ GotExpr = MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP, Res.getSymA(),
+ getContext());
+ if (Res.getConstant() != 0) {
+ // Symbols fully resolve with just the %got_disp(symbol) but we
+ // must still account for any offset to the symbol for
+ // expressions like symbol+8.
+ LoExpr = MCConstantExpr::create(Res.getConstant(), getContext());
+
+ // FIXME: Offsets greater than 16 bits are not yet implemented.
+ // FIXME: The correct range is a 32-bit sign-extended number.
+ if (Res.getConstant() < -0x8000 || Res.getConstant() > 0x7fff) {
+ Error(IDLoc, "macro instruction uses large offset, which is not "
+ "currently supported");
+ return true;
+ }
+ }
+ } else {
+ // The remaining cases are:
+ // External GOT: lw $tmp, %got(symbol)($gp)
+ // >addiu $tmp, $tmp, offset
+ // >addiu $rd, $tmp, $rs
+ // Local GOT: lw $tmp, %got(symbol+offset)($gp)
+ // addiu $tmp, $tmp, %lo(symbol+offset)($gp)
+ // >addiu $rd, $tmp, $rs
+ // The addiu's marked with a '>' may be omitted if they are redundant. If
+ // this happens then the last instruction must use $rd as the result
+ // register.
+ if (IsLocalSym) {
+ GotExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_GOT, SymExpr, getContext());
+ LoExpr = MipsMCExpr::create(MipsMCExpr::MEK_LO, SymExpr, getContext());
+ } else {
+ // External symbols fully resolve the symbol with just the %got(symbol)
+ // but we must still account for any offset to the symbol for
+ // expressions like symbol+8.
+ GotExpr = MipsMCExpr::create(MipsMCExpr::MEK_GOT, Res.getSymA(),
+ getContext());
+ if (Res.getConstant() != 0)
+ LoExpr = MCConstantExpr::create(Res.getConstant(), getContext());
}
}
- unsigned TmpReg = DstReg;
- if (UseSrcReg &&
- getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg,
- SrcReg)) {
- // If $rs is the same as $rd, we need to use AT.
- // If it is not available we exit.
- unsigned ATReg = getATReg(IDLoc);
- if (!ATReg)
- return true;
- TmpReg = ATReg;
- }
-
- TOut.emitRRX(Mips::LD, TmpReg, GPReg, MCOperand::createExpr(GotExpr), IDLoc,
- STI);
+ TOut.emitRRX(IsPtr64 ? Mips::LD : Mips::LW, TmpReg, GPReg,
+ MCOperand::createExpr(GotExpr), IDLoc, STI);
if (LoExpr)
- TOut.emitRRX(Mips::DADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
- IDLoc, STI);
+ TOut.emitRRX(IsPtr64 ? Mips::DADDiu : Mips::ADDiu, TmpReg, TmpReg,
+ MCOperand::createExpr(LoExpr), IDLoc, STI);
if (UseSrcReg)
- TOut.emitRRR(Mips::DADDu, DstReg, TmpReg, SrcReg, IDLoc, STI);
+ TOut.emitRRR(IsPtr64 ? Mips::DADDu : Mips::ADDu, DstReg, TmpReg, SrcReg,
+ IDLoc, STI);
return false;
}
@@ -3289,10 +3294,43 @@ bool MipsAsmParser::emitPartialAddress(MipsTargetStreamer &TOut, SMLoc IDLoc,
return false;
}
-bool MipsAsmParser::expandLoadImmReal(MCInst &Inst, bool IsSingle, bool IsGPR,
- bool Is64FPU, SMLoc IDLoc,
- MCStreamer &Out,
- const MCSubtargetInfo *STI) {
+static uint64_t convertIntToDoubleImm(uint64_t ImmOp64) {
+ // If ImmOp64 is AsmToken::Integer type (all bits set to zero in the
+ // exponent field), convert it to double (e.g. 1 to 1.0)
+ if ((Hi_32(ImmOp64) & 0x7ff00000) == 0) {
+ APFloat RealVal(APFloat::IEEEdouble(), ImmOp64);
+ ImmOp64 = RealVal.bitcastToAPInt().getZExtValue();
+ }
+ return ImmOp64;
+}
+
+static uint32_t covertDoubleImmToSingleImm(uint64_t ImmOp64) {
+ // Conversion of a double in an uint64_t to a float in a uint32_t,
+ // retaining the bit pattern of a float.
+ double DoubleImm = BitsToDouble(ImmOp64);
+ float TmpFloat = static_cast<float>(DoubleImm);
+ return FloatToBits(TmpFloat);
+}
+
+bool MipsAsmParser::expandLoadSingleImmToGPR(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ assert(Inst.getNumOperands() == 2 && "Invalid operand count");
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
+ "Invalid instruction operand.");
+
+ unsigned FirstReg = Inst.getOperand(0).getReg();
+ uint64_t ImmOp64 = Inst.getOperand(1).getImm();
+
+ uint32_t ImmOp32 = covertDoubleImmToSingleImm(convertIntToDoubleImm(ImmOp64));
+
+ return loadImmediate(ImmOp32, FirstReg, Mips::NoRegister, true, false, IDLoc,
+ Out, STI);
+}
+
+bool MipsAsmParser::expandLoadSingleImmToFPR(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
MipsTargetStreamer &TOut = getTargetStreamer();
assert(Inst.getNumOperands() == 2 && "Invalid operand count");
assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
@@ -3301,166 +3339,184 @@ bool MipsAsmParser::expandLoadImmReal(MCInst &Inst, bool IsSingle, bool IsGPR,
unsigned FirstReg = Inst.getOperand(0).getReg();
uint64_t ImmOp64 = Inst.getOperand(1).getImm();
- uint32_t HiImmOp64 = (ImmOp64 & 0xffffffff00000000) >> 32;
- // If ImmOp64 is AsmToken::Integer type (all bits set to zero in the
- // exponent field), convert it to double (e.g. 1 to 1.0)
- if ((HiImmOp64 & 0x7ff00000) == 0) {
- APFloat RealVal(APFloat::IEEEdouble(), ImmOp64);
- ImmOp64 = RealVal.bitcastToAPInt().getZExtValue();
+ ImmOp64 = convertIntToDoubleImm(ImmOp64);
+
+ uint32_t ImmOp32 = covertDoubleImmToSingleImm(ImmOp64);
+
+ unsigned TmpReg = Mips::ZERO;
+ if (ImmOp32 != 0) {
+ TmpReg = getATReg(IDLoc);
+ if (!TmpReg)
+ return true;
}
- uint32_t LoImmOp64 = ImmOp64 & 0xffffffff;
- HiImmOp64 = (ImmOp64 & 0xffffffff00000000) >> 32;
+ if (Lo_32(ImmOp64) == 0) {
+ if (TmpReg != Mips::ZERO && loadImmediate(ImmOp32, TmpReg, Mips::NoRegister,
+ true, false, IDLoc, Out, STI))
+ return true;
+ TOut.emitRR(Mips::MTC1, FirstReg, TmpReg, IDLoc, STI);
+ return false;
+ }
- if (IsSingle) {
- // Conversion of a double in an uint64_t to a float in a uint32_t,
- // retaining the bit pattern of a float.
- uint32_t ImmOp32;
- double doubleImm = BitsToDouble(ImmOp64);
- float tmp_float = static_cast<float>(doubleImm);
- ImmOp32 = FloatToBits(tmp_float);
+ MCSection *CS = getStreamer().getCurrentSectionOnly();
+ // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
+ // where appropriate.
+ MCSection *ReadOnlySection =
+ getContext().getELFSection(".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
- if (IsGPR) {
- if (loadImmediate(ImmOp32, FirstReg, Mips::NoRegister, true, true, IDLoc,
- Out, STI))
- return true;
- return false;
- } else {
- unsigned ATReg = getATReg(IDLoc);
- if (!ATReg)
- return true;
- if (LoImmOp64 == 0) {
- if (loadImmediate(ImmOp32, ATReg, Mips::NoRegister, true, true, IDLoc,
- Out, STI))
- return true;
- TOut.emitRR(Mips::MTC1, FirstReg, ATReg, IDLoc, STI);
- return false;
- }
+ MCSymbol *Sym = getContext().createTempSymbol();
+ const MCExpr *LoSym =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ const MipsMCExpr *LoExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
- MCSection *CS = getStreamer().getCurrentSectionOnly();
- // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
- // where appropriate.
- MCSection *ReadOnlySection = getContext().getELFSection(
- ".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ getStreamer().SwitchSection(ReadOnlySection);
+ getStreamer().EmitLabel(Sym, IDLoc);
+ getStreamer().EmitIntValue(ImmOp32, 4);
+ getStreamer().SwitchSection(CS);
- MCSymbol *Sym = getContext().createTempSymbol();
- const MCExpr *LoSym =
- MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
- const MipsMCExpr *LoExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+ if (emitPartialAddress(TOut, IDLoc, Sym))
+ return true;
+ TOut.emitRRX(Mips::LWC1, FirstReg, TmpReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
+ return false;
+}
+
+bool MipsAsmParser::expandLoadDoubleImmToGPR(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ assert(Inst.getNumOperands() == 2 && "Invalid operand count");
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
+ "Invalid instruction operand.");
+
+ unsigned FirstReg = Inst.getOperand(0).getReg();
+ uint64_t ImmOp64 = Inst.getOperand(1).getImm();
+
+ ImmOp64 = convertIntToDoubleImm(ImmOp64);
- getStreamer().SwitchSection(ReadOnlySection);
- getStreamer().EmitLabel(Sym, IDLoc);
- getStreamer().EmitIntValue(ImmOp32, 4);
- getStreamer().SwitchSection(CS);
+ if (Lo_32(ImmOp64) == 0) {
+ if (isGP64bit()) {
+ if (loadImmediate(ImmOp64, FirstReg, Mips::NoRegister, false, false,
+ IDLoc, Out, STI))
+ return true;
+ } else {
+ if (loadImmediate(Hi_32(ImmOp64), FirstReg, Mips::NoRegister, true, false,
+ IDLoc, Out, STI))
+ return true;
- if(emitPartialAddress(TOut, IDLoc, Sym))
+ if (loadImmediate(0, nextReg(FirstReg), Mips::NoRegister, true, false,
+ IDLoc, Out, STI))
return true;
- TOut.emitRRX(Mips::LWC1, FirstReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
}
return false;
}
- // if(!IsSingle)
- unsigned ATReg = getATReg(IDLoc);
- if (!ATReg)
+ MCSection *CS = getStreamer().getCurrentSectionOnly();
+ MCSection *ReadOnlySection =
+ getContext().getELFSection(".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+
+ MCSymbol *Sym = getContext().createTempSymbol();
+ const MCExpr *LoSym =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ const MipsMCExpr *LoExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+
+ getStreamer().SwitchSection(ReadOnlySection);
+ getStreamer().EmitLabel(Sym, IDLoc);
+ getStreamer().EmitValueToAlignment(8);
+ getStreamer().EmitIntValue(ImmOp64, 8);
+ getStreamer().SwitchSection(CS);
+
+ unsigned TmpReg = getATReg(IDLoc);
+ if (!TmpReg)
return true;
- if (IsGPR) {
- if (LoImmOp64 == 0) {
- if(isABI_N32() || isABI_N64()) {
- if (loadImmediate(HiImmOp64, FirstReg, Mips::NoRegister, false, true,
- IDLoc, Out, STI))
- return true;
- return false;
- } else {
- if (loadImmediate(HiImmOp64, FirstReg, Mips::NoRegister, true, true,
- IDLoc, Out, STI))
- return true;
+ if (emitPartialAddress(TOut, IDLoc, Sym))
+ return true;
- if (loadImmediate(0, nextReg(FirstReg), Mips::NoRegister, true, true,
- IDLoc, Out, STI))
- return true;
- return false;
- }
- }
+ TOut.emitRRX(isABI_N64() ? Mips::DADDiu : Mips::ADDiu, TmpReg, TmpReg,
+ MCOperand::createExpr(LoExpr), IDLoc, STI);
- MCSection *CS = getStreamer().getCurrentSectionOnly();
- MCSection *ReadOnlySection = getContext().getELFSection(
- ".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ if (isGP64bit())
+ TOut.emitRRI(Mips::LD, FirstReg, TmpReg, 0, IDLoc, STI);
+ else {
+ TOut.emitRRI(Mips::LW, FirstReg, TmpReg, 0, IDLoc, STI);
+ TOut.emitRRI(Mips::LW, nextReg(FirstReg), TmpReg, 4, IDLoc, STI);
+ }
+ return false;
+}
- MCSymbol *Sym = getContext().createTempSymbol();
- const MCExpr *LoSym =
- MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
- const MipsMCExpr *LoExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+bool MipsAsmParser::expandLoadDoubleImmToFPR(MCInst &Inst, bool Is64FPU,
+ SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ assert(Inst.getNumOperands() == 2 && "Invalid operand count");
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isImm() &&
+ "Invalid instruction operand.");
- getStreamer().SwitchSection(ReadOnlySection);
- getStreamer().EmitLabel(Sym, IDLoc);
- getStreamer().EmitIntValue(HiImmOp64, 4);
- getStreamer().EmitIntValue(LoImmOp64, 4);
- getStreamer().SwitchSection(CS);
+ unsigned FirstReg = Inst.getOperand(0).getReg();
+ uint64_t ImmOp64 = Inst.getOperand(1).getImm();
+
+ ImmOp64 = convertIntToDoubleImm(ImmOp64);
- if(emitPartialAddress(TOut, IDLoc, Sym))
+ unsigned TmpReg = Mips::ZERO;
+ if (ImmOp64 != 0) {
+ TmpReg = getATReg(IDLoc);
+ if (!TmpReg)
return true;
- if(isABI_N64())
- TOut.emitRRX(Mips::DADDiu, ATReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
- else
- TOut.emitRRX(Mips::ADDiu, ATReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
+ }
- if(isABI_N32() || isABI_N64())
- TOut.emitRRI(Mips::LD, FirstReg, ATReg, 0, IDLoc, STI);
- else {
- TOut.emitRRI(Mips::LW, FirstReg, ATReg, 0, IDLoc, STI);
- TOut.emitRRI(Mips::LW, nextReg(FirstReg), ATReg, 4, IDLoc, STI);
- }
- return false;
- } else { // if(!IsGPR && !IsSingle)
- if ((LoImmOp64 == 0) &&
- !((HiImmOp64 & 0xffff0000) && (HiImmOp64 & 0x0000ffff))) {
- // FIXME: In the case where the constant is zero, we can load the
- // register directly from the zero register.
- if (loadImmediate(HiImmOp64, ATReg, Mips::NoRegister, true, true, IDLoc,
+ if ((Lo_32(ImmOp64) == 0) &&
+ !((Hi_32(ImmOp64) & 0xffff0000) && (Hi_32(ImmOp64) & 0x0000ffff))) {
+ if (isGP64bit()) {
+ if (TmpReg != Mips::ZERO &&
+ loadImmediate(ImmOp64, TmpReg, Mips::NoRegister, false, false, IDLoc,
Out, STI))
return true;
- if (isABI_N32() || isABI_N64())
- TOut.emitRR(Mips::DMTC1, FirstReg, ATReg, IDLoc, STI);
- else if (hasMips32r2()) {
- TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
- TOut.emitRRR(Mips::MTHC1_D32, FirstReg, FirstReg, ATReg, IDLoc, STI);
- } else {
- TOut.emitRR(Mips::MTC1, nextReg(FirstReg), ATReg, IDLoc, STI);
- TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
- }
+ TOut.emitRR(Mips::DMTC1, FirstReg, TmpReg, IDLoc, STI);
return false;
}
- MCSection *CS = getStreamer().getCurrentSectionOnly();
- // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
- // where appropriate.
- MCSection *ReadOnlySection = getContext().getELFSection(
- ".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ if (TmpReg != Mips::ZERO &&
+ loadImmediate(Hi_32(ImmOp64), TmpReg, Mips::NoRegister, true, false,
+ IDLoc, Out, STI))
+ return true;
- MCSymbol *Sym = getContext().createTempSymbol();
- const MCExpr *LoSym =
- MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
- const MipsMCExpr *LoExpr =
- MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+ if (hasMips32r2()) {
+ TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
+ TOut.emitRRR(Mips::MTHC1_D32, FirstReg, FirstReg, TmpReg, IDLoc, STI);
+ } else {
+ TOut.emitRR(Mips::MTC1, nextReg(FirstReg), TmpReg, IDLoc, STI);
+ TOut.emitRR(Mips::MTC1, FirstReg, Mips::ZERO, IDLoc, STI);
+ }
+ return false;
+ }
- getStreamer().SwitchSection(ReadOnlySection);
- getStreamer().EmitLabel(Sym, IDLoc);
- getStreamer().EmitIntValue(HiImmOp64, 4);
- getStreamer().EmitIntValue(LoImmOp64, 4);
- getStreamer().SwitchSection(CS);
+ MCSection *CS = getStreamer().getCurrentSectionOnly();
+ // FIXME: Enhance this expansion to use the .lit4 & .lit8 sections
+ // where appropriate.
+ MCSection *ReadOnlySection =
+ getContext().getELFSection(".rodata", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+
+ MCSymbol *Sym = getContext().createTempSymbol();
+ const MCExpr *LoSym =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ const MipsMCExpr *LoExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, LoSym, getContext());
+
+ getStreamer().SwitchSection(ReadOnlySection);
+ getStreamer().EmitLabel(Sym, IDLoc);
+ getStreamer().EmitValueToAlignment(8);
+ getStreamer().EmitIntValue(ImmOp64, 8);
+ getStreamer().SwitchSection(CS);
+
+ if (emitPartialAddress(TOut, IDLoc, Sym))
+ return true;
+
+ TOut.emitRRX(Is64FPU ? Mips::LDC164 : Mips::LDC1, FirstReg, TmpReg,
+ MCOperand::createExpr(LoExpr), IDLoc, STI);
- if(emitPartialAddress(TOut, IDLoc, Sym))
- return true;
- TOut.emitRRX(Is64FPU ? Mips::LDC164 : Mips::LDC1, FirstReg, ATReg,
- MCOperand::createExpr(LoExpr), IDLoc, STI);
- }
return false;
}
@@ -3489,7 +3545,7 @@ bool MipsAsmParser::expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
} else {
if (!isInt<17>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (OffsetToAlignment(Offset.getImm(), 1LL << 1))
+ if (offsetToAlignment(Offset.getImm(), Align(2)))
return Error(IDLoc, "branch to misaligned address");
Inst.clear();
Inst.setOpcode(Mips::BEQ_MM);
@@ -3581,7 +3637,6 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
assert(DstRegOp.isReg() && "expected register operand kind");
const MCOperand &BaseRegOp = Inst.getOperand(1);
assert(BaseRegOp.isReg() && "expected register operand kind");
- const MCOperand &OffsetOp = Inst.getOperand(2);
MipsTargetStreamer &TOut = getTargetStreamer();
unsigned DstReg = DstRegOp.getReg();
@@ -3603,6 +3658,26 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
return;
}
+ if (Inst.getNumOperands() > 3) {
+ const MCOperand &BaseRegOp = Inst.getOperand(2);
+ assert(BaseRegOp.isReg() && "expected register operand kind");
+ const MCOperand &ExprOp = Inst.getOperand(3);
+ assert(ExprOp.isExpr() && "expected expression oprand kind");
+
+ unsigned BaseReg = BaseRegOp.getReg();
+ const MCExpr *ExprOffset = ExprOp.getExpr();
+
+ MCOperand LoOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));
+ MCOperand HiOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_HI, ExprOffset, getContext()));
+ TOut.emitSCWithSymOffset(Inst.getOpcode(), DstReg, BaseReg, HiOperand,
+ LoOperand, TmpReg, IDLoc, STI);
+ return;
+ }
+
+ const MCOperand &OffsetOp = Inst.getOperand(2);
+
if (OffsetOp.isImm()) {
int64_t LoOffset = OffsetOp.getImm() & 0xffff;
int64_t HiOffset = OffsetOp.getImm() & ~0xffff;
@@ -3625,21 +3700,54 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
TOut.emitRRR(isGP64bit() ? Mips::DADDu : Mips::ADDu, TmpReg, TmpReg,
BaseReg, IDLoc, STI);
TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, LoOffset, IDLoc, STI);
- } else {
- assert(OffsetOp.isExpr() && "expected expression operand kind");
- const MCExpr *ExprOffset = OffsetOp.getExpr();
- MCOperand LoOperand = MCOperand::createExpr(
- MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));
- MCOperand HiOperand = MCOperand::createExpr(
- MipsMCExpr::create(MipsMCExpr::MEK_HI, ExprOffset, getContext()));
+ return;
+ }
+
+ if (OffsetOp.isExpr()) {
+ if (inPicMode()) {
+ // FIXME:
+ // c) Check that immediates of R_MIPS_GOT16/R_MIPS_LO16 relocations
+ // do not exceed 16-bit.
+ // d) Use R_MIPS_GOT_PAGE/R_MIPS_GOT_OFST relocations instead
+ // of R_MIPS_GOT_DISP in appropriate cases to reduce number
+ // of GOT entries.
+ MCValue Res;
+ if (!OffsetOp.getExpr()->evaluateAsRelocatable(Res, nullptr, nullptr)) {
+ Error(IDLoc, "expected relocatable expression");
+ return;
+ }
+ if (Res.getSymB() != nullptr) {
+ Error(IDLoc, "expected relocatable expression with only one symbol");
+ return;
+ }
- if (IsLoad)
- TOut.emitLoadWithSymOffset(Inst.getOpcode(), DstReg, BaseReg, HiOperand,
- LoOperand, TmpReg, IDLoc, STI);
- else
- TOut.emitStoreWithSymOffset(Inst.getOpcode(), DstReg, BaseReg, HiOperand,
- LoOperand, TmpReg, IDLoc, STI);
+ loadAndAddSymbolAddress(Res.getSymA(), TmpReg, BaseReg,
+ !ABI.ArePtrs64bit(), IDLoc, Out, STI);
+ TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, Res.getConstant(), IDLoc,
+ STI);
+ } else {
+ // FIXME: Implement 64-bit case.
+ // 1) lw $8, sym => lui $8, %hi(sym)
+ // lw $8, %lo(sym)($8)
+ // 2) sw $8, sym => lui $at, %hi(sym)
+ // sw $8, %lo(sym)($at)
+ const MCExpr *ExprOffset = OffsetOp.getExpr();
+ MCOperand LoOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));
+ MCOperand HiOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_HI, ExprOffset, getContext()));
+
+ // Generate the base address in TmpReg.
+ TOut.emitRX(Mips::LUi, TmpReg, HiOperand, IDLoc, STI);
+ if (BaseReg != Mips::ZERO)
+ TOut.emitRRR(Mips::ADDu, TmpReg, TmpReg, BaseReg, IDLoc, STI);
+ // Emit the load or store with the adjusted base and offset.
+ TOut.emitRRX(Inst.getOpcode(), DstReg, TmpReg, LoOperand, IDLoc, STI);
+ }
+ return;
}
+
+ llvm_unreachable("unexpected operand type");
}
bool MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
@@ -6976,7 +7084,7 @@ bool MipsAsmParser::parseSetPushDirective() {
// Create a copy of the current assembler options environment and push it.
AssemblerOptions.push_back(
- llvm::make_unique<MipsAssemblerOptions>(AssemblerOptions.back().get()));
+ std::make_unique<MipsAssemblerOptions>(AssemblerOptions.back().get()));
getTargetStreamer().emitDirectiveSetPush();
return false;
diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index ef13507fe63a..c3e98fe410c1 100644
--- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -267,6 +267,13 @@ static DecodeStatus DecodeJumpTargetMM(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+// DecodeJumpTargetXMM - Decode microMIPS jump and link exchange target,
+// which is shifted left by 2 bit.
+static DecodeStatus DecodeJumpTargetXMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeMem(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -2291,6 +2298,15 @@ static DecodeStatus DecodeJumpTargetMM(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeJumpTargetXMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned JumpOffset = fieldFromInstruction(Insn, 0, 26) << 2;
+ Inst.addOperand(MCOperand::createImm(JumpOffset));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeAddiur2Simm7(MCInst &Inst,
unsigned Value,
uint64_t Address,
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index 859f9cbbca07..70f2a7bdf10f 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -304,7 +304,6 @@ Optional<MCFixupKind> MipsAsmBackend::getFixupKind(StringRef Name) const {
return StringSwitch<Optional<MCFixupKind>>(Name)
.Case("R_MIPS_NONE", FK_NONE)
.Case("R_MIPS_32", FK_Data_4)
- .Case("R_MIPS_GOT_PAGE", (MCFixupKind)Mips::fixup_Mips_GOT_PAGE)
.Case("R_MIPS_CALL_HI16", (MCFixupKind)Mips::fixup_Mips_CALL_HI16)
.Case("R_MIPS_CALL_LO16", (MCFixupKind)Mips::fixup_Mips_CALL_LO16)
.Case("R_MIPS_CALL16", (MCFixupKind)Mips::fixup_Mips_CALL16)
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
index 4d7e36995ae4..cca75dfc45c2 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
@@ -66,9 +66,9 @@ public:
/// fixupNeedsRelaxation - Target specific predicate for whether a given
/// fixup requires the associated instruction to be relaxed.
- bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const override {
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
// FIXME.
llvm_unreachable("RelaxInstruction() unimplemented");
return false;
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index cf7bae98a27f..cc3168790b98 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -219,7 +219,7 @@ unsigned MipsELFObjectWriter::getRelocType(MCContext &Ctx,
const MCFixup &Fixup,
bool IsPCRel) const {
// Determine the type of the relocation.
- unsigned Kind = (unsigned)Fixup.getKind();
+ unsigned Kind = Fixup.getTargetKind();
switch (Kind) {
case FK_NONE:
@@ -690,6 +690,6 @@ llvm::createMipsELFObjectWriter(const Triple &TT, bool IsN32) {
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
bool IsN64 = TT.isArch64Bit() && !IsN32;
bool HasRelocationAddend = TT.isArch64Bit();
- return llvm::make_unique<MipsELFObjectWriter>(OSABI, HasRelocationAddend,
+ return std::make_unique<MipsELFObjectWriter>(OSABI, HasRelocationAddend,
IsN64);
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 759a7fdb32b8..142e9cebb79e 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -485,8 +485,11 @@ getJumpOffset16OpValue(const MCInst &MI, unsigned OpNo,
assert(MO.isExpr() &&
"getJumpOffset16OpValue expects only expressions or an immediate");
- // TODO: Push fixup.
- return 0;
+ const MCExpr *Expr = MO.getExpr();
+ Mips::Fixups FixupKind =
+ isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16 : Mips::fixup_Mips_LO16;
+ Fixups.push_back(MCFixup::create(0, Expr, MCFixupKind(FixupKind)));
+ return 0;
}
/// getJumpTargetOpValue - Return binary encoding of the jump
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
index ad5aff6552f6..a84ca8ccfb2d 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
@@ -10,11 +10,12 @@
#define LLVM_LIB_TARGET_MIPS_MCTARGETDESC_MIPSMCNACL_H
#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/Support/Alignment.h"
namespace llvm {
-// Log2 of the NaCl MIPS sandbox's instruction bundle size.
-static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u;
+// NaCl MIPS sandbox's instruction bundle size.
+static const Align MIPS_NACL_BUNDLE_ALIGN = Align(16);
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
bool *IsStore = nullptr);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index ddeec03ba784..79c47d1b6508 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -143,12 +143,15 @@ public:
return false;
switch (Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType) {
case MCOI::OPERAND_UNKNOWN:
- case MCOI::OPERAND_IMMEDIATE:
- // jal, bal ...
- Target = Inst.getOperand(NumOps - 1).getImm();
+ case MCOI::OPERAND_IMMEDIATE: {
+ // j, jal, jalx, jals
+ // Absolute branch within the current 256 MB-aligned region
+ uint64_t Region = Addr & ~uint64_t(0xfffffff);
+ Target = Region + Inst.getOperand(NumOps - 1).getImm();
return true;
+ }
case MCOI::OPERAND_PCREL:
- // b, j, beq ...
+ // b, beq ...
Target = Addr + Inst.getOperand(NumOps - 1).getImm();
return true;
default:
diff --git a/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
index c050db8a17fd..2d53750ad0ee 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
@@ -270,7 +270,7 @@ MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context,
S->getAssembler().setRelaxAll(true);
// Set bundle-alignment as required by the NaCl ABI for the target.
- S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN);
+ S->EmitBundleAlignMode(Log2(MIPS_NACL_BUNDLE_ALIGN));
return S;
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp b/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
index b4ebb9d18b72..3ff9c722484b 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
@@ -37,7 +37,7 @@ void MipsRegInfoRecord::EmitMipsOptionRecord() {
Context.getELFSection(".MIPS.options", ELF::SHT_MIPS_OPTIONS,
ELF::SHF_ALLOC | ELF::SHF_MIPS_NOSTRIP, 1, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(8);
+ Sec->setAlignment(Align(8));
Streamer->SwitchSection(Sec);
Streamer->EmitIntValue(ELF::ODK_REGINFO, 1); // kind
@@ -55,7 +55,7 @@ void MipsRegInfoRecord::EmitMipsOptionRecord() {
MCSectionELF *Sec = Context.getELFSection(".reginfo", ELF::SHT_MIPS_REGINFO,
ELF::SHF_ALLOC, 24, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(MTS->getABI().IsN32() ? 8 : 4);
+ Sec->setAlignment(MTS->getABI().IsN32() ? Align(8) : Align(4));
Streamer->SwitchSection(Sec);
Streamer->EmitIntValue(ri_gprmask, 4);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index e3bdb3b140a8..b6dae9f6dea8 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -34,6 +34,15 @@ static cl::opt<bool> RoundSectionSizes(
cl::desc("Round section sizes up to the section alignment"), cl::Hidden);
} // end anonymous namespace
+static bool isMipsR6(const MCSubtargetInfo *STI) {
+ return STI->getFeatureBits()[Mips::FeatureMips32r6] ||
+ STI->getFeatureBits()[Mips::FeatureMips64r6];
+}
+
+static bool isMicroMips(const MCSubtargetInfo *STI) {
+ return STI->getFeatureBits()[Mips::FeatureMicroMips];
+}
+
MipsTargetStreamer::MipsTargetStreamer(MCStreamer &S)
: MCTargetStreamer(S), GPReg(Mips::GP), ModuleDirectiveAllowed(true) {
GPRInfoSet = FPRInfoSet = FrameInfoSet = false;
@@ -216,6 +225,19 @@ void MipsTargetStreamer::emitRRR(unsigned Opcode, unsigned Reg0, unsigned Reg1,
emitRRX(Opcode, Reg0, Reg1, MCOperand::createReg(Reg2), IDLoc, STI);
}
+void MipsTargetStreamer::emitRRRX(unsigned Opcode, unsigned Reg0, unsigned Reg1,
+ unsigned Reg2, MCOperand Op3, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(MCOperand::createReg(Reg0));
+ TmpInst.addOperand(MCOperand::createReg(Reg1));
+ TmpInst.addOperand(MCOperand::createReg(Reg2));
+ TmpInst.addOperand(Op3);
+ TmpInst.setLoc(IDLoc);
+ getStreamer().EmitInstruction(TmpInst, *STI);
+}
+
void MipsTargetStreamer::emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1,
int16_t Imm, SMLoc IDLoc,
const MCSubtargetInfo *STI) {
@@ -264,8 +286,7 @@ void MipsTargetStreamer::emitEmptyDelaySlot(bool hasShortDelaySlot, SMLoc IDLoc,
}
void MipsTargetStreamer::emitNop(SMLoc IDLoc, const MCSubtargetInfo *STI) {
- const FeatureBitset &Features = STI->getFeatureBits();
- if (Features[Mips::FeatureMicroMips])
+ if (isMicroMips(STI))
emitRR(Mips::MOVE16_MM, Mips::ZERO, Mips::ZERO, IDLoc, STI);
else
emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
@@ -311,21 +332,34 @@ void MipsTargetStreamer::emitStoreWithImmOffset(
emitRRI(Opcode, SrcReg, ATReg, LoOffset, IDLoc, STI);
}
-/// Emit a store instruction with an symbol offset. Symbols are assumed to be
-/// out of range for a simm16 will be expanded to appropriate instructions.
-void MipsTargetStreamer::emitStoreWithSymOffset(
- unsigned Opcode, unsigned SrcReg, unsigned BaseReg, MCOperand &HiOperand,
- MCOperand &LoOperand, unsigned ATReg, SMLoc IDLoc,
- const MCSubtargetInfo *STI) {
- // sw $8, sym => lui $at, %hi(sym)
- // sw $8, %lo(sym)($at)
+/// Emit a store instruction with an symbol offset.
+void MipsTargetStreamer::emitSCWithSymOffset(unsigned Opcode, unsigned SrcReg,
+ unsigned BaseReg,
+ MCOperand &HiOperand,
+ MCOperand &LoOperand,
+ unsigned ATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ // sc $8, sym => lui $at, %hi(sym)
+ // sc $8, %lo(sym)($at)
// Generate the base address in ATReg.
emitRX(Mips::LUi, ATReg, HiOperand, IDLoc, STI);
- if (BaseReg != Mips::ZERO)
- emitRRR(Mips::ADDu, ATReg, ATReg, BaseReg, IDLoc, STI);
- // Emit the store with the adjusted base and offset.
- emitRRX(Opcode, SrcReg, ATReg, LoOperand, IDLoc, STI);
+ if (!isMicroMips(STI) && isMipsR6(STI)) {
+ // For non-micromips r6 offset for 'sc' is not in the lower 16 bits so we
+ // put it in 'at'.
+ // sc $8, sym => lui $at, %hi(sym)
+ // addiu $at, $at, %lo(sym)
+ // sc $8, 0($at)
+ emitRRX(Mips::ADDiu, ATReg, ATReg, LoOperand, IDLoc, STI);
+ MCOperand Offset = MCOperand::createImm(0);
+ // Emit the store with the adjusted base and offset.
+ emitRRRX(Opcode, SrcReg, SrcReg, ATReg, Offset, IDLoc, STI);
+ } else {
+ if (BaseReg != Mips::ZERO)
+ emitRRR(Mips::ADDu, ATReg, ATReg, BaseReg, IDLoc, STI);
+ // Emit the store with the adjusted base and offset.
+ emitRRRX(Opcode, SrcReg, SrcReg, ATReg, LoOperand, IDLoc, STI);
+ }
}
/// Emit a load instruction with an immediate offset. DstReg and TmpReg are
@@ -364,30 +398,6 @@ void MipsTargetStreamer::emitLoadWithImmOffset(unsigned Opcode, unsigned DstReg,
emitRRI(Opcode, DstReg, TmpReg, LoOffset, IDLoc, STI);
}
-/// Emit a load instruction with an symbol offset. Symbols are assumed to be
-/// out of range for a simm16 will be expanded to appropriate instructions.
-/// DstReg and TmpReg are permitted to be the same register iff DstReg is a
-/// GPR. It is the callers responsibility to identify such cases and pass the
-/// appropriate register in TmpReg.
-void MipsTargetStreamer::emitLoadWithSymOffset(unsigned Opcode, unsigned DstReg,
- unsigned BaseReg,
- MCOperand &HiOperand,
- MCOperand &LoOperand,
- unsigned TmpReg, SMLoc IDLoc,
- const MCSubtargetInfo *STI) {
- // 1) lw $8, sym => lui $8, %hi(sym)
- // lw $8, %lo(sym)($8)
- // 2) ldc1 $f0, sym => lui $at, %hi(sym)
- // ldc1 $f0, %lo(sym)($at)
-
- // Generate the base address in TmpReg.
- emitRX(Mips::LUi, TmpReg, HiOperand, IDLoc, STI);
- if (BaseReg != Mips::ZERO)
- emitRRR(Mips::ADDu, TmpReg, TmpReg, BaseReg, IDLoc, STI);
- // Emit the load with the adjusted base and offset.
- emitRRX(Opcode, DstReg, TmpReg, LoOperand, IDLoc, STI);
-}
-
MipsTargetAsmStreamer::MipsTargetAsmStreamer(MCStreamer &S,
formatted_raw_ostream &OS)
: MipsTargetStreamer(S), OS(OS) {}
@@ -891,9 +901,9 @@ void MipsTargetELFStreamer::finish() {
MCSection &BSSSection = *OFI.getBSSSection();
MCA.registerSection(BSSSection);
- TextSection.setAlignment(std::max(16u, TextSection.getAlignment()));
- DataSection.setAlignment(std::max(16u, DataSection.getAlignment()));
- BSSSection.setAlignment(std::max(16u, BSSSection.getAlignment()));
+ TextSection.setAlignment(Align(std::max(16u, TextSection.getAlignment())));
+ DataSection.setAlignment(Align(std::max(16u, DataSection.getAlignment())));
+ BSSSection.setAlignment(Align(std::max(16u, BSSSection.getAlignment())));
if (RoundSectionSizes) {
// Make sections sizes a multiple of the alignment. This is useful for
@@ -1016,7 +1026,7 @@ void MipsTargetELFStreamer::emitDirectiveEnd(StringRef Name) {
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Context);
MCA.registerSection(*Sec);
- Sec->setAlignment(4);
+ Sec->setAlignment(Align(4));
OS.PushSection();
@@ -1306,7 +1316,7 @@ void MipsTargetELFStreamer::emitMipsAbiFlags() {
MCSectionELF *Sec = Context.getELFSection(
".MIPS.abiflags", ELF::SHT_MIPS_ABIFLAGS, ELF::SHF_ALLOC, 24, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(8);
+ Sec->setAlignment(Align(8));
OS.SwitchSection(Sec);
OS << ABIFlagsSection;
diff --git a/lib/Target/Mips/MicroMipsDSPInstrInfo.td b/lib/Target/Mips/MicroMipsDSPInstrInfo.td
index 5a12568893af..9a1e47e5ecca 100644
--- a/lib/Target/Mips/MicroMipsDSPInstrInfo.td
+++ b/lib/Target/Mips/MicroMipsDSPInstrInfo.td
@@ -360,7 +360,7 @@ class RDDSP_MM_DESC {
dag OutOperandList = (outs GPR32Opnd:$rt);
dag InOperandList = (ins uimm7:$mask);
string AsmString = !strconcat("rddsp", "\t$rt, $mask");
- list<dag> Pattern = [(set GPR32Opnd:$rt, (int_mips_rddsp immZExt7:$mask))];
+ list<dag> Pattern = [(set GPR32Opnd:$rt, (int_mips_rddsp timmZExt7:$mask))];
InstrItinClass Itinerary = NoItinerary;
}
@@ -383,7 +383,7 @@ class WRDSP_MM_DESC {
dag OutOperandList = (outs);
dag InOperandList = (ins GPR32Opnd:$rt, uimm7:$mask);
string AsmString = !strconcat("wrdsp", "\t$rt, $mask");
- list<dag> Pattern = [(int_mips_wrdsp GPR32Opnd:$rt, immZExt7:$mask)];
+ list<dag> Pattern = [(int_mips_wrdsp GPR32Opnd:$rt, timmZExt7:$mask)];
InstrItinClass Itinerary = NoItinerary;
bit isMoveReg = 1;
}
diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td
index 9b7f7b25fa94..8cc0029fc896 100644
--- a/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -955,17 +955,18 @@ let DecoderNamespace = "MicroMips" in {
EXT_FM_MM<0x0c>, ISA_MICROMIPS32_NOT_MIPS32R6;
/// Jump Instructions
- let DecoderMethod = "DecodeJumpTargetMM" in
+ let DecoderMethod = "DecodeJumpTargetMM" in {
def J_MM : MMRel, JumpFJ<jmptarget_mm, "j", br, bb, "j">,
J_FM_MM<0x35>, AdditionalRequires<[RelocNotPIC]>,
IsBranch, ISA_MICROMIPS32_NOT_MIPS32R6;
-
- let DecoderMethod = "DecodeJumpTargetMM" in {
def JAL_MM : MMRel, JumpLink<"jal", calltarget_mm>, J_FM_MM<0x3d>,
ISA_MICROMIPS32_NOT_MIPS32R6;
+ }
+
+ let DecoderMethod = "DecodeJumpTargetXMM" in
def JALX_MM : MMRel, JumpLink<"jalx", calltarget>, J_FM_MM<0x3c>,
ISA_MICROMIPS32_NOT_MIPS32R6;
- }
+
def JR_MM : MMRel, IndirectBranch<"jr", GPR32Opnd>, JR_FM_MM<0x3c>,
ISA_MICROMIPS32_NOT_MIPS32R6;
def JALR_MM : JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM_MM<0x03c>,
diff --git a/lib/Target/Mips/MicroMipsSizeReduction.cpp b/lib/Target/Mips/MicroMipsSizeReduction.cpp
index 70af95592aa5..db93b3d80ede 100644
--- a/lib/Target/Mips/MicroMipsSizeReduction.cpp
+++ b/lib/Target/Mips/MicroMipsSizeReduction.cpp
@@ -361,7 +361,7 @@ static bool CheckXWPInstr(MachineInstr *MI, bool ReduceToLwp,
MI->getOpcode() == Mips::SW16_MM))
return false;
- unsigned reg = MI->getOperand(0).getReg();
+ Register reg = MI->getOperand(0).getReg();
if (reg == Mips::RA)
return false;
@@ -403,8 +403,8 @@ static bool ConsecutiveInstr(MachineInstr *MI1, MachineInstr *MI2) {
if (!GetImm(MI2, 2, Offset2))
return false;
- unsigned Reg1 = MI1->getOperand(0).getReg();
- unsigned Reg2 = MI2->getOperand(0).getReg();
+ Register Reg1 = MI1->getOperand(0).getReg();
+ Register Reg2 = MI2->getOperand(0).getReg();
return ((Offset1 == (Offset2 - 4)) && (ConsecutiveRegisters(Reg1, Reg2)));
}
@@ -475,8 +475,8 @@ bool MicroMipsSizeReduce::ReduceXWtoXWP(ReduceEntryFunArgs *Arguments) {
if (!CheckXWPInstr(MI2, ReduceToLwp, Entry))
return false;
- unsigned Reg1 = MI1->getOperand(1).getReg();
- unsigned Reg2 = MI2->getOperand(1).getReg();
+ Register Reg1 = MI1->getOperand(1).getReg();
+ Register Reg2 = MI2->getOperand(1).getReg();
if (Reg1 != Reg2)
return false;
@@ -621,8 +621,8 @@ bool MicroMipsSizeReduce::ReduceMoveToMovep(ReduceEntryFunArgs *Arguments) {
MachineInstr *MI1 = Arguments->MI;
MachineInstr *MI2 = &*NextMII;
- unsigned RegDstMI1 = MI1->getOperand(0).getReg();
- unsigned RegSrcMI1 = MI1->getOperand(1).getReg();
+ Register RegDstMI1 = MI1->getOperand(0).getReg();
+ Register RegSrcMI1 = MI1->getOperand(1).getReg();
if (!IsMovepSrcRegister(RegSrcMI1))
return false;
@@ -633,8 +633,8 @@ bool MicroMipsSizeReduce::ReduceMoveToMovep(ReduceEntryFunArgs *Arguments) {
if (MI2->getOpcode() != Entry.WideOpc())
return false;
- unsigned RegDstMI2 = MI2->getOperand(0).getReg();
- unsigned RegSrcMI2 = MI2->getOperand(1).getReg();
+ Register RegDstMI2 = MI2->getOperand(0).getReg();
+ Register RegSrcMI2 = MI2->getOperand(1).getReg();
if (!IsMovepSrcRegister(RegSrcMI2))
return false;
diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td
index 7b83ea8535ae..a5908362e81f 100644
--- a/lib/Target/Mips/Mips.td
+++ b/lib/Target/Mips/Mips.td
@@ -25,6 +25,8 @@ class PredicateControl {
list<Predicate> GPRPredicates = [];
// Predicates for the PTR size such as IsPTR64bit
list<Predicate> PTRPredicates = [];
+ // Predicates for a symbol's size such as hasSym32.
+ list<Predicate> SYMPredicates = [];
// Predicates for the FGR size and layout such as IsFP64bit
list<Predicate> FGRPredicates = [];
// Predicates for the instruction group membership such as ISA's.
@@ -38,6 +40,7 @@ class PredicateControl {
list<Predicate> Predicates = !listconcat(EncodingPredicates,
GPRPredicates,
PTRPredicates,
+ SYMPredicates,
FGRPredicates,
InsnPredicates,
HardFloatPredicate,
@@ -206,6 +209,9 @@ def FeatureMT : SubtargetFeature<"mt", "HasMT", "true", "Mips MT ASE">;
def FeatureLongCalls : SubtargetFeature<"long-calls", "UseLongCalls", "true",
"Disable use of the jal instruction">;
+def FeatureXGOT
+ : SubtargetFeature<"xgot", "UseXGOT", "true", "Assume 32-bit GOT">;
+
def FeatureUseIndirectJumpsHazard : SubtargetFeature<"use-indirect-jump-hazard",
"UseIndirectJumpsHazard",
"true", "Use indirect jump"
@@ -257,3 +263,9 @@ def Mips : Target {
let AssemblyParserVariants = [MipsAsmParserVariant];
let AllowRegisterRenaming = 1;
}
+
+//===----------------------------------------------------------------------===//
+// Pfm Counters
+//===----------------------------------------------------------------------===//
+
+include "MipsPfmCounters.td"
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index 3ab4f1e064da..768d54fc9c24 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -72,7 +72,7 @@ void Mips16DAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
MachineRegisterInfo &RegInfo = MF.getRegInfo();
const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
DebugLoc DL;
- unsigned V0, V1, V2, GlobalBaseReg = MipsFI->getGlobalBaseReg();
+ Register V0, V1, V2, GlobalBaseReg = MipsFI->getGlobalBaseReg();
const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass;
V0 = RegInfo.createVirtualRegister(RC);
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index 6d8e5aef2a3f..5a5b78c9d5f9 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -708,8 +708,8 @@ Mips16TargetLowering::emitFEXT_T8I816_ins(unsigned BtOpc, unsigned CmpOpc,
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned regX = MI.getOperand(0).getReg();
- unsigned regY = MI.getOperand(1).getReg();
+ Register regX = MI.getOperand(0).getReg();
+ Register regY = MI.getOperand(1).getReg();
MachineBasicBlock *target = MI.getOperand(2).getMBB();
BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(CmpOpc))
.addReg(regX)
@@ -725,7 +725,7 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned regX = MI.getOperand(0).getReg();
+ Register regX = MI.getOperand(0).getReg();
int64_t imm = MI.getOperand(1).getImm();
MachineBasicBlock *target = MI.getOperand(2).getMBB();
unsigned CmpOpc;
@@ -758,9 +758,9 @@ Mips16TargetLowering::emitFEXT_CCRX16_ins(unsigned SltOpc, MachineInstr &MI,
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned CC = MI.getOperand(0).getReg();
- unsigned regX = MI.getOperand(1).getReg();
- unsigned regY = MI.getOperand(2).getReg();
+ Register CC = MI.getOperand(0).getReg();
+ Register regX = MI.getOperand(1).getReg();
+ Register regY = MI.getOperand(2).getReg();
BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc))
.addReg(regX)
.addReg(regY);
@@ -777,8 +777,8 @@ Mips16TargetLowering::emitFEXT_CCRXI16_ins(unsigned SltiOpc, unsigned SltiXOpc,
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned CC = MI.getOperand(0).getReg();
- unsigned regX = MI.getOperand(1).getReg();
+ Register CC = MI.getOperand(0).getReg();
+ Register regX = MI.getOperand(1).getReg();
int64_t Imm = MI.getOperand(2).getImm();
unsigned SltOpc = Mips16WhichOp8uOr16simm(SltiOpc, SltiXOpc, Imm);
BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc)).addReg(regX).addImm(Imm);
diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp
index c234c309d760..0d735c20ec2f 100644
--- a/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -358,7 +358,7 @@ unsigned Mips16InstrInfo::loadImmediate(unsigned FrameReg, int64_t Imm,
for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
MachineOperand &MO = II->getOperand(i);
if (MO.isReg() && MO.getReg() != 0 && !MO.isDef() &&
- !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ !Register::isVirtualRegister(MO.getReg()))
Candidates.reset(MO.getReg());
}
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index 7f35280f7936..cc15949b0d57 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -16,6 +16,7 @@
// shamt must fit in 6 bits.
def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>;
+def timmZExt6 : TImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>;
// Node immediate fits as 10-bit sign extended on target immediate.
// e.g. seqi, snei
@@ -651,6 +652,7 @@ def : MipsPat<(MipsTlsHi tglobaltlsaddr:$in), (LUi64 tglobaltlsaddr:$in)>,
let AdditionalPredicates = [NotInMicroMips] in {
def : MipsPat<(MipsJmpLink (i64 texternalsym:$dst)),
(JAL texternalsym:$dst)>, ISA_MIPS3, GPR_64, SYM_64;
+
def : MipsPat<(MipsHighest (i64 tglobaladdr:$in)),
(LUi64 tglobaladdr:$in)>, ISA_MIPS3, GPR_64, SYM_64;
def : MipsPat<(MipsHighest (i64 tblockaddress:$in)),
@@ -682,6 +684,20 @@ let AdditionalPredicates = [NotInMicroMips] in {
(DADDiu GPR64:$hi, tjumptable:$lo)>, ISA_MIPS3, GPR_64, SYM_64;
def : MipsPat<(add GPR64:$hi, (MipsHigher (i64 tconstpool:$lo))),
(DADDiu GPR64:$hi, tconstpool:$lo)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(add GPR64:$hi, (MipsHigher (i64 texternalsym:$lo))),
+ (DADDiu GPR64:$hi, texternalsym:$lo)>,
+ ISA_MIPS3, GPR_64, SYM_64;
+
+ def : MipsPat<(MipsHi (i64 tglobaladdr:$in)),
+ (DADDiu ZERO_64, tglobaladdr:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsHi (i64 tblockaddress:$in)),
+ (DADDiu ZERO_64, tblockaddress:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsHi (i64 tjumptable:$in)),
+ (DADDiu ZERO_64, tjumptable:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsHi (i64 tconstpool:$in)),
+ (DADDiu ZERO_64, tconstpool:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsHi (i64 texternalsym:$in)),
+ (DADDiu ZERO_64, texternalsym:$in)>, ISA_MIPS3, GPR_64, SYM_64;
def : MipsPat<(add GPR64:$hi, (MipsHi (i64 tglobaladdr:$lo))),
(DADDiu GPR64:$hi, tglobaladdr:$lo)>, ISA_MIPS3, GPR_64, SYM_64;
@@ -692,6 +708,23 @@ let AdditionalPredicates = [NotInMicroMips] in {
(DADDiu GPR64:$hi, tjumptable:$lo)>, ISA_MIPS3, GPR_64, SYM_64;
def : MipsPat<(add GPR64:$hi, (MipsHi (i64 tconstpool:$lo))),
(DADDiu GPR64:$hi, tconstpool:$lo)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(add GPR64:$hi, (MipsHi (i64 texternalsym:$lo))),
+ (DADDiu GPR64:$hi, texternalsym:$lo)>,
+ ISA_MIPS3, GPR_64, SYM_64;
+
+ def : MipsPat<(MipsLo (i64 tglobaladdr:$in)),
+ (DADDiu ZERO_64, tglobaladdr:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsLo (i64 tblockaddress:$in)),
+ (DADDiu ZERO_64, tblockaddress:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsLo (i64 tjumptable:$in)),
+ (DADDiu ZERO_64, tjumptable:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsLo (i64 tconstpool:$in)),
+ (DADDiu ZERO_64, tconstpool:$in)>, ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsLo (i64 tglobaltlsaddr:$in)),
+ (DADDiu ZERO_64, tglobaltlsaddr:$in)>,
+ ISA_MIPS3, GPR_64, SYM_64;
+ def : MipsPat<(MipsLo (i64 texternalsym:$in)),
+ (DADDiu ZERO_64, texternalsym:$in)>, ISA_MIPS3, GPR_64, SYM_64;
def : MipsPat<(add GPR64:$hi, (MipsLo (i64 tglobaladdr:$lo))),
(DADDiu GPR64:$hi, tglobaladdr:$lo)>, ISA_MIPS3, GPR_64, SYM_64;
@@ -705,6 +738,9 @@ let AdditionalPredicates = [NotInMicroMips] in {
def : MipsPat<(add GPR64:$hi, (MipsLo (i64 tglobaltlsaddr:$lo))),
(DADDiu GPR64:$hi, tglobaltlsaddr:$lo)>, ISA_MIPS3, GPR_64,
SYM_64;
+ def : MipsPat<(add GPR64:$hi, (MipsLo (i64 texternalsym:$lo))),
+ (DADDiu GPR64:$hi, texternalsym:$lo)>,
+ ISA_MIPS3, GPR_64, SYM_64;
}
// gp_rel relocs
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index db83fe49cec0..2201545adc94 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -56,6 +56,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
@@ -376,7 +377,7 @@ void MipsAsmPrinter::printSavedRegsBitmask() {
void MipsAsmPrinter::emitFrameDirective() {
const TargetRegisterInfo &RI = *MF->getSubtarget().getRegisterInfo();
- unsigned stackReg = RI.getFrameRegister(*MF);
+ Register stackReg = RI.getFrameRegister(*MF);
unsigned returnReg = RI.getRARegister();
unsigned stackSize = MF->getFrameInfo().getStackSize();
@@ -571,7 +572,7 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
// for 2 for 32 bit mode and 1 for 64 bit mode.
if (NumVals != 2) {
if (Subtarget->isGP64bit() && NumVals == 1 && MO.isReg()) {
- unsigned Reg = MO.getReg();
+ Register Reg = MO.getReg();
O << '$' << MipsInstPrinter::getRegisterName(Reg);
return false;
}
@@ -597,7 +598,7 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
const MachineOperand &MO = MI->getOperand(RegOp);
if (!MO.isReg())
return true;
- unsigned Reg = MO.getReg();
+ Register Reg = MO.getReg();
O << '$' << MipsInstPrinter::getRegisterName(Reg);
return false;
}
@@ -780,7 +781,7 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
StringRef CPU = MIPS_MC::selectMipsCPU(TT, TM.getTargetCPU());
StringRef FS = TM.getTargetFeatureString();
const MipsTargetMachine &MTM = static_cast<const MipsTargetMachine &>(TM);
- const MipsSubtarget STI(TT, CPU, FS, MTM.isLittleEndian(), MTM, 0);
+ const MipsSubtarget STI(TT, CPU, FS, MTM.isLittleEndian(), MTM, None);
bool IsABICalls = STI.isABICalls();
const MipsABIInfo &ABI = MTM.getABI();
@@ -821,6 +822,9 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// option has changed the default (i.e. FPXX) and omit it otherwise.
if (ABI.IsO32() && (!STI.useOddSPReg() || STI.isABI_FPXX()))
TS.emitDirectiveModuleOddSPReg();
+
+ // Switch to the .text section.
+ OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
}
void MipsAsmPrinter::emitInlineAsmStart() const {
diff --git a/lib/Target/Mips/MipsCallLowering.cpp b/lib/Target/Mips/MipsCallLowering.cpp
index da65689ecff5..cad82953af50 100644
--- a/lib/Target/Mips/MipsCallLowering.cpp
+++ b/lib/Target/Mips/MipsCallLowering.cpp
@@ -106,6 +106,7 @@ private:
Register ArgsReg, const EVT &VT) override;
virtual void markPhysRegUsed(unsigned PhysReg) {
+ MIRBuilder.getMRI()->addLiveIn(PhysReg);
MIRBuilder.getMBB().addLiveIn(PhysReg);
}
@@ -357,7 +358,7 @@ bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
return true;
}
-static bool isSupportedType(Type *T) {
+static bool isSupportedArgumentType(Type *T) {
if (T->isIntegerTy())
return true;
if (T->isPointerTy())
@@ -367,6 +368,18 @@ static bool isSupportedType(Type *T) {
return false;
}
+static bool isSupportedReturnType(Type *T) {
+ if (T->isIntegerTy())
+ return true;
+ if (T->isPointerTy())
+ return true;
+ if (T->isFloatingPointTy())
+ return true;
+ if (T->isAggregateType())
+ return true;
+ return false;
+}
+
static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
const ISD::ArgFlagsTy &Flags) {
// > does not mean loss of information as type RegisterVT can't hold type VT,
@@ -403,7 +416,7 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);
- if (Val != nullptr && !isSupportedType(Val->getType()))
+ if (Val != nullptr && !isSupportedReturnType(Val->getType()))
return false;
if (!VRegs.empty()) {
@@ -411,21 +424,13 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Function &F = MF.getFunction();
const DataLayout &DL = MF.getDataLayout();
const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
- LLVMContext &Ctx = Val->getType()->getContext();
-
- SmallVector<EVT, 4> SplitEVTs;
- ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
- assert(VRegs.size() == SplitEVTs.size() &&
- "For each split Type there should be exactly one VReg.");
SmallVector<ArgInfo, 8> RetInfos;
SmallVector<unsigned, 8> OrigArgIndices;
- for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
- ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
- setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
- splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
- }
+ ArgInfo ArgRetInfo(VRegs, Val->getType());
+ setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
+ splitToValueTypes(DL, ArgRetInfo, 0, RetInfos, OrigArgIndices);
SmallVector<ISD::OutputArg, 8> Outs;
subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);
@@ -453,12 +458,8 @@ bool MipsCallLowering::lowerFormalArguments(
if (F.arg_empty())
return true;
- if (F.isVarArg()) {
- return false;
- }
-
for (auto &Arg : F.args()) {
- if (!isSupportedType(Arg.getType()))
+ if (!isSupportedArgumentType(Arg.getType()))
return false;
}
@@ -472,7 +473,8 @@ bool MipsCallLowering::lowerFormalArguments(
for (auto &Arg : F.args()) {
ArgInfo AInfo(VRegs[i], Arg.getType());
setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
- splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
+ ArgInfos.push_back(AInfo);
+ OrigArgIndices.push_back(i);
++i;
}
@@ -495,30 +497,64 @@ bool MipsCallLowering::lowerFormalArguments(
if (!Handler.handle(ArgLocs, ArgInfos))
return false;
+ if (F.isVarArg()) {
+ ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
+ unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+
+ int VaArgOffset;
+ unsigned RegSize = 4;
+ if (ArgRegs.size() == Idx)
+ VaArgOffset = alignTo(CCInfo.getNextStackOffset(), RegSize);
+ else {
+ VaArgOffset =
+ (int)ABI.GetCalleeAllocdArgSizeInBytes(CCInfo.getCallingConv()) -
+ (int)(RegSize * (ArgRegs.size() - Idx));
+ }
+
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
+ MF.getInfo<MipsFunctionInfo>()->setVarArgsFrameIndex(FI);
+
+ for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) {
+ MIRBuilder.getMBB().addLiveIn(ArgRegs[I]);
+
+ MachineInstrBuilder Copy =
+ MIRBuilder.buildCopy(LLT::scalar(RegSize * 8), Register(ArgRegs[I]));
+ FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
+ MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI);
+ MachineInstrBuilder FrameIndex =
+ MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI);
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize,
+ /* Alignment */ RegSize);
+ MIRBuilder.buildStore(Copy, FrameIndex, *MMO);
+ }
+ }
+
return true;
}
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
- CallingConv::ID CallConv,
- const MachineOperand &Callee,
- const ArgInfo &OrigRet,
- ArrayRef<ArgInfo> OrigArgs) const {
+ CallLoweringInfo &Info) const {
- if (CallConv != CallingConv::C)
+ if (Info.CallConv != CallingConv::C)
return false;
- for (auto &Arg : OrigArgs) {
- if (!isSupportedType(Arg.Ty))
+ for (auto &Arg : Info.OrigArgs) {
+ if (!isSupportedArgumentType(Arg.Ty))
+ return false;
+ if (Arg.Flags[0].isByVal())
return false;
- if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
+ if (Arg.Flags[0].isSRet() && !Arg.Ty->isPointerTy())
return false;
}
- if (OrigRet.Regs[0] && !isSupportedType(OrigRet.Ty))
+ if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedReturnType(Info.OrigRet.Ty))
return false;
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
+ const DataLayout &DL = MF.getDataLayout();
const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
const MipsTargetMachine &TM =
static_cast<const MipsTargetMachine &>(MF.getTarget());
@@ -528,37 +564,38 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);
const bool IsCalleeGlobalPIC =
- Callee.isGlobal() && TM.isPositionIndependent();
+ Info.Callee.isGlobal() && TM.isPositionIndependent();
MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
- Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
+ Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
MIB.addDef(Mips::SP, RegState::Implicit);
if (IsCalleeGlobalPIC) {
Register CalleeReg =
MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
MachineInstr *CalleeGlobalValue =
- MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
- if (!Callee.getGlobal()->hasLocalLinkage())
+ MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
+ if (!Info.Callee.getGlobal()->hasLocalLinkage())
CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
MIB.addUse(CalleeReg);
} else
- MIB.add(Callee);
+ MIB.add(Info.Callee);
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));
TargetLowering::ArgListTy FuncOrigArgs;
- FuncOrigArgs.reserve(OrigArgs.size());
+ FuncOrigArgs.reserve(Info.OrigArgs.size());
SmallVector<ArgInfo, 8> ArgInfos;
SmallVector<unsigned, 8> OrigArgIndices;
unsigned i = 0;
- for (auto &Arg : OrigArgs) {
+ for (auto &Arg : Info.OrigArgs) {
TargetLowering::ArgListEntry Entry;
Entry.Ty = Arg.Ty;
FuncOrigArgs.push_back(Entry);
- splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
+ ArgInfos.push_back(Arg);
+ OrigArgIndices.push_back(i);
++i;
}
@@ -566,11 +603,17 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);
SmallVector<CCValAssign, 8> ArgLocs;
- MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
+ bool IsCalleeVarArg = false;
+ if (Info.Callee.isGlobal()) {
+ const Function *CF = static_cast<const Function *>(Info.Callee.getGlobal());
+ IsCalleeVarArg = CF->isVarArg();
+ }
+ MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
F.getContext());
- CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
- const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
+ CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
+ const char *Call =
+ Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
setLocInfo(ArgLocs, Outs);
@@ -599,11 +642,11 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
*STI.getRegBankInfo());
}
- if (OrigRet.Regs[0]) {
+ if (!Info.OrigRet.Ty->isVoidTy()) {
ArgInfos.clear();
SmallVector<unsigned, 8> OrigRetIndices;
- splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);
+ splitToValueTypes(DL, Info.OrigRet, 0, ArgInfos, OrigRetIndices);
SmallVector<ISD::InputArg, 8> Ins;
subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);
@@ -612,7 +655,7 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
F.getContext());
- CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
+ CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty, Call);
setLocInfo(ArgLocs, Ins);
CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
@@ -642,12 +685,12 @@ void MipsCallLowering::subTargetRegTypeForCallingConv(
F.getContext(), F.getCallingConv(), VT);
for (unsigned i = 0; i < NumRegs; ++i) {
- ISD::ArgFlagsTy Flags = Arg.Flags;
+ ISD::ArgFlagsTy Flags = Arg.Flags[0];
if (i == 0)
Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
else
- Flags.setOrigAlign(1);
+ Flags.setOrigAlign(Align::None());
ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
0);
@@ -657,12 +700,21 @@ void MipsCallLowering::subTargetRegTypeForCallingConv(
}
void MipsCallLowering::splitToValueTypes(
- const ArgInfo &OrigArg, unsigned OriginalIndex,
+ const DataLayout &DL, const ArgInfo &OrigArg, unsigned OriginalIndex,
SmallVectorImpl<ArgInfo> &SplitArgs,
SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {
- // TODO : perform structure and array split. For now we only deal with
- // types that pass isSupportedType check.
- SplitArgs.push_back(OrigArg);
- SplitArgsOrigIndices.push_back(OriginalIndex);
+ SmallVector<EVT, 4> SplitEVTs;
+ SmallVector<Register, 4> SplitVRegs;
+ const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
+ LLVMContext &Ctx = OrigArg.Ty->getContext();
+
+ ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitEVTs);
+
+ for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
+ ArgInfo Info = ArgInfo{OrigArg.Regs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
+ Info.Flags = OrigArg.Flags;
+ SplitArgs.push_back(Info);
+ SplitArgsOrigIndices.push_back(OriginalIndex);
+ }
}
diff --git a/lib/Target/Mips/MipsCallLowering.h b/lib/Target/Mips/MipsCallLowering.h
index 11c2d53ad35d..a284cf5e26cf 100644
--- a/lib/Target/Mips/MipsCallLowering.h
+++ b/lib/Target/Mips/MipsCallLowering.h
@@ -68,9 +68,8 @@ public:
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
ArrayRef<ArrayRef<Register>> VRegs) const override;
- bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
- const MachineOperand &Callee, const ArgInfo &OrigRet,
- ArrayRef<ArgInfo> OrigArgs) const override;
+ bool lowerCall(MachineIRBuilder &MIRBuilder,
+ CallLoweringInfo &Info) const override;
private:
/// Based on registers available on target machine split or extend
@@ -83,7 +82,8 @@ private:
/// Split structures and arrays, save original argument indices since
/// Mips calling convention needs info about original argument type.
- void splitToValueTypes(const ArgInfo &OrigArg, unsigned OriginalIndex,
+ void splitToValueTypes(const DataLayout &DL, const ArgInfo &OrigArg,
+ unsigned OriginalIndex,
SmallVectorImpl<ArgInfo> &SplitArgs,
SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const;
};
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index eea28df7eda1..f50640521738 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -222,12 +222,7 @@ namespace {
BasicBlockInfo() = default;
- // FIXME: ignore LogAlign for this patch
- //
- unsigned postOffset(unsigned LogAlign = 0) const {
- unsigned PO = Offset + Size;
- return PO;
- }
+ unsigned postOffset() const { return Offset + Size; }
};
std::vector<BasicBlockInfo> BBInfo;
@@ -376,7 +371,7 @@ namespace {
void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- unsigned getCPELogAlign(const MachineInstr &CPEMI);
+ Align getCPEAlign(const MachineInstr &CPEMI);
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
unsigned getOffsetOf(MachineInstr *MI) const;
unsigned getUserOffset(CPUser&) const;
@@ -534,11 +529,11 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
- unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
+ const Align MaxAlign(MCP->getConstantPoolAlignment());
// Mark the basic block as required by the const-pool.
// If AlignConstantIslands isn't set, use 4-byte alignment for everything.
- BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
+ BB->setAlignment(AlignConstantIslands ? MaxAlign : Align(4));
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
@@ -548,7 +543,8 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// alignment of all entries as long as BB is sufficiently aligned. Keep
// track of the insertion point for each alignment. We are going to bucket
// sort the entries as they are created.
- SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(Log2(MaxAlign) + 1,
+ BB->end());
// Add all of the constants from the constant pool to the end block, use an
// identity mapping of CPI's to CPE's.
@@ -576,7 +572,7 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// Ensure that future entries with higher alignment get inserted before
// CPEMI. This is bucket sort with iterators.
- for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
+ for (unsigned a = LogAlign + 1; a <= Log2(MaxAlign); ++a)
if (InsPoint[a] == InsAt)
InsPoint[a] = CPEMI;
// Add a new CPEntry, but no corresponding CPUser yet.
@@ -621,20 +617,18 @@ MipsConstantIslands::CPEntry
return nullptr;
}
-/// getCPELogAlign - Returns the required alignment of the constant pool entry
+/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
-unsigned MipsConstantIslands::getCPELogAlign(const MachineInstr &CPEMI) {
+Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY);
// Everything is 4-byte aligned unless AlignConstantIslands is set.
if (!AlignConstantIslands)
- return 2;
+ return Align(4);
unsigned CPI = CPEMI.getOperand(1).getIndex();
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
- unsigned Align = MCP->getConstants()[CPI].getAlignment();
- assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
- return Log2_32(Align);
+ return Align(MCP->getConstants()[CPI].getAlignment());
}
/// initializeFunctionInfo - Do the initial scan of the function, building up
@@ -940,13 +934,13 @@ bool MipsConstantIslands::isOffsetInRange(unsigned UserOffset,
bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
MachineBasicBlock* Water, CPUser &U,
unsigned &Growth) {
- unsigned CPELogAlign = getCPELogAlign(*U.CPEMI);
- unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
- unsigned NextBlockOffset, NextBlockAlignment;
+ unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();
+ unsigned NextBlockOffset;
+ Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
if (NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
- NextBlockAlignment = 0;
+ NextBlockAlignment = Align::None();
} else {
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
NextBlockAlignment = NextBlock->getAlignment();
@@ -961,7 +955,7 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
Growth = CPEEnd - NextBlockOffset;
// Compute the padding that would go at the end of the CPE to align the next
// block.
- Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
+ Growth += offsetToAlignment(CPEEnd, NextBlockAlignment);
// If the CPE is to be inserted before the instruction, that will raise
// the offset of the instruction. Also account for unknown alignment padding
@@ -1221,7 +1215,6 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
- unsigned CPELogAlign = getCPELogAlign(*CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1231,7 +1224,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
// Size of branch to insert.
unsigned Delta = 2;
// Compute the offset where the CPE will begin.
- unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
+ unsigned CPEOffset = UserBBI.postOffset() + Delta;
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
@@ -1257,9 +1250,8 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
- // LogAlign which is the largest possible alignment in the function.
- unsigned LogAlign = MF->getAlignment();
- assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
+ // Align which is the largest possible alignment in the function.
+ const Align Align = MF->getAlignment();
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
BaseInsertOffset));
@@ -1270,7 +1262,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
BaseInsertOffset -= 4;
LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
- << " la=" << LogAlign << '\n');
+ << " la=" << Log2(Align) << '\n');
// This could point off the end of the block if we've already got constant
// pool entries following this block; only the last one is in the water list.
@@ -1295,8 +1287,8 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUIndex];
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
// Shift intertion point by one unit of alignment so it is within reach.
- BaseInsertOffset -= 1u << LogAlign;
- EndInsertOffset -= 1u << LogAlign;
+ BaseInsertOffset -= Align.value();
+ EndInsertOffset -= Align.value();
}
// This is overly conservative, as we don't account for CPEMIs being
// reused within the block, but it doesn't matter much. Also assume CPEs
@@ -1399,7 +1391,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
++NumCPEs;
// Mark the basic block as aligned as required by the const-pool entry.
- NewIsland->setAlignment(getCPELogAlign(*U.CPEMI));
+ NewIsland->setAlignment(getCPEAlign(*U.CPEMI));
// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
@@ -1431,10 +1423,11 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(0);
- } else
+ CPEBB->setAlignment(Align(1));
+ } else {
// Entries are sorted by descending alignment, so realign from the front.
- CPEBB->setAlignment(getCPELogAlign(*CPEBB->begin()));
+ CPEBB->setAlignment(getCPEAlign(*CPEBB->begin()));
+ }
adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
@@ -1529,7 +1522,7 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
// We should have a way to back out this alignment restriction if we "can" later.
// but it is not harmful.
//
- DestBB->setAlignment(2);
+ DestBB->setAlignment(Align(4));
Br.MaxDisp = ((1<<24)-1) * 2;
MI->setDesc(TII->get(Mips::JalB16));
}
diff --git a/lib/Target/Mips/MipsDSPInstrInfo.td b/lib/Target/Mips/MipsDSPInstrInfo.td
index daca8b907081..d3e68c014fb7 100644
--- a/lib/Target/Mips/MipsDSPInstrInfo.td
+++ b/lib/Target/Mips/MipsDSPInstrInfo.td
@@ -12,12 +12,19 @@
// ImmLeaf
def immZExt1 : ImmLeaf<i32, [{return isUInt<1>(Imm);}]>;
+def timmZExt1 : ImmLeaf<i32, [{return isUInt<1>(Imm);}], NOOP_SDNodeXForm, timm>;
def immZExt2 : ImmLeaf<i32, [{return isUInt<2>(Imm);}]>;
+def timmZExt2 : ImmLeaf<i32, [{return isUInt<2>(Imm);}], NOOP_SDNodeXForm, timm>;
def immZExt3 : ImmLeaf<i32, [{return isUInt<3>(Imm);}]>;
+def timmZExt3 : ImmLeaf<i32, [{return isUInt<3>(Imm);}], NOOP_SDNodeXForm, timm>;
def immZExt4 : ImmLeaf<i32, [{return isUInt<4>(Imm);}]>;
+def timmZExt4 : ImmLeaf<i32, [{return isUInt<4>(Imm);}], NOOP_SDNodeXForm, timm>;
def immZExt8 : ImmLeaf<i32, [{return isUInt<8>(Imm);}]>;
+def timmZExt8 : ImmLeaf<i32, [{return isUInt<8>(Imm);}], NOOP_SDNodeXForm, timm>;
def immZExt10 : ImmLeaf<i32, [{return isUInt<10>(Imm);}]>;
+def timmZExt10 : ImmLeaf<i32, [{return isUInt<10>(Imm);}], NOOP_SDNodeXForm, timm>;
def immSExt6 : ImmLeaf<i32, [{return isInt<6>(Imm);}]>;
+def timmSExt6 : ImmLeaf<i32, [{return isInt<6>(Imm);}], NOOP_SDNodeXForm, timm>;
def immSExt10 : ImmLeaf<i32, [{return isInt<10>(Imm);}]>;
// Mips-specific dsp nodes
@@ -306,7 +313,7 @@ class PRECR_SRA_PH_W_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
dag OutOperandList = (outs ROT:$rt);
dag InOperandList = (ins ROS:$rs, uimm5:$sa, ROS:$src);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
- list<dag> Pattern = [(set ROT:$rt, (OpNode ROS:$src, ROS:$rs, immZExt5:$sa))];
+ list<dag> Pattern = [(set ROT:$rt, (OpNode ROS:$src, ROS:$rs, timmZExt5:$sa))];
InstrItinClass Itinerary = itin;
string Constraints = "$src = $rt";
string BaseOpcode = instr_asm;
@@ -443,7 +450,7 @@ class RDDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
dag OutOperandList = (outs GPR32Opnd:$rd);
dag InOperandList = (ins uimm10:$mask);
string AsmString = !strconcat(instr_asm, "\t$rd, $mask");
- list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode immZExt10:$mask))];
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode timmZExt10:$mask))];
InstrItinClass Itinerary = itin;
string BaseOpcode = instr_asm;
bit isMoveReg = 1;
@@ -454,7 +461,7 @@ class WRDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
dag OutOperandList = (outs);
dag InOperandList = (ins GPR32Opnd:$rs, uimm10:$mask);
string AsmString = !strconcat(instr_asm, "\t$rs, $mask");
- list<dag> Pattern = [(OpNode GPR32Opnd:$rs, immZExt10:$mask)];
+ list<dag> Pattern = [(OpNode GPR32Opnd:$rs, timmZExt10:$mask)];
InstrItinClass Itinerary = itin;
string BaseOpcode = instr_asm;
bit isMoveReg = 1;
@@ -1096,14 +1103,14 @@ class SHRLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.ph", int_mips_shrl_ph,
NoItinerary, DSPROpnd>;
// Misc
-class APPEND_DESC : APPEND_DESC_BASE<"append", int_mips_append, uimm5, immZExt5,
+class APPEND_DESC : APPEND_DESC_BASE<"append", int_mips_append, uimm5, timmZExt5,
NoItinerary>;
-class BALIGN_DESC : APPEND_DESC_BASE<"balign", int_mips_balign, uimm2, immZExt2,
+class BALIGN_DESC : APPEND_DESC_BASE<"balign", int_mips_balign, uimm2, timmZExt2,
NoItinerary>;
class PREPEND_DESC : APPEND_DESC_BASE<"prepend", int_mips_prepend, uimm5,
- immZExt5, NoItinerary>;
+ timmZExt5, NoItinerary>;
// Pseudos.
def BPOSGE32_PSEUDO : BPOSGE32_PSEUDO_DESC_BASE<int_mips_bposge32,
diff --git a/lib/Target/Mips/MipsExpandPseudo.cpp b/lib/Target/Mips/MipsExpandPseudo.cpp
index 65d84a6c44a0..00cd284e7094 100644
--- a/lib/Target/Mips/MipsExpandPseudo.cpp
+++ b/lib/Target/Mips/MipsExpandPseudo.cpp
@@ -99,15 +99,15 @@ bool MipsExpandPseudo::expandAtomicCmpSwapSubword(
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
}
- unsigned Dest = I->getOperand(0).getReg();
- unsigned Ptr = I->getOperand(1).getReg();
- unsigned Mask = I->getOperand(2).getReg();
- unsigned ShiftCmpVal = I->getOperand(3).getReg();
- unsigned Mask2 = I->getOperand(4).getReg();
- unsigned ShiftNewVal = I->getOperand(5).getReg();
- unsigned ShiftAmnt = I->getOperand(6).getReg();
- unsigned Scratch = I->getOperand(7).getReg();
- unsigned Scratch2 = I->getOperand(8).getReg();
+ Register Dest = I->getOperand(0).getReg();
+ Register Ptr = I->getOperand(1).getReg();
+ Register Mask = I->getOperand(2).getReg();
+ Register ShiftCmpVal = I->getOperand(3).getReg();
+ Register Mask2 = I->getOperand(4).getReg();
+ Register ShiftNewVal = I->getOperand(5).getReg();
+ Register ShiftAmnt = I->getOperand(6).getReg();
+ Register Scratch = I->getOperand(7).getReg();
+ Register Scratch2 = I->getOperand(8).getReg();
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB.getBasicBlock();
@@ -240,11 +240,11 @@ bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB,
MOVE = Mips::OR64;
}
- unsigned Dest = I->getOperand(0).getReg();
- unsigned Ptr = I->getOperand(1).getReg();
- unsigned OldVal = I->getOperand(2).getReg();
- unsigned NewVal = I->getOperand(3).getReg();
- unsigned Scratch = I->getOperand(4).getReg();
+ Register Dest = I->getOperand(0).getReg();
+ Register Ptr = I->getOperand(1).getReg();
+ Register OldVal = I->getOperand(2).getReg();
+ Register NewVal = I->getOperand(3).getReg();
+ Register Scratch = I->getOperand(4).getReg();
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB.getBasicBlock();
@@ -374,15 +374,15 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword(
llvm_unreachable("Unknown subword atomic pseudo for expansion!");
}
- unsigned Dest = I->getOperand(0).getReg();
- unsigned Ptr = I->getOperand(1).getReg();
- unsigned Incr = I->getOperand(2).getReg();
- unsigned Mask = I->getOperand(3).getReg();
- unsigned Mask2 = I->getOperand(4).getReg();
- unsigned ShiftAmnt = I->getOperand(5).getReg();
- unsigned OldVal = I->getOperand(6).getReg();
- unsigned BinOpRes = I->getOperand(7).getReg();
- unsigned StoreVal = I->getOperand(8).getReg();
+ Register Dest = I->getOperand(0).getReg();
+ Register Ptr = I->getOperand(1).getReg();
+ Register Incr = I->getOperand(2).getReg();
+ Register Mask = I->getOperand(3).getReg();
+ Register Mask2 = I->getOperand(4).getReg();
+ Register ShiftAmnt = I->getOperand(5).getReg();
+ Register OldVal = I->getOperand(6).getReg();
+ Register BinOpRes = I->getOperand(7).getReg();
+ Register StoreVal = I->getOperand(8).getReg();
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
@@ -513,10 +513,10 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB,
BEQ = Mips::BEQ64;
}
- unsigned OldVal = I->getOperand(0).getReg();
- unsigned Ptr = I->getOperand(1).getReg();
- unsigned Incr = I->getOperand(2).getReg();
- unsigned Scratch = I->getOperand(3).getReg();
+ Register OldVal = I->getOperand(0).getReg();
+ Register Ptr = I->getOperand(1).getReg();
+ Register Incr = I->getOperand(2).getReg();
+ Register Scratch = I->getOperand(3).getReg();
unsigned Opcode = 0;
unsigned OR = 0;
diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp
index 123d3cc242f0..80f288ac500c 100644
--- a/lib/Target/Mips/MipsFastISel.cpp
+++ b/lib/Target/Mips/MipsFastISel.cpp
@@ -1162,14 +1162,20 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
if (ArgVT == MVT::f32) {
VA.convertToReg(Mips::F12);
} else if (ArgVT == MVT::f64) {
- VA.convertToReg(Mips::D6);
+ if (Subtarget->isFP64bit())
+ VA.convertToReg(Mips::D6_64);
+ else
+ VA.convertToReg(Mips::D6);
}
} else if (i == 1) {
if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
if (ArgVT == MVT::f32) {
VA.convertToReg(Mips::F14);
} else if (ArgVT == MVT::f64) {
- VA.convertToReg(Mips::D7);
+ if (Subtarget->isFP64bit())
+ VA.convertToReg(Mips::D7_64);
+ else
+ VA.convertToReg(Mips::D7);
}
}
}
@@ -1722,7 +1728,7 @@ bool MipsFastISel::selectRet(const Instruction *I) {
return false;
unsigned SrcReg = Reg + VA.getValNo();
- unsigned DestReg = VA.getLocReg();
+ Register DestReg = VA.getLocReg();
// Avoid a cross-class copy. This is very unlikely.
if (!MRI.getRegClass(SrcReg)->contains(DestReg))
return false;
diff --git a/lib/Target/Mips/MipsFrameLowering.h b/lib/Target/Mips/MipsFrameLowering.h
index 0537cfd1cb30..612b2b712fa8 100644
--- a/lib/Target/Mips/MipsFrameLowering.h
+++ b/lib/Target/Mips/MipsFrameLowering.h
@@ -24,8 +24,9 @@ protected:
const MipsSubtarget &STI;
public:
- explicit MipsFrameLowering(const MipsSubtarget &sti, unsigned Alignment)
- : TargetFrameLowering(StackGrowsDown, Alignment, 0, Alignment), STI(sti) {}
+ explicit MipsFrameLowering(const MipsSubtarget &sti, Align Alignment)
+ : TargetFrameLowering(StackGrowsDown, Alignment, 0, Alignment), STI(sti) {
+ }
static const MipsFrameLowering *create(const MipsSubtarget &ST);
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 9ba54d6bb73c..e5997af3bcc5 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -65,7 +65,7 @@ bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
/// getGlobalBaseReg - Output the instructions required to put the
/// GOT address into a register.
SDNode *MipsDAGToDAGISel::getGlobalBaseReg() {
- unsigned GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg();
+ Register GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg();
return CurDAG->getRegister(GlobalBaseReg, getTargetLowering()->getPointerTy(
CurDAG->getDataLayout()))
.getNode();
@@ -217,6 +217,51 @@ bool MipsDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
return false;
}
+/// Convert vector addition with vector subtraction if that allows to encode
+/// constant as an immediate and thus avoid extra 'ldi' instruction.
+/// add X, <-1, -1...> --> sub X, <1, 1...>
+bool MipsDAGToDAGISel::selectVecAddAsVecSubIfProfitable(SDNode *Node) {
+ assert(Node->getOpcode() == ISD::ADD && "Should only get 'add' here.");
+
+ EVT VT = Node->getValueType(0);
+ assert(VT.isVector() && "Should only be called for vectors.");
+
+ SDValue X = Node->getOperand(0);
+ SDValue C = Node->getOperand(1);
+
+ auto *BVN = dyn_cast<BuildVectorSDNode>(C);
+ if (!BVN)
+ return false;
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
+ 8, !Subtarget->isLittle()))
+ return false;
+
+ auto IsInlineConstant = [](const APInt &Imm) { return Imm.isIntN(5); };
+
+ if (IsInlineConstant(SplatValue))
+ return false; // Can already be encoded as an immediate.
+
+ APInt NegSplatValue = 0 - SplatValue;
+ if (!IsInlineConstant(NegSplatValue))
+ return false; // Even if we negate it it won't help.
+
+ SDLoc DL(Node);
+
+ SDValue NegC = CurDAG->FoldConstantArithmetic(
+ ISD::SUB, DL, VT, CurDAG->getConstant(0, DL, VT).getNode(), C.getNode());
+ assert(NegC && "Constant-folding failed!");
+ SDValue NewNode = CurDAG->getNode(ISD::SUB, DL, VT, X, NegC);
+
+ ReplaceNode(Node, NewNode.getNode());
+ SelectCode(NewNode.getNode());
+ return true;
+}
+
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
void MipsDAGToDAGISel::Select(SDNode *Node) {
@@ -236,6 +281,12 @@ void MipsDAGToDAGISel::Select(SDNode *Node) {
switch(Opcode) {
default: break;
+ case ISD::ADD:
+ if (Node->getSimpleValueType(0).isVector() &&
+ selectVecAddAsVecSubIfProfitable(Node))
+ return;
+ break;
+
// Get target GOT address.
case ISD::GLOBAL_OFFSET_TABLE:
ReplaceNode(Node, getGlobalBaseReg());
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.h b/lib/Target/Mips/MipsISelDAGToDAG.h
index bae3bbf71f3b..a768589b374b 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -125,6 +125,11 @@ private:
/// starting at bit zero.
virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
+ /// Convert vector addition with vector subtraction if that allows to encode
+ /// constant as an immediate and thus avoid extra 'ldi' instruction.
+ /// add X, <-1, -1...> --> sub X, <1, 1...>
+ bool selectVecAddAsVecSubIfProfitable(SDNode *Node);
+
void Select(SDNode *N) override;
virtual bool trySelect(SDNode *Node) = 0;
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 0ff09007da4b..bf1b4756b24f 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -83,10 +83,6 @@ using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool>
-LargeGOT("mxgot", cl::Hidden,
- cl::desc("MIPS: Enable GOT larger than 64k."), cl::init(false));
-
-static cl::opt<bool>
NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
cl::desc("MIPS: Don't trap on integer division by zero."),
cl::init(false));
@@ -330,7 +326,7 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
}
// Set LoadExtAction for f16 vectors to Expand
- for (MVT VT : MVT::fp_vector_valuetypes()) {
+ for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
if (F16VT.isValid())
setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
@@ -518,11 +514,12 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setLibcallName(RTLIB::SRA_I128, nullptr);
}
- setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2);
+ setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));
// The arguments on the stack are defined in terms of 4-byte slots on O32
// and 8-byte slots on N32/N64.
- setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? 8 : 4);
+ setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
+ : Align(4));
setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
@@ -552,8 +549,9 @@ MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
!Subtarget.inMicroMipsMode();
// Disable if either of the following is true:
- // We do not generate PIC, the ABI is not O32, LargeGOT is being used.
- if (!TM.isPositionIndependent() || !TM.getABI().IsO32() || LargeGOT)
+ // We do not generate PIC, the ABI is not O32, XGOT is being used.
+ if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
+ Subtarget.useXGOT())
UseFastISel = false;
return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
@@ -1257,7 +1255,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
static unsigned
addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
- unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
+ Register VReg = MF.getRegInfo().createVirtualRegister(RC);
MF.getRegInfo().addLiveIn(PReg, VReg);
return VReg;
}
@@ -1477,10 +1475,10 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
llvm_unreachable("Unknown pseudo atomic for replacement!");
}
- unsigned OldVal = MI.getOperand(0).getReg();
- unsigned Ptr = MI.getOperand(1).getReg();
- unsigned Incr = MI.getOperand(2).getReg();
- unsigned Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
+ Register OldVal = MI.getOperand(0).getReg();
+ Register Ptr = MI.getOperand(1).getReg();
+ Register Incr = MI.getOperand(2).getReg();
+ Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
MachineBasicBlock::iterator II(MI);
@@ -1519,8 +1517,8 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
// containing the word.
//
- unsigned PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
- unsigned IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));
+ Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
+ Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));
BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
@@ -1556,7 +1554,7 @@ MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
- unsigned ScrReg = RegInfo.createVirtualRegister(RC);
+ Register ScrReg = RegInfo.createVirtualRegister(RC);
assert(Size < 32);
int64_t ShiftImm = 32 - (Size * 8);
@@ -1581,21 +1579,21 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Dest = MI.getOperand(0).getReg();
- unsigned Ptr = MI.getOperand(1).getReg();
- unsigned Incr = MI.getOperand(2).getReg();
-
- unsigned AlignedAddr = RegInfo.createVirtualRegister(RCp);
- unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
- unsigned Mask = RegInfo.createVirtualRegister(RC);
- unsigned Mask2 = RegInfo.createVirtualRegister(RC);
- unsigned Incr2 = RegInfo.createVirtualRegister(RC);
- unsigned MaskLSB2 = RegInfo.createVirtualRegister(RCp);
- unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
- unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
- unsigned Scratch = RegInfo.createVirtualRegister(RC);
- unsigned Scratch2 = RegInfo.createVirtualRegister(RC);
- unsigned Scratch3 = RegInfo.createVirtualRegister(RC);
+ Register Dest = MI.getOperand(0).getReg();
+ Register Ptr = MI.getOperand(1).getReg();
+ Register Incr = MI.getOperand(2).getReg();
+
+ Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
+ Register ShiftAmt = RegInfo.createVirtualRegister(RC);
+ Register Mask = RegInfo.createVirtualRegister(RC);
+ Register Mask2 = RegInfo.createVirtualRegister(RC);
+ Register Incr2 = RegInfo.createVirtualRegister(RC);
+ Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
+ Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
+ Register MaskUpper = RegInfo.createVirtualRegister(RC);
+ Register Scratch = RegInfo.createVirtualRegister(RC);
+ Register Scratch2 = RegInfo.createVirtualRegister(RC);
+ Register Scratch3 = RegInfo.createVirtualRegister(RC);
unsigned AtomicOp = 0;
switch (MI.getOpcode()) {
@@ -1678,7 +1676,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
- unsigned Off = RegInfo.createVirtualRegister(RC);
+ Register Off = RegInfo.createVirtualRegister(RC);
BuildMI(BB, DL, TII->get(Mips::XORi), Off)
.addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
@@ -1738,12 +1736,12 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
: Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
- unsigned Dest = MI.getOperand(0).getReg();
- unsigned Ptr = MI.getOperand(1).getReg();
- unsigned OldVal = MI.getOperand(2).getReg();
- unsigned NewVal = MI.getOperand(3).getReg();
+ Register Dest = MI.getOperand(0).getReg();
+ Register Ptr = MI.getOperand(1).getReg();
+ Register OldVal = MI.getOperand(2).getReg();
+ Register NewVal = MI.getOperand(3).getReg();
- unsigned Scratch = MRI.createVirtualRegister(RC);
+ Register Scratch = MRI.createVirtualRegister(RC);
MachineBasicBlock::iterator II(MI);
// We need to create copies of the various registers and kill them at the
@@ -1751,9 +1749,9 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
// after fast register allocation, the spills will end up outside of the
// blocks that their values are defined in, causing livein errors.
- unsigned PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
- unsigned OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
- unsigned NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
+ Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
+ Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
+ Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
@@ -1790,22 +1788,22 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Dest = MI.getOperand(0).getReg();
- unsigned Ptr = MI.getOperand(1).getReg();
- unsigned CmpVal = MI.getOperand(2).getReg();
- unsigned NewVal = MI.getOperand(3).getReg();
-
- unsigned AlignedAddr = RegInfo.createVirtualRegister(RCp);
- unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
- unsigned Mask = RegInfo.createVirtualRegister(RC);
- unsigned Mask2 = RegInfo.createVirtualRegister(RC);
- unsigned ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
- unsigned ShiftedNewVal = RegInfo.createVirtualRegister(RC);
- unsigned MaskLSB2 = RegInfo.createVirtualRegister(RCp);
- unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
- unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
- unsigned MaskedCmpVal = RegInfo.createVirtualRegister(RC);
- unsigned MaskedNewVal = RegInfo.createVirtualRegister(RC);
+ Register Dest = MI.getOperand(0).getReg();
+ Register Ptr = MI.getOperand(1).getReg();
+ Register CmpVal = MI.getOperand(2).getReg();
+ Register NewVal = MI.getOperand(3).getReg();
+
+ Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
+ Register ShiftAmt = RegInfo.createVirtualRegister(RC);
+ Register Mask = RegInfo.createVirtualRegister(RC);
+ Register Mask2 = RegInfo.createVirtualRegister(RC);
+ Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
+ Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
+ Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
+ Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
+ Register MaskUpper = RegInfo.createVirtualRegister(RC);
+ Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
+ Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
: Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
@@ -1820,8 +1818,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
// value isn't a problem.
// The Dead flag is needed as the value in scratch isn't used by any other
// instruction. Kill isn't used as Dead is more precise.
- unsigned Scratch = RegInfo.createVirtualRegister(RC);
- unsigned Scratch2 = RegInfo.createVirtualRegister(RC);
+ Register Scratch = RegInfo.createVirtualRegister(RC);
+ Register Scratch2 = RegInfo.createVirtualRegister(RC);
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -1859,7 +1857,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
- unsigned Off = RegInfo.createVirtualRegister(RC);
+ Register Off = RegInfo.createVirtualRegister(RC);
BuildMI(BB, DL, TII->get(Mips::XORi), Off)
.addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
@@ -1967,10 +1965,10 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
// %gp_rel relocation
return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
- // %hi/%lo relocation
+ // %hi/%lo relocation
return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
- // %highest/%higher/%hi/%lo relocation
- : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
+ // %highest/%higher/%hi/%lo relocation
+ : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
}
// Every other architecture would use shouldAssumeDSOLocal in here, but
@@ -1987,7 +1985,7 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
if (GV->hasLocalLinkage())
return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
- if (LargeGOT)
+ if (Subtarget.useXGOT())
return getAddrGlobalLargeGOT(
N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
DAG.getEntryNode(),
@@ -2149,7 +2147,8 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Node->getValueType(0);
SDValue Chain = Node->getOperand(0);
SDValue VAListPtr = Node->getOperand(1);
- unsigned Align = Node->getConstantOperandVal(3);
+ const Align Align =
+ llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc DL(Node);
unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
@@ -2166,14 +2165,13 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// when the pointer is still aligned from the last va_arg (or pair of
// va_args for the i64 on O32 case).
if (Align > getMinStackArgumentAlignment()) {
- assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
+ VAList = DAG.getNode(
+ ISD::ADD, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));
- VAList = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
- DAG.getConstant(Align - 1, DL, VAList.getValueType()));
-
- VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
- DAG.getConstant(-(int64_t)Align, DL,
- VAList.getValueType()));
+ VAList = DAG.getNode(
+ ISD::AND, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
}
// Increment the pointer, VAList, to the next vaarg.
@@ -2870,7 +2868,7 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
#include "MipsGenCallingConv.inc"
CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const{
- return CC_Mips;
+ return CC_Mips_FixedArg;
}
CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const{
@@ -3167,7 +3165,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Arg, DAG.getConstant(1, DL, MVT::i32));
if (!Subtarget.isLittle())
std::swap(Lo, Hi);
- unsigned LocRegLo = VA.getLocReg();
+ Register LocRegLo = VA.getLocReg();
unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
@@ -3270,7 +3268,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (InternalLinkage)
Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
- else if (LargeGOT) {
+ else if (Subtarget.useXGOT()) {
Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Val));
@@ -3292,7 +3290,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!IsPIC) // static
Callee = DAG.getTargetExternalSymbol(
Sym, getPointerTy(DAG.getDataLayout()), MipsII::MO_NO_FLAG);
- else if (LargeGOT) {
+ else if (Subtarget.useXGOT()) {
Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
FuncInfo->callPtrInfo(Sym));
@@ -3523,7 +3521,7 @@ SDValue MipsTargetLowering::LowerFormalArguments(
// Arguments stored on registers
if (IsRegLoc) {
MVT RegVT = VA.getLocVT();
- unsigned ArgReg = VA.getLocReg();
+ Register ArgReg = VA.getLocReg();
const TargetRegisterClass *RC = getRegClassFor(RegVT);
// Transform the arguments stored on
@@ -4568,20 +4566,20 @@ MachineBasicBlock *MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
-unsigned MipsTargetLowering::getRegisterByName(const char* RegName, EVT VT,
- SelectionDAG &DAG) const {
+Register MipsTargetLowering::getRegisterByName(const char* RegName, EVT VT,
+ const MachineFunction &MF) const {
// Named registers is expected to be fairly rare. For now, just support $28
// since the linux kernel uses it.
if (Subtarget.isGP64bit()) {
- unsigned Reg = StringSwitch<unsigned>(RegName)
+ Register Reg = StringSwitch<Register>(RegName)
.Case("$28", Mips::GP_64)
- .Default(0);
+ .Default(Register());
if (Reg)
return Reg;
} else {
- unsigned Reg = StringSwitch<unsigned>(RegName)
+ Register Reg = StringSwitch<Register>(RegName)
.Case("$28", Mips::GP)
- .Default(0);
+ .Default(Register());
if (Reg)
return Reg;
}
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 2db60e9801f1..0a5cddd45afb 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -304,11 +304,12 @@ class TargetRegisterClass;
unsigned &NumIntermediates, MVT &RegisterVT) const override;
/// Return the correct alignment for the current calling convention.
- unsigned getABIAlignmentForCallingConv(Type *ArgTy,
- DataLayout DL) const override {
+ Align getABIAlignmentForCallingConv(Type *ArgTy,
+ DataLayout DL) const override {
+ const Align ABIAlign(DL.getABITypeAlignment(ArgTy));
if (ArgTy->isVectorTy())
- return std::min(DL.getABITypeAlignment(ArgTy), 8U);
- return DL.getABITypeAlignment(ArgTy);
+ return std::min(ABIAlign, Align(8));
+ return ABIAlign;
}
ISD::NodeType getExtendForAtomicOps() const override {
@@ -347,8 +348,8 @@ class TargetRegisterClass;
void HandleByVal(CCState *, unsigned &, unsigned) const override;
- unsigned getRegisterByName(const char* RegName, EVT VT,
- SelectionDAG &DAG) const override;
+ Register getRegisterByName(const char* RegName, EVT VT,
+ const MachineFunction &MF) const override;
/// If a physical register, this returns the register that receives the
/// exception address on entry to an EH pad.
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index fbd56206b249..6bb25ee5754d 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -677,7 +677,8 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
return MIB;
}
-bool MipsInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
+bool MipsInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
+ unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
assert(!MI.isBundle() &&
"TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index a626c0c3fdb8..092a960b4ba7 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -148,7 +148,7 @@ public:
MachineInstrBuilder genInstrWithNewOpc(unsigned NewOpc,
MachineBasicBlock::iterator I) const;
- bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
+ bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
/// Perform target specific instruction verification.
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index a4e85a38ab28..58167e0f344d 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -211,9 +211,9 @@ def HasCnMips : Predicate<"Subtarget->hasCnMips()">,
AssemblerPredicate<"FeatureCnMips">;
def NotCnMips : Predicate<"!Subtarget->hasCnMips()">,
AssemblerPredicate<"!FeatureCnMips">;
-def IsSym32 : Predicate<"Subtarget->HasSym32()">,
+def IsSym32 : Predicate<"Subtarget->hasSym32()">,
AssemblerPredicate<"FeatureSym32">;
-def IsSym64 : Predicate<"!Subtarget->HasSym32()">,
+def IsSym64 : Predicate<"!Subtarget->hasSym32()">,
AssemblerPredicate<"!FeatureSym32">;
def IsN64 : Predicate<"Subtarget->isABI_N64()">;
def IsNotN64 : Predicate<"!Subtarget->isABI_N64()">;
@@ -1263,6 +1263,7 @@ def immSExt16 : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
// Node immediate fits as 7-bit zero extended on target immediate.
def immZExt7 : PatLeaf<(imm), [{ return isUInt<7>(N->getZExtValue()); }]>;
+def timmZExt7 : PatLeaf<(timm), [{ return isUInt<7>(N->getZExtValue()); }]>;
// Node immediate fits as 16-bit zero extended on target immediate.
// The LO16 param means that only the lower 16 bits of the node
@@ -1295,6 +1296,7 @@ def immZExt32 : PatLeaf<(imm), [{ return isUInt<32>(N->getZExtValue()); }]>;
// shamt field must fit in 5 bits.
def immZExt5 : ImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;
+def timmZExt5 : TImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;
def immZExt5Plus1 : PatLeaf<(imm), [{
return isUInt<5>(N->getZExtValue() - 1);
@@ -3142,25 +3144,31 @@ multiclass MipsHiLoRelocs<Instruction Lui, Instruction Addiu,
def : MipsPat<(MipsHi tconstpool:$in), (Lui tconstpool:$in)>;
def : MipsPat<(MipsHi texternalsym:$in), (Lui texternalsym:$in)>;
- def : MipsPat<(MipsLo tglobaladdr:$in), (Addiu ZeroReg, tglobaladdr:$in)>;
+ def : MipsPat<(MipsLo tglobaladdr:$in),
+ (Addiu ZeroReg, tglobaladdr:$in)>;
def : MipsPat<(MipsLo tblockaddress:$in),
(Addiu ZeroReg, tblockaddress:$in)>;
- def : MipsPat<(MipsLo tjumptable:$in), (Addiu ZeroReg, tjumptable:$in)>;
- def : MipsPat<(MipsLo tconstpool:$in), (Addiu ZeroReg, tconstpool:$in)>;
+ def : MipsPat<(MipsLo tjumptable:$in),
+ (Addiu ZeroReg, tjumptable:$in)>;
+ def : MipsPat<(MipsLo tconstpool:$in),
+ (Addiu ZeroReg, tconstpool:$in)>;
def : MipsPat<(MipsLo tglobaltlsaddr:$in),
(Addiu ZeroReg, tglobaltlsaddr:$in)>;
- def : MipsPat<(MipsLo texternalsym:$in), (Addiu ZeroReg, texternalsym:$in)>;
+ def : MipsPat<(MipsLo texternalsym:$in),
+ (Addiu ZeroReg, texternalsym:$in)>;
def : MipsPat<(add GPROpnd:$hi, (MipsLo tglobaladdr:$lo)),
- (Addiu GPROpnd:$hi, tglobaladdr:$lo)>;
+ (Addiu GPROpnd:$hi, tglobaladdr:$lo)>;
def : MipsPat<(add GPROpnd:$hi, (MipsLo tblockaddress:$lo)),
- (Addiu GPROpnd:$hi, tblockaddress:$lo)>;
+ (Addiu GPROpnd:$hi, tblockaddress:$lo)>;
def : MipsPat<(add GPROpnd:$hi, (MipsLo tjumptable:$lo)),
- (Addiu GPROpnd:$hi, tjumptable:$lo)>;
+ (Addiu GPROpnd:$hi, tjumptable:$lo)>;
def : MipsPat<(add GPROpnd:$hi, (MipsLo tconstpool:$lo)),
- (Addiu GPROpnd:$hi, tconstpool:$lo)>;
+ (Addiu GPROpnd:$hi, tconstpool:$lo)>;
def : MipsPat<(add GPROpnd:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (Addiu GPROpnd:$hi, tglobaltlsaddr:$lo)>;
+ (Addiu GPROpnd:$hi, tglobaltlsaddr:$lo)>;
+ def : MipsPat<(add GPROpnd:$hi, (MipsLo texternalsym:$lo)),
+ (Addiu GPROpnd:$hi, texternalsym:$lo)>;
}
// wrapper_pic
diff --git a/lib/Target/Mips/MipsInstructionSelector.cpp b/lib/Target/Mips/MipsInstructionSelector.cpp
index 45a47ad3c087..f8fc7cb0898b 100644
--- a/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -17,6 +17,7 @@
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
#define DEBUG_TYPE "mips-isel"
@@ -33,7 +34,7 @@ public:
MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
const MipsRegisterBankInfo &RBI);
- bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
+ bool select(MachineInstr &I) override;
static const char *getName() { return DEBUG_TYPE; }
private:
@@ -44,6 +45,8 @@ private:
const TargetRegisterClass *
getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
const RegisterBankInfo &RBI) const;
+ unsigned selectLoadStoreOpCode(MachineInstr &I,
+ MachineRegisterInfo &MRI) const;
const MipsTargetMachine &TM;
const MipsSubtarget &STI;
@@ -84,7 +87,7 @@ MipsInstructionSelector::MipsInstructionSelector(
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
MachineRegisterInfo &MRI) const {
Register DstReg = I.getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ if (Register::isPhysicalRegister(DstReg))
return true;
const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
@@ -158,9 +161,15 @@ bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
}
/// Returning Opc indicates that we failed to select MIPS instruction opcode.
-static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
- unsigned RegBank, bool isFP64) {
- bool isStore = Opc == TargetOpcode::G_STORE;
+unsigned
+MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
+ MachineRegisterInfo &MRI) const {
+ STI.getRegisterInfo();
+ const Register DestReg = I.getOperand(0).getReg();
+ const unsigned RegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
+ const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
+ unsigned Opc = I.getOpcode();
+ const bool isStore = Opc == TargetOpcode::G_STORE;
if (RegBank == Mips::GPRBRegBankID) {
if (isStore)
switch (MemSizeInBytes) {
@@ -192,10 +201,24 @@ static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
case 4:
return isStore ? Mips::SWC1 : Mips::LWC1;
case 8:
- if (isFP64)
+ if (STI.isFP64bit())
return isStore ? Mips::SDC164 : Mips::LDC164;
else
return isStore ? Mips::SDC1 : Mips::LDC1;
+ case 16: {
+ assert(STI.hasMSA() && "Vector instructions require target with MSA.");
+ const unsigned VectorElementSizeInBytes =
+ MRI.getType(DestReg).getElementType().getSizeInBytes();
+ if (VectorElementSizeInBytes == 1)
+ return isStore ? Mips::ST_B : Mips::LD_B;
+ if (VectorElementSizeInBytes == 2)
+ return isStore ? Mips::ST_H : Mips::LD_H;
+ if (VectorElementSizeInBytes == 4)
+ return isStore ? Mips::ST_W : Mips::LD_W;
+ if (VectorElementSizeInBytes == 8)
+ return isStore ? Mips::ST_D : Mips::LD_D;
+ return Opc;
+ }
default:
return Opc;
}
@@ -203,8 +226,7 @@ static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
return Opc;
}
-bool MipsInstructionSelector::select(MachineInstr &I,
- CodeGenCoverage &CoverageInfo) const {
+bool MipsInstructionSelector::select(MachineInstr &I) {
MachineBasicBlock &MBB = *I.getParent();
MachineFunction &MF = *MBB.getParent();
@@ -231,7 +253,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
return true;
}
- if (selectImpl(I, CoverageInfo))
+ if (selectImpl(I, *CoverageInfo))
return true;
MachineInstr *MI = nullptr;
@@ -265,6 +287,11 @@ bool MipsInstructionSelector::select(MachineInstr &I,
.add(I.getOperand(2));
break;
}
+ case G_INTTOPTR:
+ case G_PTRTOINT: {
+ I.setDesc(TII.get(COPY));
+ return selectCopy(I, MRI);
+ }
case G_FRAME_INDEX: {
MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
.add(I.getOperand(0))
@@ -279,12 +306,71 @@ bool MipsInstructionSelector::select(MachineInstr &I,
.add(I.getOperand(1));
break;
}
+ case G_BRJT: {
+ unsigned EntrySize =
+ MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
+ assert(isPowerOf2_32(EntrySize) &&
+ "Non-power-of-two jump-table entry size not supported.");
+
+ Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
+ .addDef(JTIndex)
+ .addUse(I.getOperand(2).getReg())
+ .addImm(Log2_32(EntrySize));
+ if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
+ return false;
+
+ Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
+ .addDef(DestAddress)
+ .addUse(I.getOperand(0).getReg())
+ .addUse(JTIndex);
+ if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
+ return false;
+
+ Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ MachineInstr *LW =
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
+ .addDef(Dest)
+ .addUse(DestAddress)
+ .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
+ .addMemOperand(MF.getMachineMemOperand(
+ MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
+ if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
+ return false;
+
+ if (MF.getTarget().isPositionIndependent()) {
+ Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ LW->getOperand(0).setReg(DestTmp);
+ MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
+ .addDef(Dest)
+ .addUse(DestTmp)
+ .addUse(MF.getInfo<MipsFunctionInfo>()
+ ->getGlobalBaseRegForGlobalISel());
+ if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
+ return false;
+ }
+
+ MachineInstr *Branch =
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
+ .addUse(Dest);
+ if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
+ return false;
+
+ I.eraseFromParent();
+ return true;
+ }
+ case G_BRINDIRECT: {
+ MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
+ .add(I.getOperand(0));
+ break;
+ }
case G_PHI: {
const Register DestReg = I.getOperand(0).getReg();
const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
const TargetRegisterClass *DefRC = nullptr;
- if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ if (Register::isPhysicalRegister(DestReg))
DefRC = TRI.getRegClass(DestReg);
else
DefRC = getRegClassForTypeOnBank(OpSize,
@@ -297,26 +383,35 @@ bool MipsInstructionSelector::select(MachineInstr &I,
case G_LOAD:
case G_ZEXTLOAD:
case G_SEXTLOAD: {
- const Register DestReg = I.getOperand(0).getReg();
- const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
- const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
- const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();
-
- if (DestRegBank == Mips::GPRBRegBankID && OpSize != 32)
- return false;
-
- if (DestRegBank == Mips::FPRBRegBankID && OpSize != 32 && OpSize != 64)
- return false;
-
- const unsigned NewOpc = selectLoadStoreOpCode(
- I.getOpcode(), OpMemSizeInBytes, DestRegBank, STI.isFP64bit());
+ const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
if (NewOpc == I.getOpcode())
return false;
+ MachineOperand BaseAddr = I.getOperand(1);
+ int64_t SignedOffset = 0;
+ // Try to fold load/store + G_GEP + G_CONSTANT
+ // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
+ // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
+ // %LoadResult/%StoreSrc = load/store %Addr(p0)
+ // into:
+ // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
+
+ MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
+ if (Addr->getOpcode() == G_GEP) {
+ MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
+ if (Offset->getOpcode() == G_CONSTANT) {
+ APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
+ if (OffsetValue.isSignedIntN(16)) {
+ BaseAddr = Addr->getOperand(1);
+ SignedOffset = OffsetValue.getSExtValue();
+ }
+ }
+ }
+
MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
.add(I.getOperand(0))
- .add(I.getOperand(1))
- .addImm(0)
+ .add(BaseAddr)
+ .addImm(SignedOffset)
.addMemOperand(*I.memoperands_begin());
break;
}
@@ -356,6 +451,18 @@ bool MipsInstructionSelector::select(MachineInstr &I,
.add(I.getOperand(3));
break;
}
+ case G_IMPLICIT_DEF: {
+ MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
+ .add(I.getOperand(0));
+
+    // Set the register class based on the register bank; there can be both FPR
+    // and GPR implicit defs.
+ MRI.setRegClass(MI->getOperand(0).getReg(),
+ getRegClassForTypeOnBank(
+ MRI.getType(I.getOperand(0).getReg()).getSizeInBits(),
+ *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI),
+ RBI));
+ break;
+ }
case G_CONSTANT: {
MachineIRBuilder B(I);
if (!materialize32BitImm(I.getOperand(0).getReg(),
@@ -423,7 +530,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
Opcode = Mips::TRUNC_W_S;
else
Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
- unsigned ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
+ Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
.addDef(ResultInFPR)
.addUse(I.getOperand(1).getReg());
@@ -496,6 +603,24 @@ bool MipsInstructionSelector::select(MachineInstr &I,
I.eraseFromParent();
return true;
}
+ case G_JUMP_TABLE: {
+ if (MF.getTarget().isPositionIndependent()) {
+ MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
+ .addDef(I.getOperand(0).getReg())
+ .addReg(MF.getInfo<MipsFunctionInfo>()
+ ->getGlobalBaseRegForGlobalISel())
+ .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
+ .addMemOperand(
+ MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
+ MachineMemOperand::MOLoad, 4, 4));
+ } else {
+ MI =
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
+ .addDef(I.getOperand(0).getReg())
+ .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
+ }
+ break;
+ }
case G_ICMP: {
struct Instr {
unsigned Opcode;
@@ -626,7 +751,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
// MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;
- unsigned TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
.addDef(TrueInReg)
.addUse(Mips::ZERO)
@@ -654,6 +779,33 @@ bool MipsInstructionSelector::select(MachineInstr &I,
I.eraseFromParent();
return true;
}
+ case G_FENCE: {
+ MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
+ break;
+ }
+ case G_VASTART: {
+ MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
+ int FI = FuncInfo->getVarArgsFrameIndex();
+
+ Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ MachineInstr *LEA_ADDiu =
+ BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
+ .addDef(LeaReg)
+ .addFrameIndex(FI)
+ .addImm(0);
+ if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
+ return false;
+
+ MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
+ .addUse(LeaReg)
+ .addUse(I.getOperand(0).getReg())
+ .addImm(0);
+ if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
+ return false;
+
+ I.eraseFromParent();
+ return true;
+ }
default:
return false;
}
diff --git a/lib/Target/Mips/MipsLegalizerInfo.cpp b/lib/Target/Mips/MipsLegalizerInfo.cpp
index e442a81837ed..bb4a1d902d75 100644
--- a/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -16,18 +16,65 @@
using namespace llvm;
+struct TypesAndMemOps {
+ LLT ValTy;
+ LLT PtrTy;
+ unsigned MemSize;
+ bool MustBeNaturallyAligned;
+};
+
+static bool
+CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
+ std::initializer_list<TypesAndMemOps> SupportedValues) {
+ for (auto &Val : SupportedValues) {
+ if (Val.ValTy != Query.Types[0])
+ continue;
+ if (Val.PtrTy != Query.Types[1])
+ continue;
+ if (Val.MemSize != Query.MMODescrs[0].SizeInBits)
+ continue;
+ if (Val.MustBeNaturallyAligned &&
+ Query.MMODescrs[0].SizeInBits % Query.MMODescrs[0].AlignInBits != 0)
+ continue;
+ return true;
+ }
+ return false;
+}
+
+static bool CheckTyN(unsigned N, const LegalityQuery &Query,
+ std::initializer_list<LLT> SupportedValues) {
+ for (auto &Val : SupportedValues)
+ if (Val == Query.Types[N])
+ return true;
+ return false;
+}
+
MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
using namespace TargetOpcode;
const LLT s1 = LLT::scalar(1);
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
+ const LLT v16s8 = LLT::vector(16, 8);
+ const LLT v8s16 = LLT::vector(8, 16);
+ const LLT v4s32 = LLT::vector(4, 32);
+ const LLT v2s64 = LLT::vector(2, 64);
const LLT p0 = LLT::pointer(0, 32);
- getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
+ getActionDefinitionsBuilder({G_SUB, G_MUL})
.legalFor({s32})
.clampScalar(0, s32, s32);
+ getActionDefinitionsBuilder(G_ADD)
+ .legalIf([=, &ST](const LegalityQuery &Query) {
+ if (CheckTyN(0, Query, {s32}))
+ return true;
+ if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
+ return true;
+ return false;
+ })
+ .clampScalar(0, s32, s32);
+
getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
.lowerFor({{s32, s1}});
@@ -36,13 +83,26 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
.maxScalar(0, s32);
getActionDefinitionsBuilder({G_LOAD, G_STORE})
- .legalForTypesWithMemDesc({{s32, p0, 8, 8},
- {s32, p0, 16, 8},
- {s32, p0, 32, 8},
- {s64, p0, 64, 8},
- {p0, p0, 32, 8}})
+ .legalIf([=, &ST](const LegalityQuery &Query) {
+ if (CheckTy0Ty1MemSizeAlign(Query, {{s32, p0, 8, ST.hasMips32r6()},
+ {s32, p0, 16, ST.hasMips32r6()},
+ {s32, p0, 32, ST.hasMips32r6()},
+ {p0, p0, 32, ST.hasMips32r6()},
+ {s64, p0, 64, ST.hasMips32r6()}}))
+ return true;
+ if (ST.hasMSA() &&
+ CheckTy0Ty1MemSizeAlign(Query, {{v16s8, p0, 128, false},
+ {v8s16, p0, 128, false},
+ {v4s32, p0, 128, false},
+ {v2s64, p0, 128, false}}))
+ return true;
+ return false;
+ })
.minScalar(0, s32);
+ getActionDefinitionsBuilder(G_IMPLICIT_DEF)
+ .legalFor({s32, s64});
+
getActionDefinitionsBuilder(G_UNMERGE_VALUES)
.legalFor({{s32, s64}});
@@ -50,9 +110,17 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
.legalFor({{s64, s32}});
getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
- .legalForTypesWithMemDesc({{s32, p0, 8, 8},
- {s32, p0, 16, 8}})
- .minScalar(0, s32);
+ .legalForTypesWithMemDesc({{s32, p0, 8, 8},
+ {s32, p0, 16, 8}})
+ .clampScalar(0, s32, s32);
+
+ getActionDefinitionsBuilder({G_ZEXT, G_SEXT})
+ .legalIf([](const LegalityQuery &Query) { return false; })
+ .maxScalar(0, s32);
+
+ getActionDefinitionsBuilder(G_TRUNC)
+ .legalIf([](const LegalityQuery &Query) { return false; })
+ .maxScalar(1, s32);
getActionDefinitionsBuilder(G_SELECT)
.legalForCartesianProduct({p0, s32, s64}, {s32})
@@ -63,6 +131,12 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
.legalFor({s32})
.minScalar(0, s32);
+ getActionDefinitionsBuilder(G_BRJT)
+ .legalFor({{p0, s32}});
+
+ getActionDefinitionsBuilder(G_BRINDIRECT)
+ .legalFor({p0});
+
getActionDefinitionsBuilder(G_PHI)
.legalFor({p0, s32, s64})
.minScalar(0, s32);
@@ -77,8 +151,9 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
.libcallFor({s64});
getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
- .legalFor({s32, s32})
- .minScalar(1, s32);
+ .legalFor({{s32, s32}})
+ .clampScalar(1, s32, s32)
+ .clampScalar(0, s32, s32);
getActionDefinitionsBuilder(G_ICMP)
.legalForCartesianProduct({s32}, {s32, p0})
@@ -89,15 +164,24 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
.legalFor({s32})
.clampScalar(0, s32, s32);
- getActionDefinitionsBuilder(G_GEP)
+ getActionDefinitionsBuilder({G_GEP, G_INTTOPTR})
.legalFor({{p0, s32}});
+ getActionDefinitionsBuilder(G_PTRTOINT)
+ .legalFor({{s32, p0}});
+
getActionDefinitionsBuilder(G_FRAME_INDEX)
.legalFor({p0});
- getActionDefinitionsBuilder(G_GLOBAL_VALUE)
+ getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE})
.legalFor({p0});
+ getActionDefinitionsBuilder(G_DYN_STACKALLOC)
+ .lowerFor({{p0, s32}});
+
+ getActionDefinitionsBuilder(G_VASTART)
+ .legalFor({p0});
+
// FP instructions
getActionDefinitionsBuilder(G_FCONSTANT)
.legalFor({s32, s64});
@@ -126,6 +210,7 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
getActionDefinitionsBuilder(G_FPTOUI)
.libcallForCartesianProduct({s64}, {s64, s32})
+ .lowerForCartesianProduct({s32}, {s64, s32})
.minScalar(0, s32);
// Int to FP conversion instructions
@@ -136,8 +221,11 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
getActionDefinitionsBuilder(G_UITOFP)
.libcallForCartesianProduct({s64, s32}, {s64})
+ .customForCartesianProduct({s64, s32}, {s32})
.minScalar(1, s32);
+ getActionDefinitionsBuilder(G_SEXT_INREG).lower();
+
computeTables();
verify(*ST.getInstrInfo());
}
@@ -150,6 +238,134 @@ bool MipsLegalizerInfo::legalizeCustom(MachineInstr &MI,
using namespace TargetOpcode;
MIRBuilder.setInstr(MI);
+ const MipsSubtarget &STI =
+ static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
+ const LLT s32 = LLT::scalar(32);
+ const LLT s64 = LLT::scalar(64);
- return false;
+ switch (MI.getOpcode()) {
+ case G_UITOFP: {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+
+ if (SrcTy != s32)
+ return false;
+ if (DstTy != s32 && DstTy != s64)
+ return false;
+
+ // Let 0xABCDEFGH be given unsigned in MI.getOperand(1). First let's convert
+ // unsigned to double. Mantissa has 52 bits so we use following trick:
+ // First make floating point bit mask 0x43300000ABCDEFGH.
+ // Mask represents 2^52 * 0x1.00000ABCDEFGH i.e. 0x100000ABCDEFGH.0 .
+ // Next, subtract 2^52 * 0x1.0000000000000 i.e. 0x10000000000000.0 from it.
+ // Done. Trunc double to float if needed.
+
+ MachineInstrBuilder Bitcast = MIRBuilder.buildInstr(
+ STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64, {s64},
+ {Src, MIRBuilder.buildConstant(s32, UINT32_C(0x43300000))});
+ Bitcast.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
+ *STI.getRegBankInfo());
+
+ MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant(
+ s64, BitsToDouble(UINT64_C(0x4330000000000000)));
+
+ if (DstTy == s64)
+ MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);
+ else {
+ MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP);
+ MIRBuilder.buildFPTrunc(Dst, ResF64);
+ }
+
+ MI.eraseFromParent();
+ break;
+ }
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
+ MachineIRBuilder &MIRBuilder,
+ const MipsSubtarget &ST) {
+ assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
+ if (!MIRBuilder.buildInstr(Opcode)
+ .add(MI.getOperand(0))
+ .add(MI.getOperand(2))
+ .add(MI.getOperand(3))
+ .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
+ *ST.getRegBankInfo()))
+ return false;
+ MI.eraseFromParent();
+ return true;
+}
+
+static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
+ MachineIRBuilder &MIRBuilder,
+ const MipsSubtarget &ST) {
+ assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
+ MIRBuilder.buildInstr(Opcode)
+ .add(MI.getOperand(0))
+ .add(MI.getOperand(2))
+ .add(MI.getOperand(3));
+ MI.eraseFromParent();
+ return true;
+}
+
+bool MipsLegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &MIRBuilder) const {
+ const MipsSubtarget &ST =
+ static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
+ const MipsInstrInfo &TII = *ST.getInstrInfo();
+ const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
+ const RegisterBankInfo &RBI = *ST.getRegBankInfo();
+ MIRBuilder.setInstr(MI);
+
+ switch (MI.getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memset:
+ case Intrinsic::memmove:
+ if (createMemLibcall(MIRBuilder, MRI, MI) ==
+ LegalizerHelper::UnableToLegalize)
+ return false;
+ MI.eraseFromParent();
+ return true;
+ case Intrinsic::trap: {
+ MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
+ MI.eraseFromParent();
+ return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
+ }
+ case Intrinsic::vacopy: {
+ Register Tmp = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
+ MachinePointerInfo MPO;
+ MIRBuilder.buildLoad(Tmp, MI.getOperand(2),
+ *MI.getMF()->getMachineMemOperand(
+ MPO, MachineMemOperand::MOLoad, 4, 4));
+ MIRBuilder.buildStore(Tmp, MI.getOperand(1),
+ *MI.getMF()->getMachineMemOperand(
+ MPO, MachineMemOperand::MOStore, 4, 4));
+ MI.eraseFromParent();
+ return true;
+ }
+ case Intrinsic::mips_addv_b:
+ case Intrinsic::mips_addv_h:
+ case Intrinsic::mips_addv_w:
+ case Intrinsic::mips_addv_d:
+ return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
+ case Intrinsic::mips_addvi_b:
+ return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
+ case Intrinsic::mips_addvi_h:
+ return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
+ case Intrinsic::mips_addvi_w:
+ return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
+ case Intrinsic::mips_addvi_d:
+ return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
+ default:
+ break;
+ }
+ return true;
}
diff --git a/lib/Target/Mips/MipsLegalizerInfo.h b/lib/Target/Mips/MipsLegalizerInfo.h
index e5021e081890..9696c262b2db 100644
--- a/lib/Target/Mips/MipsLegalizerInfo.h
+++ b/lib/Target/Mips/MipsLegalizerInfo.h
@@ -28,6 +28,9 @@ public:
bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &MIRBuilder,
GISelChangeObserver &Observer) const override;
+
+ bool legalizeIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &MIRBuilder) const override;
};
} // end namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td
index 907ed9ef746f..f585d9c1a148 100644
--- a/lib/Target/Mips/MipsMSAInstrInfo.td
+++ b/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -60,6 +60,11 @@ def immZExt2Ptr : ImmLeaf<iPTR, [{return isUInt<2>(Imm);}]>;
def immZExt3Ptr : ImmLeaf<iPTR, [{return isUInt<3>(Imm);}]>;
def immZExt4Ptr : ImmLeaf<iPTR, [{return isUInt<4>(Imm);}]>;
+def timmZExt1Ptr : TImmLeaf<iPTR, [{return isUInt<1>(Imm);}]>;
+def timmZExt2Ptr : TImmLeaf<iPTR, [{return isUInt<2>(Imm);}]>;
+def timmZExt3Ptr : TImmLeaf<iPTR, [{return isUInt<3>(Imm);}]>;
+def timmZExt4Ptr : TImmLeaf<iPTR, [{return isUInt<4>(Imm);}]>;
+
// Operands
def immZExt2Lsa : ImmLeaf<i32, [{return isUInt<2>(Imm - 1);}]>;
@@ -1270,7 +1275,7 @@ class MSA_I8_SHF_DESC_BASE<string instr_asm, RegisterOperand ROWD,
dag OutOperandList = (outs ROWD:$wd);
dag InOperandList = (ins ROWS:$ws, uimm8:$u8);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
- list<dag> Pattern = [(set ROWD:$wd, (MipsSHF immZExt8:$u8, ROWS:$ws))];
+ list<dag> Pattern = [(set ROWD:$wd, (MipsSHF timmZExt8:$u8, ROWS:$ws))];
InstrItinClass Itinerary = itin;
}
@@ -2299,13 +2304,13 @@ class INSERT_FW_VIDX64_PSEUDO_DESC :
class INSERT_FD_VIDX64_PSEUDO_DESC :
MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v2f64, MSA128DOpnd, FGR64Opnd, GPR64Opnd>;
-class INSVE_B_DESC : MSA_INSVE_DESC_BASE<"insve.b", insve_v16i8, uimm4, immZExt4,
+class INSVE_B_DESC : MSA_INSVE_DESC_BASE<"insve.b", insve_v16i8, uimm4, timmZExt4,
MSA128BOpnd>;
-class INSVE_H_DESC : MSA_INSVE_DESC_BASE<"insve.h", insve_v8i16, uimm3, immZExt3,
+class INSVE_H_DESC : MSA_INSVE_DESC_BASE<"insve.h", insve_v8i16, uimm3, timmZExt3,
MSA128HOpnd>;
-class INSVE_W_DESC : MSA_INSVE_DESC_BASE<"insve.w", insve_v4i32, uimm2, immZExt2,
+class INSVE_W_DESC : MSA_INSVE_DESC_BASE<"insve.w", insve_v4i32, uimm2, timmZExt2,
MSA128WOpnd>;
-class INSVE_D_DESC : MSA_INSVE_DESC_BASE<"insve.d", insve_v2i64, uimm1, immZExt1,
+class INSVE_D_DESC : MSA_INSVE_DESC_BASE<"insve.d", insve_v2i64, uimm1, timmZExt1,
MSA128DOpnd>;
class LD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
@@ -2518,22 +2523,22 @@ class PCNT_W_DESC : MSA_2R_DESC_BASE<"pcnt.w", ctpop, MSA128WOpnd>;
class PCNT_D_DESC : MSA_2R_DESC_BASE<"pcnt.d", ctpop, MSA128DOpnd>;
class SAT_S_B_DESC : MSA_BIT_X_DESC_BASE<"sat_s.b", int_mips_sat_s_b, uimm3,
- immZExt3, MSA128BOpnd>;
+ timmZExt3, MSA128BOpnd>;
class SAT_S_H_DESC : MSA_BIT_X_DESC_BASE<"sat_s.h", int_mips_sat_s_h, uimm4,
- immZExt4, MSA128HOpnd>;
+ timmZExt4, MSA128HOpnd>;
class SAT_S_W_DESC : MSA_BIT_X_DESC_BASE<"sat_s.w", int_mips_sat_s_w, uimm5,
- immZExt5, MSA128WOpnd>;
+ timmZExt5, MSA128WOpnd>;
class SAT_S_D_DESC : MSA_BIT_X_DESC_BASE<"sat_s.d", int_mips_sat_s_d, uimm6,
- immZExt6, MSA128DOpnd>;
+ timmZExt6, MSA128DOpnd>;
class SAT_U_B_DESC : MSA_BIT_X_DESC_BASE<"sat_u.b", int_mips_sat_u_b, uimm3,
- immZExt3, MSA128BOpnd>;
+ timmZExt3, MSA128BOpnd>;
class SAT_U_H_DESC : MSA_BIT_X_DESC_BASE<"sat_u.h", int_mips_sat_u_h, uimm4,
- immZExt4, MSA128HOpnd>;
+ timmZExt4, MSA128HOpnd>;
class SAT_U_W_DESC : MSA_BIT_X_DESC_BASE<"sat_u.w", int_mips_sat_u_w, uimm5,
- immZExt5, MSA128WOpnd>;
+ timmZExt5, MSA128WOpnd>;
class SAT_U_D_DESC : MSA_BIT_X_DESC_BASE<"sat_u.d", int_mips_sat_u_d, uimm6,
- immZExt6, MSA128DOpnd>;
+ timmZExt6, MSA128DOpnd>;
class SHF_B_DESC : MSA_I8_SHF_DESC_BASE<"shf.b", MSA128BOpnd>;
class SHF_H_DESC : MSA_I8_SHF_DESC_BASE<"shf.h", MSA128HOpnd>;
@@ -2546,16 +2551,16 @@ class SLD_D_DESC : MSA_3R_SLD_DESC_BASE<"sld.d", int_mips_sld_d, MSA128DOpnd>;
class SLDI_B_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.b", int_mips_sldi_b,
MSA128BOpnd, MSA128BOpnd, uimm4,
- immZExt4>;
+ timmZExt4>;
class SLDI_H_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.h", int_mips_sldi_h,
MSA128HOpnd, MSA128HOpnd, uimm3,
- immZExt3>;
+ timmZExt3>;
class SLDI_W_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.w", int_mips_sldi_w,
MSA128WOpnd, MSA128WOpnd, uimm2,
- immZExt2>;
+ timmZExt2>;
class SLDI_D_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.d", int_mips_sldi_d,
MSA128DOpnd, MSA128DOpnd, uimm1,
- immZExt1>;
+ timmZExt1>;
class SLL_B_DESC : MSA_3R_DESC_BASE<"sll.b", shl, MSA128BOpnd>;
class SLL_H_DESC : MSA_3R_DESC_BASE<"sll.h", shl, MSA128HOpnd>;
@@ -2609,13 +2614,13 @@ class SRAR_W_DESC : MSA_3R_DESC_BASE<"srar.w", int_mips_srar_w, MSA128WOpnd>;
class SRAR_D_DESC : MSA_3R_DESC_BASE<"srar.d", int_mips_srar_d, MSA128DOpnd>;
class SRARI_B_DESC : MSA_BIT_X_DESC_BASE<"srari.b", int_mips_srari_b, uimm3,
- immZExt3, MSA128BOpnd>;
+ timmZExt3, MSA128BOpnd>;
class SRARI_H_DESC : MSA_BIT_X_DESC_BASE<"srari.h", int_mips_srari_h, uimm4,
- immZExt4, MSA128HOpnd>;
+ timmZExt4, MSA128HOpnd>;
class SRARI_W_DESC : MSA_BIT_X_DESC_BASE<"srari.w", int_mips_srari_w, uimm5,
- immZExt5, MSA128WOpnd>;
+ timmZExt5, MSA128WOpnd>;
class SRARI_D_DESC : MSA_BIT_X_DESC_BASE<"srari.d", int_mips_srari_d, uimm6,
- immZExt6, MSA128DOpnd>;
+ timmZExt6, MSA128DOpnd>;
class SRL_B_DESC : MSA_3R_DESC_BASE<"srl.b", srl, MSA128BOpnd>;
class SRL_H_DESC : MSA_3R_DESC_BASE<"srl.h", srl, MSA128HOpnd>;
@@ -2637,13 +2642,13 @@ class SRLR_W_DESC : MSA_3R_DESC_BASE<"srlr.w", int_mips_srlr_w, MSA128WOpnd>;
class SRLR_D_DESC : MSA_3R_DESC_BASE<"srlr.d", int_mips_srlr_d, MSA128DOpnd>;
class SRLRI_B_DESC : MSA_BIT_X_DESC_BASE<"srlri.b", int_mips_srlri_b, uimm3,
- immZExt3, MSA128BOpnd>;
+ timmZExt3, MSA128BOpnd>;
class SRLRI_H_DESC : MSA_BIT_X_DESC_BASE<"srlri.h", int_mips_srlri_h, uimm4,
- immZExt4, MSA128HOpnd>;
+ timmZExt4, MSA128HOpnd>;
class SRLRI_W_DESC : MSA_BIT_X_DESC_BASE<"srlri.w", int_mips_srlri_w, uimm5,
- immZExt5, MSA128WOpnd>;
+ timmZExt5, MSA128WOpnd>;
class SRLRI_D_DESC : MSA_BIT_X_DESC_BASE<"srlri.d", int_mips_srlri_d, uimm6,
- immZExt6, MSA128DOpnd>;
+ timmZExt6, MSA128DOpnd>;
class ST_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
ValueType TyNode, RegisterOperand ROWD,
diff --git a/lib/Target/Mips/MipsOptimizePICCall.cpp b/lib/Target/Mips/MipsOptimizePICCall.cpp
index 5ef07a2d283e..8bd64ff6cb27 100644
--- a/lib/Target/Mips/MipsOptimizePICCall.cpp
+++ b/lib/Target/Mips/MipsOptimizePICCall.cpp
@@ -127,8 +127,7 @@ static MachineOperand *getCallTargetRegOpnd(MachineInstr &MI) {
MachineOperand &MO = MI.getOperand(0);
- if (!MO.isReg() || !MO.isUse() ||
- !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (!MO.isReg() || !MO.isUse() || !Register::isVirtualRegister(MO.getReg()))
return nullptr;
return &MO;
@@ -152,7 +151,7 @@ static void setCallTargetReg(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I) {
MachineFunction &MF = *MBB->getParent();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- unsigned SrcReg = I->getOperand(0).getReg();
+ Register SrcReg = I->getOperand(0).getReg();
unsigned DstReg = getRegTy(SrcReg, MF) == MVT::i32 ? Mips::T9 : Mips::T9_64;
BuildMI(*MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), DstReg)
.addReg(SrcReg);
diff --git a/lib/Target/Mips/MipsPfmCounters.td b/lib/Target/Mips/MipsPfmCounters.td
new file mode 100644
index 000000000000..c7779b474b91
--- /dev/null
+++ b/lib/Target/Mips/MipsPfmCounters.td
@@ -0,0 +1,18 @@
+//===-- MipsPfmCounters.td - Mips Hardware Counters --------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the available hardware counters for Mips.
+//
+//===----------------------------------------------------------------------===//
+
+def CpuCyclesPfmCounter : PfmCounter<"CYCLES">;
+
+def DefaultPfmCounters : ProcPfmCounters {
+ let CycleCounter = CpuCyclesPfmCounter;
+}
+def : PfmCountersDefaultBinding<DefaultPfmCounters>;
diff --git a/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
index 85076590d407..ace0735652bd 100644
--- a/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -27,7 +27,8 @@ class MipsPreLegalizerCombinerInfo : public CombinerInfo {
public:
MipsPreLegalizerCombinerInfo()
: CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
- /*LegalizerInfo*/ nullptr) {}
+ /*LegalizerInfo*/ nullptr, /*EnableOpt*/ false,
+ /*EnableOptSize*/ false, /*EnableMinSize*/ false) {}
virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
MachineIRBuilder &B) const override;
};
diff --git a/lib/Target/Mips/MipsRegisterBankInfo.cpp b/lib/Target/Mips/MipsRegisterBankInfo.cpp
index d8bcf16afd50..d334366e727c 100644
--- a/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -12,6 +12,7 @@
#include "MipsRegisterBankInfo.h"
#include "MipsInstrInfo.h"
+#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
@@ -27,20 +28,23 @@ enum PartialMappingIdx {
PMI_GPR,
PMI_SPR,
PMI_DPR,
+ PMI_MSA,
PMI_Min = PMI_GPR,
};
RegisterBankInfo::PartialMapping PartMappings[]{
{0, 32, GPRBRegBank},
{0, 32, FPRBRegBank},
- {0, 64, FPRBRegBank}
+ {0, 64, FPRBRegBank},
+ {0, 128, FPRBRegBank}
};
enum ValueMappingIdx {
InvalidIdx = 0,
GPRIdx = 1,
SPRIdx = 4,
- DPRIdx = 7
+ DPRIdx = 7,
+ MSAIdx = 10
};
RegisterBankInfo::ValueMapping ValueMappings[] = {
@@ -50,14 +54,18 @@ RegisterBankInfo::ValueMapping ValueMappings[] = {
{&PartMappings[PMI_GPR - PMI_Min], 1},
{&PartMappings[PMI_GPR - PMI_Min], 1},
{&PartMappings[PMI_GPR - PMI_Min], 1},
- // up to 3 ops operands FPRs - single precission
+    // up to 3 operands in FPRs - single precision
{&PartMappings[PMI_SPR - PMI_Min], 1},
{&PartMappings[PMI_SPR - PMI_Min], 1},
{&PartMappings[PMI_SPR - PMI_Min], 1},
- // up to 3 ops operands FPRs - double precission
+    // up to 3 operands in FPRs - double precision
{&PartMappings[PMI_DPR - PMI_Min], 1},
{&PartMappings[PMI_DPR - PMI_Min], 1},
- {&PartMappings[PMI_DPR - PMI_Min], 1}
+ {&PartMappings[PMI_DPR - PMI_Min], 1},
+ // up to 3 operands in FPRs - MSA
+ {&PartMappings[PMI_MSA - PMI_Min], 1},
+ {&PartMappings[PMI_MSA - PMI_Min], 1},
+ {&PartMappings[PMI_MSA - PMI_Min], 1}
};
} // end namespace Mips
@@ -86,6 +94,10 @@ const RegisterBank &MipsRegisterBankInfo::getRegBankFromRegClass(
case Mips::FGR32RegClassID:
case Mips::FGR64RegClassID:
case Mips::AFGR64RegClassID:
+ case Mips::MSA128BRegClassID:
+ case Mips::MSA128HRegClassID:
+ case Mips::MSA128WRegClassID:
+ case Mips::MSA128DRegClassID:
return getRegBank(Mips::FPRBRegBankID);
default:
llvm_unreachable("Register class not supported");
@@ -149,6 +161,7 @@ static bool isAmbiguous(unsigned Opc) {
case TargetOpcode::G_STORE:
case TargetOpcode::G_PHI:
case TargetOpcode::G_SELECT:
+ case TargetOpcode::G_IMPLICIT_DEF:
return true;
default:
return false;
@@ -163,8 +176,7 @@ void MipsRegisterBankInfo::AmbiguousRegDefUseContainer::addDefUses(
MachineInstr *NonCopyInstr = skipCopiesOutgoing(&UseMI);
// Copy with many uses.
if (NonCopyInstr->getOpcode() == TargetOpcode::COPY &&
- !TargetRegisterInfo::isPhysicalRegister(
- NonCopyInstr->getOperand(0).getReg()))
+ !Register::isPhysicalRegister(NonCopyInstr->getOperand(0).getReg()))
addDefUses(NonCopyInstr->getOperand(0).getReg(), MRI);
else
DefUses.push_back(skipCopiesOutgoing(&UseMI));
@@ -186,7 +198,7 @@ MipsRegisterBankInfo::AmbiguousRegDefUseContainer::skipCopiesOutgoing(
const MachineRegisterInfo &MRI = MF.getRegInfo();
MachineInstr *Ret = MI;
while (Ret->getOpcode() == TargetOpcode::COPY &&
- !TargetRegisterInfo::isPhysicalRegister(Ret->getOperand(0).getReg()) &&
+ !Register::isPhysicalRegister(Ret->getOperand(0).getReg()) &&
MRI.hasOneUse(Ret->getOperand(0).getReg())) {
Ret = &(*MRI.use_instr_begin(Ret->getOperand(0).getReg()));
}
@@ -200,7 +212,7 @@ MipsRegisterBankInfo::AmbiguousRegDefUseContainer::skipCopiesIncoming(
const MachineRegisterInfo &MRI = MF.getRegInfo();
MachineInstr *Ret = MI;
while (Ret->getOpcode() == TargetOpcode::COPY &&
- !TargetRegisterInfo::isPhysicalRegister(Ret->getOperand(1).getReg()))
+ !Register::isPhysicalRegister(Ret->getOperand(1).getReg()))
Ret = MRI.getVRegDef(Ret->getOperand(1).getReg());
return Ret;
}
@@ -231,6 +243,9 @@ MipsRegisterBankInfo::AmbiguousRegDefUseContainer::AmbiguousRegDefUseContainer(
addUseDef(MI->getOperand(2).getReg(), MRI);
addUseDef(MI->getOperand(3).getReg(), MRI);
}
+
+ if (MI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
+ addDefUses(MI->getOperand(0).getReg(), MRI);
}
bool MipsRegisterBankInfo::TypeInfoForMF::visit(
@@ -318,8 +333,7 @@ void MipsRegisterBankInfo::TypeInfoForMF::setTypes(const MachineInstr *MI,
void MipsRegisterBankInfo::TypeInfoForMF::setTypesAccordingToPhysicalRegister(
const MachineInstr *MI, const MachineInstr *CopyInst, unsigned Op) {
- assert((TargetRegisterInfo::isPhysicalRegister(
- CopyInst->getOperand(Op).getReg())) &&
+ assert((Register::isPhysicalRegister(CopyInst->getOperand(Op).getReg())) &&
"Copies of non physical registers should not be considered here.\n");
const MachineFunction &MF = *CopyInst->getMF();
@@ -353,6 +367,31 @@ void MipsRegisterBankInfo::TypeInfoForMF::cleanupIfNewFunction(
}
}
+static const MipsRegisterBankInfo::ValueMapping *
+getMSAMapping(const MachineFunction &MF) {
+ assert(static_cast<const MipsSubtarget &>(MF.getSubtarget()).hasMSA() &&
+ "MSA mapping not available on target without MSA.");
+ return &Mips::ValueMappings[Mips::MSAIdx];
+}
+
+static const MipsRegisterBankInfo::ValueMapping *getFprbMapping(unsigned Size) {
+ return Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
+ : &Mips::ValueMappings[Mips::DPRIdx];
+}
+
+static const unsigned CustomMappingID = 1;
+
+// Only 64 bit mapping is available in fprb and will be marked as custom, i.e.
+// will be split into two 32 bit registers in gprb.
+static const MipsRegisterBankInfo::ValueMapping *
+getGprbOrCustomMapping(unsigned Size, unsigned &MappingID) {
+ if (Size == 32)
+ return &Mips::ValueMappings[Mips::GPRIdx];
+
+ MappingID = CustomMappingID;
+ return &Mips::ValueMappings[Mips::DPRIdx];
+}
+
const RegisterBankInfo::InstructionMapping &
MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
@@ -377,17 +416,35 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned NumOperands = MI.getNumOperands();
const ValueMapping *OperandsMapping = &Mips::ValueMappings[Mips::GPRIdx];
unsigned MappingID = DefaultMappingID;
- const unsigned CustomMappingID = 1;
+
+ // Check if LLT sizes match sizes of available register banks.
+ for (const MachineOperand &Op : MI.operands()) {
+ if (Op.isReg()) {
+ LLT RegTy = MRI.getType(Op.getReg());
+
+ if (RegTy.isScalar() &&
+ (RegTy.getSizeInBits() != 32 && RegTy.getSizeInBits() != 64))
+ return getInvalidInstructionMapping();
+
+ if (RegTy.isVector() && RegTy.getSizeInBits() != 128)
+ return getInvalidInstructionMapping();
+ }
+ }
+
+ const LLT Op0Ty = MRI.getType(MI.getOperand(0).getReg());
+ unsigned Op0Size = Op0Ty.getSizeInBits();
+ InstType InstTy = InstType::Integer;
switch (Opc) {
case G_TRUNC:
- case G_ADD:
case G_SUB:
case G_MUL:
case G_UMULH:
case G_ZEXTLOAD:
case G_SEXTLOAD:
case G_GEP:
+ case G_INTTOPTR:
+ case G_PTRTOINT:
case G_AND:
case G_OR:
case G_XOR:
@@ -398,66 +455,42 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case G_UDIV:
case G_SREM:
case G_UREM:
+ case G_BRINDIRECT:
+ case G_VASTART:
OperandsMapping = &Mips::ValueMappings[Mips::GPRIdx];
break;
- case G_LOAD: {
- unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- InstType InstTy = InstType::Integer;
- if (!MRI.getType(MI.getOperand(0).getReg()).isPointer()) {
- InstTy = TI.determineInstType(&MI);
- }
-
- if (InstTy == InstType::FloatingPoint ||
- (Size == 64 && InstTy == InstType::Ambiguous)) { // fprb
- OperandsMapping =
- getOperandsMapping({Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx],
- &Mips::ValueMappings[Mips::GPRIdx]});
+ case G_ADD:
+ OperandsMapping = &Mips::ValueMappings[Mips::GPRIdx];
+ if (Op0Size == 128)
+ OperandsMapping = getMSAMapping(MF);
+ break;
+ case G_STORE:
+ case G_LOAD:
+ if (Op0Size == 128) {
+ OperandsMapping = getOperandsMapping(
+ {getMSAMapping(MF), &Mips::ValueMappings[Mips::GPRIdx]});
break;
- } else { // gprb
- OperandsMapping =
- getOperandsMapping({Size <= 32 ? &Mips::ValueMappings[Mips::GPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx],
- &Mips::ValueMappings[Mips::GPRIdx]});
- if (Size == 64)
- MappingID = CustomMappingID;
}
- break;
- }
- case G_STORE: {
- unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- InstType InstTy = InstType::Integer;
- if (!MRI.getType(MI.getOperand(0).getReg()).isPointer()) {
+ if (!Op0Ty.isPointer())
InstTy = TI.determineInstType(&MI);
- }
if (InstTy == InstType::FloatingPoint ||
- (Size == 64 && InstTy == InstType::Ambiguous)) { // fprb
- OperandsMapping =
- getOperandsMapping({Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx],
- &Mips::ValueMappings[Mips::GPRIdx]});
- break;
- } else { // gprb
+ (Op0Size == 64 && InstTy == InstType::Ambiguous))
+ OperandsMapping = getOperandsMapping(
+ {getFprbMapping(Op0Size), &Mips::ValueMappings[Mips::GPRIdx]});
+ else
OperandsMapping =
- getOperandsMapping({Size <= 32 ? &Mips::ValueMappings[Mips::GPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx],
+ getOperandsMapping({getGprbOrCustomMapping(Op0Size, MappingID),
&Mips::ValueMappings[Mips::GPRIdx]});
- if (Size == 64)
- MappingID = CustomMappingID;
- }
+
break;
- }
- case G_PHI: {
- unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- InstType InstTy = InstType::Integer;
- if (!MRI.getType(MI.getOperand(0).getReg()).isPointer()) {
+ case G_PHI:
+ if (!Op0Ty.isPointer())
InstTy = TI.determineInstType(&MI);
- }
// PHI is copylike and should have one regbank in mapping for def register.
- if (InstTy == InstType::Integer && Size == 64) { // fprb
+ if (InstTy == InstType::Integer && Op0Size == 64) {
OperandsMapping =
getOperandsMapping({&Mips::ValueMappings[Mips::DPRIdx]});
return getInstructionMapping(CustomMappingID, /*Cost=*/1, OperandsMapping,
@@ -465,80 +498,63 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
// Use default handling for PHI, i.e. set reg bank of def operand to match
// register banks of use operands.
- const RegisterBankInfo::InstructionMapping &Mapping =
- getInstrMappingImpl(MI);
- return Mapping;
- }
+ return getInstrMappingImpl(MI);
case G_SELECT: {
- unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- InstType InstTy = InstType::Integer;
- if (!MRI.getType(MI.getOperand(0).getReg()).isPointer()) {
+ if (!Op0Ty.isPointer())
InstTy = TI.determineInstType(&MI);
- }
if (InstTy == InstType::FloatingPoint ||
- (Size == 64 && InstTy == InstType::Ambiguous)) { // fprb
- const RegisterBankInfo::ValueMapping *Bank =
- Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx];
+ (Op0Size == 64 && InstTy == InstType::Ambiguous)) {
+ const RegisterBankInfo::ValueMapping *Bank = getFprbMapping(Op0Size);
OperandsMapping = getOperandsMapping(
{Bank, &Mips::ValueMappings[Mips::GPRIdx], Bank, Bank});
break;
- } else { // gprb
+ } else {
const RegisterBankInfo::ValueMapping *Bank =
- Size <= 32 ? &Mips::ValueMappings[Mips::GPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx];
+ getGprbOrCustomMapping(Op0Size, MappingID);
OperandsMapping = getOperandsMapping(
{Bank, &Mips::ValueMappings[Mips::GPRIdx], Bank, Bank});
- if (Size == 64)
- MappingID = CustomMappingID;
}
break;
}
- case G_UNMERGE_VALUES: {
+ case G_IMPLICIT_DEF:
+ if (!Op0Ty.isPointer())
+ InstTy = TI.determineInstType(&MI);
+
+ if (InstTy == InstType::FloatingPoint)
+ OperandsMapping = getFprbMapping(Op0Size);
+ else
+ OperandsMapping = getGprbOrCustomMapping(Op0Size, MappingID);
+
+ break;
+ case G_UNMERGE_VALUES:
OperandsMapping = getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx],
&Mips::ValueMappings[Mips::GPRIdx],
&Mips::ValueMappings[Mips::DPRIdx]});
MappingID = CustomMappingID;
break;
- }
- case G_MERGE_VALUES: {
+ case G_MERGE_VALUES:
OperandsMapping = getOperandsMapping({&Mips::ValueMappings[Mips::DPRIdx],
&Mips::ValueMappings[Mips::GPRIdx],
&Mips::ValueMappings[Mips::GPRIdx]});
MappingID = CustomMappingID;
break;
- }
case G_FADD:
case G_FSUB:
case G_FMUL:
case G_FDIV:
case G_FABS:
- case G_FSQRT:{
- unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- assert((Size == 32 || Size == 64) && "Unsupported floating point size");
- OperandsMapping = Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx];
+ case G_FSQRT:
+ OperandsMapping = getFprbMapping(Op0Size);
break;
- }
- case G_FCONSTANT: {
- unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- assert((Size == 32 || Size == 64) && "Unsupported floating point size");
- const RegisterBankInfo::ValueMapping *FPRValueMapping =
- Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx];
- OperandsMapping = getOperandsMapping({FPRValueMapping, nullptr});
+ case G_FCONSTANT:
+ OperandsMapping = getOperandsMapping({getFprbMapping(Op0Size), nullptr});
break;
- }
case G_FCMP: {
- unsigned Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
- assert((Size == 32 || Size == 64) && "Unsupported floating point size");
- const RegisterBankInfo::ValueMapping *FPRValueMapping =
- Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx];
+ unsigned Op2Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
OperandsMapping =
getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx], nullptr,
- FPRValueMapping, FPRValueMapping});
+ getFprbMapping(Op2Size), getFprbMapping(Op2Size)});
break;
}
case G_FPEXT:
@@ -550,36 +566,31 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
&Mips::ValueMappings[Mips::DPRIdx]});
break;
case G_FPTOSI: {
+ assert((Op0Size == 32) && "Unsupported integer size");
unsigned SizeFP = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
- assert((MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 32) &&
- "Unsupported integer size");
- assert((SizeFP == 32 || SizeFP == 64) && "Unsupported floating point size");
- OperandsMapping = getOperandsMapping({
- &Mips::ValueMappings[Mips::GPRIdx],
- SizeFP == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx],
- });
+ OperandsMapping = getOperandsMapping(
+ {&Mips::ValueMappings[Mips::GPRIdx], getFprbMapping(SizeFP)});
break;
}
- case G_SITOFP: {
- unsigned SizeInt = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
- unsigned SizeFP = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- (void)SizeInt;
- assert((SizeInt == 32) && "Unsupported integer size");
- assert((SizeFP == 32 || SizeFP == 64) && "Unsupported floating point size");
- OperandsMapping =
- getOperandsMapping({SizeFP == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
- : &Mips::ValueMappings[Mips::DPRIdx],
- &Mips::ValueMappings[Mips::GPRIdx]});
+ case G_SITOFP:
+ assert((MRI.getType(MI.getOperand(1).getReg()).getSizeInBits() == 32) &&
+ "Unsupported integer size");
+ OperandsMapping = getOperandsMapping(
+ {getFprbMapping(Op0Size), &Mips::ValueMappings[Mips::GPRIdx]});
break;
- }
case G_CONSTANT:
case G_FRAME_INDEX:
case G_GLOBAL_VALUE:
+ case G_JUMP_TABLE:
case G_BRCOND:
OperandsMapping =
getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx], nullptr});
break;
+ case G_BRJT:
+ OperandsMapping =
+ getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx], nullptr,
+ &Mips::ValueMappings[Mips::GPRIdx]});
+ break;
case G_ICMP:
OperandsMapping =
getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx], nullptr,
@@ -609,11 +620,41 @@ public:
};
} // end anonymous namespace
-/// Here we have to narrowScalar s64 operands to s32, combine away
-/// G_MERGE/G_UNMERGE and erase instructions that became dead in the process.
-/// We manually assign 32 bit gprb to register operands of all new instructions
-/// that got created in the process since they will not end up in RegBankSelect
-/// loop. Careful not to delete instruction after MI i.e. MI.getIterator()++.
+void MipsRegisterBankInfo::setRegBank(MachineInstr &MI,
+ MachineRegisterInfo &MRI) const {
+ Register Dest = MI.getOperand(0).getReg();
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_STORE:
+ // No def operands, skip this instruction.
+ break;
+ case TargetOpcode::G_CONSTANT:
+ case TargetOpcode::G_LOAD:
+ case TargetOpcode::G_SELECT:
+ case TargetOpcode::G_PHI:
+ case TargetOpcode::G_IMPLICIT_DEF: {
+ assert(MRI.getType(Dest) == LLT::scalar(32) && "Unexpected operand type.");
+ MRI.setRegBank(Dest, getRegBank(Mips::GPRBRegBankID));
+ break;
+ }
+ case TargetOpcode::G_GEP: {
+ assert(MRI.getType(Dest).isPointer() && "Unexpected operand type.");
+ MRI.setRegBank(Dest, getRegBank(Mips::GPRBRegBankID));
+ break;
+ }
+ default:
+ llvm_unreachable("Unexpected opcode.");
+ }
+}
+
+static void
+combineAwayG_UNMERGE_VALUES(LegalizationArtifactCombiner &ArtCombiner,
+ MachineInstr &MI) {
+ SmallVector<MachineInstr *, 2> DeadInstrs;
+ ArtCombiner.tryCombineMerges(MI, DeadInstrs);
+ for (MachineInstr *DeadMI : DeadInstrs)
+ DeadMI->eraseFromParent();
+}
+
void MipsRegisterBankInfo::applyMappingImpl(
const OperandsMapper &OpdMapper) const {
MachineInstr &MI = OpdMapper.getMI();
@@ -621,18 +662,19 @@ void MipsRegisterBankInfo::applyMappingImpl(
MachineIRBuilder B(MI);
MachineFunction *MF = MI.getMF();
MachineRegisterInfo &MRI = OpdMapper.getMRI();
+ const LegalizerInfo &LegInfo = *MF->getSubtarget().getLegalizerInfo();
InstManager NewInstrObserver(NewInstrs);
GISelObserverWrapper WrapperObserver(&NewInstrObserver);
LegalizerHelper Helper(*MF, WrapperObserver, B);
- LegalizationArtifactCombiner ArtCombiner(
- B, MF->getRegInfo(), *MF->getSubtarget().getLegalizerInfo());
+ LegalizationArtifactCombiner ArtCombiner(B, MF->getRegInfo(), LegInfo);
switch (MI.getOpcode()) {
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
case TargetOpcode::G_PHI:
- case TargetOpcode::G_SELECT: {
+ case TargetOpcode::G_SELECT:
+ case TargetOpcode::G_IMPLICIT_DEF: {
Helper.narrowScalar(MI, 0, LLT::scalar(32));
// Handle new instructions.
while (!NewInstrs.empty()) {
@@ -640,35 +682,21 @@ void MipsRegisterBankInfo::applyMappingImpl(
// This is new G_UNMERGE that was created during narrowScalar and will
// not be considered for regbank selection. RegBankSelect for mips
// visits/makes corresponding G_MERGE first. Combine them here.
- if (NewMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
- SmallVector<MachineInstr *, 2> DeadInstrs;
- ArtCombiner.tryCombineMerges(*NewMI, DeadInstrs);
- for (MachineInstr *DeadMI : DeadInstrs)
- DeadMI->eraseFromParent();
- }
+ if (NewMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES)
+ combineAwayG_UNMERGE_VALUES(ArtCombiner, *NewMI);
// This G_MERGE will be combined away when its corresponding G_UNMERGE
// gets regBankSelected.
else if (NewMI->getOpcode() == TargetOpcode::G_MERGE_VALUES)
continue;
else
- // Manually set register banks for all register operands to 32 bit gprb.
- for (auto Op : NewMI->operands()) {
- if (Op.isReg()) {
- assert(MRI.getType(Op.getReg()).getSizeInBits() == 32 &&
- "Only 32 bit gprb is handled here.\n");
- MRI.setRegBank(Op.getReg(), getRegBank(Mips::GPRBRegBankID));
- }
- }
+ // Manually set register banks for def operands to 32 bit gprb.
+ setRegBank(*NewMI, MRI);
}
return;
}
- case TargetOpcode::G_UNMERGE_VALUES: {
- SmallVector<MachineInstr *, 2> DeadInstrs;
- ArtCombiner.tryCombineMerges(MI, DeadInstrs);
- for (MachineInstr *DeadMI : DeadInstrs)
- DeadMI->eraseFromParent();
+ case TargetOpcode::G_UNMERGE_VALUES:
+ combineAwayG_UNMERGE_VALUES(ArtCombiner, MI);
return;
- }
default:
break;
}
diff --git a/lib/Target/Mips/MipsRegisterBankInfo.h b/lib/Target/Mips/MipsRegisterBankInfo.h
index 176813c031ed..fa0f1c7bc941 100644
--- a/lib/Target/Mips/MipsRegisterBankInfo.h
+++ b/lib/Target/Mips/MipsRegisterBankInfo.h
@@ -38,8 +38,17 @@ public:
const InstructionMapping &
getInstrMapping(const MachineInstr &MI) const override;
+ /// Here we have to narrowScalar s64 operands to s32, combine away G_MERGE or
+ /// G_UNMERGE and erase instructions that became dead in the process. We
+ /// manually assign a register bank to the def operand of each new instruction
+ /// created in the process, since they will not reach the RegBankSelect loop.
void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
+ /// RegBankSelect determined that an s64 operand is better split into two s32
+ /// operands in gprb. Here we manually set the register banks of def operands
+ /// of newly created instructions, since they will not get regbankselected.
+ void setRegBank(MachineInstr &MI, MachineRegisterInfo &MRI) const;
+
private:
/// Some instructions are used with both floating point and integer operands.
/// We assign InstType to such instructions as it helps us to avoid cross bank
diff --git a/lib/Target/Mips/MipsRegisterBanks.td b/lib/Target/Mips/MipsRegisterBanks.td
index 14a0181f8f11..7d11475884ce 100644
--- a/lib/Target/Mips/MipsRegisterBanks.td
+++ b/lib/Target/Mips/MipsRegisterBanks.td
@@ -11,4 +11,4 @@
def GPRBRegBank : RegisterBank<"GPRB", [GPR32]>;
-def FPRBRegBank : RegisterBank<"FPRB", [FGR64, AFGR64]>;
+def FPRBRegBank : RegisterBank<"FPRB", [FGR64, AFGR64, MSA128D]>;
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index 4c6cc1ef771c..166ddea0431f 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -171,8 +171,8 @@ void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
- unsigned VR = MRI.createVirtualRegister(RC);
- unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
+ Register VR = MRI.createVirtualRegister(RC);
+ Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
@@ -186,8 +186,8 @@ void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
- unsigned VR = MRI.createVirtualRegister(RC);
- unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
+ Register VR = MRI.createVirtualRegister(RC);
+ Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
.addReg(Src, getKillRegState(I->getOperand(0).isKill()));
@@ -204,11 +204,11 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
- unsigned VR0 = MRI.createVirtualRegister(RC);
- unsigned VR1 = MRI.createVirtualRegister(RC);
- unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
- unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
- unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
+ Register VR0 = MRI.createVirtualRegister(RC);
+ Register VR1 = MRI.createVirtualRegister(RC);
+ Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
+ Register Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
+ Register Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
DebugLoc DL = I->getDebugLoc();
const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);
@@ -229,9 +229,9 @@ void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
- unsigned VR0 = MRI.createVirtualRegister(RC);
- unsigned VR1 = MRI.createVirtualRegister(RC);
- unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
+ Register VR0 = MRI.createVirtualRegister(RC);
+ Register VR1 = MRI.createVirtualRegister(RC);
+ Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
DebugLoc DL = I->getDebugLoc();
@@ -242,7 +242,7 @@ void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
}
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
- unsigned Src = I->getOperand(1).getReg();
+ Register Src = I->getOperand(1).getReg();
std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);
if (!Opcodes.first)
@@ -262,11 +262,11 @@ bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
- unsigned VR0 = MRI.createVirtualRegister(RC);
- unsigned VR1 = MRI.createVirtualRegister(RC);
+ Register VR0 = MRI.createVirtualRegister(RC);
+ Register VR1 = MRI.createVirtualRegister(RC);
unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
- unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
- unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
+ Register DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
+ Register DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
DebugLoc DL = I->getDebugLoc();
BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
@@ -304,9 +304,9 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
// stack is used.
if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
&& I->getOperand(3).getReg() == Mips::SP) {
- unsigned DstReg = I->getOperand(0).getReg();
- unsigned LoReg = I->getOperand(1).getReg();
- unsigned HiReg = I->getOperand(2).getReg();
+ Register DstReg = I->getOperand(0).getReg();
+ Register LoReg = I->getOperand(1).getReg();
+ Register HiReg = I->getOperand(2).getReg();
// It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
// the cases where mthc1 is not available). 64-bit architectures and
@@ -346,7 +346,7 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
const MachineOperand &Op2 = I->getOperand(2);
if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
- unsigned DstReg = I->getOperand(0).getReg();
+ Register DstReg = I->getOperand(0).getReg();
BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
return true;
}
@@ -369,8 +369,8 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
// stack is used.
if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
&& I->getOperand(3).getReg() == Mips::SP) {
- unsigned DstReg = I->getOperand(0).getReg();
- unsigned SrcReg = Op1.getReg();
+ Register DstReg = I->getOperand(0).getReg();
+ Register SrcReg = Op1.getReg();
unsigned N = Op2.getImm();
int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));
@@ -538,7 +538,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
if (RegInfo.needsStackRealignment(MF)) {
// addiu $Reg, $zero, -MaxAlignment
// andi $sp, $sp, $Reg
- unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
+ Register VR = MF.getRegInfo().createVirtualRegister(RC);
assert(isInt<16>(MFI.getMaxAlignment()) &&
"Function's alignment size requirement is not supported.");
int MaxAlign = -(int)MFI.getMaxAlignment();
@@ -865,12 +865,15 @@ void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
MipsABIInfo ABI = STI.getABI();
+ unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
unsigned FP = ABI.GetFramePtr();
unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;
- // Mark $fp as used if function has dedicated frame pointer.
- if (hasFP(MF))
+ // Mark $ra and $fp as used if function has dedicated frame pointer.
+ if (hasFP(MF)) {
+ setAliasRegs(MF, SavedRegs, RA);
setAliasRegs(MF, SavedRegs, FP);
+ }
// Mark $s7 as used if function has dedicated base pointer.
if (hasBP(MF))
setAliasRegs(MF, SavedRegs, BP);
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 703f99f37dd1..c8313240a678 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -124,6 +124,33 @@ bool MipsSEDAGToDAGISel::replaceUsesWithZeroReg(MachineRegisterInfo *MRI,
return true;
}
+void MipsSEDAGToDAGISel::emitMCountABI(MachineInstr &MI, MachineBasicBlock &MBB,
+ MachineFunction &MF) {
+ MachineInstrBuilder MIB(MF, &MI);
+ if (!Subtarget->isABI_O32()) { // N32, N64
+ // Save current return address.
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Mips::OR64))
+ .addDef(Mips::AT_64)
+ .addUse(Mips::RA_64, RegState::Undef)
+ .addUse(Mips::ZERO_64);
+ // Stops instruction above from being removed later on.
+ MIB.addUse(Mips::AT_64, RegState::Implicit);
+ } else { // O32
+ // Save current return address.
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Mips::OR))
+ .addDef(Mips::AT)
+ .addUse(Mips::RA, RegState::Undef)
+ .addUse(Mips::ZERO);
+ // _mcount pops 2 words from stack.
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Mips::ADDiu))
+ .addDef(Mips::SP)
+ .addUse(Mips::SP)
+ .addImm(-8);
+ // Stops first instruction above from being removed later on.
+ MIB.addUse(Mips::AT, RegState::Implicit);
+ }
+}
+
void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
MF.getInfo<MipsFunctionInfo>()->initGlobalBaseReg();
@@ -150,6 +177,24 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
if (Subtarget->isABI_FPXX() && !Subtarget->hasMTHC1())
MI.addOperand(MachineOperand::CreateReg(Mips::SP, false, true));
break;
+ case Mips::JAL:
+ case Mips::JAL_MM:
+ if (MI.getOperand(0).isGlobal() &&
+ MI.getOperand(0).getGlobal()->getGlobalIdentifier() == "_mcount")
+ emitMCountABI(MI, MBB, MF);
+ break;
+ case Mips::JALRPseudo:
+ case Mips::JALR64Pseudo:
+ case Mips::JALR16_MM:
+ if (MI.getOperand(2).isMCSymbol() &&
+ MI.getOperand(2).getMCSymbol()->getName() == "_mcount")
+ emitMCountABI(MI, MBB, MF);
+ break;
+ case Mips::JALR:
+ if (MI.getOperand(3).isMCSymbol() &&
+ MI.getOperand(3).getMCSymbol()->getName() == "_mcount")
+ emitMCountABI(MI, MBB, MF);
+ break;
default:
replaceUsesWithZeroReg(MRI, MI);
}
@@ -247,7 +292,8 @@ bool MipsSEDAGToDAGISel::selectAddrFrameIndexOffset(
Base = Addr.getOperand(0);
// If base is a FI, additional offset calculation is done in
// eliminateFrameIndex, otherwise we need to check the alignment
- if (OffsetToAlignment(CN->getZExtValue(), 1ull << ShiftAmount) != 0)
+ const Align Alignment(1ULL << ShiftAmount);
+ if (!isAligned(Alignment, CN->getZExtValue()))
return false;
}
@@ -719,7 +765,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
}
case ISD::ConstantFP: {
- ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Node);
+ auto *CN = cast<ConstantFPSDNode>(Node);
if (Node->getValueType(0) == MVT::f64 && CN->isExactlyValue(+0.0)) {
if (Subtarget->isGP64bit()) {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
@@ -743,7 +789,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
}
case ISD::Constant: {
- const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node);
+ auto *CN = cast<ConstantSDNode>(Node);
int64_t Imm = CN->getSExtValue();
unsigned Size = CN->getValueSizeInBits(0);
@@ -969,7 +1015,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
break;
}
- SDNode *Res;
+ SDNode *Res = nullptr;
// If we have a signed 10 bit integer, we can splat it directly.
//
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h
index ce594e1fb4fa..39f665be571e 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h
@@ -120,7 +120,7 @@ private:
/// power of 2.
bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const override;
/// Select constant vector splats whose value is a run of set bits
- /// ending at the most significant bit
+ /// ending at the most significant bit.
bool selectVSplatMaskL(SDValue N, SDValue &Imm) const override;
/// Select constant vector splats whose value is a run of set bits
/// starting at bit zero.
@@ -128,6 +128,10 @@ private:
bool trySelect(SDNode *Node) override;
+ // Emits proper ABI for _mcount profiling calls.
+ void emitMCountABI(MachineInstr &MI, MachineBasicBlock &MBB,
+ MachineFunction &MF);
+
void processFunctionAfterISel(MachineFunction &MF) override;
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp
index edf57a3840d1..5bd234f955ba 100644
--- a/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -71,8 +71,8 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
if (Subtarget.hasDSP() || Subtarget.hasMSA()) {
// Expand all truncating stores and extending loads.
- for (MVT VT0 : MVT::vector_valuetypes()) {
- for (MVT VT1 : MVT::vector_valuetypes()) {
+ for (MVT VT0 : MVT::fixedlen_vector_valuetypes()) {
+ for (MVT VT1 : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(VT0, VT1, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT0, VT1, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT0, VT1, Expand);
@@ -327,6 +327,7 @@ addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
+ setOperationAction(ISD::UNDEF, Ty, Legal);
setOperationAction(ISD::ADD, Ty, Legal);
setOperationAction(ISD::AND, Ty, Legal);
@@ -2595,7 +2596,8 @@ static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
SDLoc DL(Op);
return DAG.getNode(MipsISD::SHF, DL, ResTy,
- DAG.getConstant(Imm, DL, MVT::i32), Op->getOperand(0));
+ DAG.getTargetConstant(Imm, DL, MVT::i32),
+ Op->getOperand(0));
}
/// Determine whether a range fits a regular pattern of values.
@@ -3062,13 +3064,13 @@ MipsSETargetLowering::emitBPOSGE32(MachineInstr &MI,
BuildMI(BB, DL, TII->get(Mips::BPOSGE32C_MMR3)).addMBB(TBB);
// Fill $FBB.
- unsigned VR2 = RegInfo.createVirtualRegister(RC);
+ Register VR2 = RegInfo.createVirtualRegister(RC);
BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
.addReg(Mips::ZERO).addImm(0);
BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
// Fill $TBB.
- unsigned VR1 = RegInfo.createVirtualRegister(RC);
+ Register VR1 = RegInfo.createVirtualRegister(RC);
BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
.addReg(Mips::ZERO).addImm(1);
@@ -3131,13 +3133,13 @@ MachineBasicBlock *MipsSETargetLowering::emitMSACBranchPseudo(
.addMBB(TBB);
// Fill $FBB.
- unsigned RD1 = RegInfo.createVirtualRegister(RC);
+ Register RD1 = RegInfo.createVirtualRegister(RC);
BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
.addReg(Mips::ZERO).addImm(0);
BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
// Fill $TBB.
- unsigned RD2 = RegInfo.createVirtualRegister(RC);
+ Register RD2 = RegInfo.createVirtualRegister(RC);
BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
.addReg(Mips::ZERO).addImm(1);
@@ -3169,8 +3171,8 @@ MipsSETargetLowering::emitCOPY_FW(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Fd = MI.getOperand(0).getReg();
- unsigned Ws = MI.getOperand(1).getReg();
+ Register Fd = MI.getOperand(0).getReg();
+ Register Ws = MI.getOperand(1).getReg();
unsigned Lane = MI.getOperand(2).getImm();
if (Lane == 0) {
@@ -3185,9 +3187,9 @@ MipsSETargetLowering::emitCOPY_FW(MachineInstr &MI,
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
} else {
- unsigned Wt = RegInfo.createVirtualRegister(
- Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass :
- &Mips::MSA128WEvensRegClass);
+ Register Wt = RegInfo.createVirtualRegister(
+ Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass
+ : &Mips::MSA128WEvensRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
@@ -3214,15 +3216,15 @@ MipsSETargetLowering::emitCOPY_FD(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- unsigned Fd = MI.getOperand(0).getReg();
- unsigned Ws = MI.getOperand(1).getReg();
+ Register Fd = MI.getOperand(0).getReg();
+ Register Ws = MI.getOperand(1).getReg();
unsigned Lane = MI.getOperand(2).getImm() * 2;
DebugLoc DL = MI.getDebugLoc();
if (Lane == 0)
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64);
else {
- unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+ Register Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64);
@@ -3244,13 +3246,13 @@ MipsSETargetLowering::emitINSERT_FW(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Wd = MI.getOperand(0).getReg();
- unsigned Wd_in = MI.getOperand(1).getReg();
+ Register Wd = MI.getOperand(0).getReg();
+ Register Wd_in = MI.getOperand(1).getReg();
unsigned Lane = MI.getOperand(2).getImm();
- unsigned Fs = MI.getOperand(3).getReg();
- unsigned Wt = RegInfo.createVirtualRegister(
- Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass :
- &Mips::MSA128WEvensRegClass);
+ Register Fs = MI.getOperand(3).getReg();
+ Register Wt = RegInfo.createVirtualRegister(
+ Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass
+ : &Mips::MSA128WEvensRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
.addImm(0)
@@ -3280,11 +3282,11 @@ MipsSETargetLowering::emitINSERT_FD(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Wd = MI.getOperand(0).getReg();
- unsigned Wd_in = MI.getOperand(1).getReg();
+ Register Wd = MI.getOperand(0).getReg();
+ Register Wd_in = MI.getOperand(1).getReg();
unsigned Lane = MI.getOperand(2).getImm();
- unsigned Fs = MI.getOperand(3).getReg();
- unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+ Register Fs = MI.getOperand(3).getReg();
+ Register Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
.addImm(0)
@@ -3326,10 +3328,10 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX(
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Wd = MI.getOperand(0).getReg();
- unsigned SrcVecReg = MI.getOperand(1).getReg();
- unsigned LaneReg = MI.getOperand(2).getReg();
- unsigned SrcValReg = MI.getOperand(3).getReg();
+ Register Wd = MI.getOperand(0).getReg();
+ Register SrcVecReg = MI.getOperand(1).getReg();
+ Register LaneReg = MI.getOperand(2).getReg();
+ Register SrcValReg = MI.getOperand(3).getReg();
const TargetRegisterClass *VecRC = nullptr;
// FIXME: This should be true for N32 too.
@@ -3370,7 +3372,7 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX(
}
if (IsFP) {
- unsigned Wt = RegInfo.createVirtualRegister(VecRC);
+ Register Wt = RegInfo.createVirtualRegister(VecRC);
BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
.addImm(0)
.addReg(SrcValReg)
@@ -3380,7 +3382,7 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX(
// Convert the lane index into a byte index
if (EltSizeInBytes != 1) {
- unsigned LaneTmp1 = RegInfo.createVirtualRegister(GPRRC);
+ Register LaneTmp1 = RegInfo.createVirtualRegister(GPRRC);
BuildMI(*BB, MI, DL, TII->get(ShiftOp), LaneTmp1)
.addReg(LaneReg)
.addImm(EltLog2Size);
@@ -3388,13 +3390,13 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX(
}
// Rotate bytes around so that the desired lane is element zero
- unsigned WdTmp1 = RegInfo.createVirtualRegister(VecRC);
+ Register WdTmp1 = RegInfo.createVirtualRegister(VecRC);
BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), WdTmp1)
.addReg(SrcVecReg)
.addReg(SrcVecReg)
.addReg(LaneReg, 0, SubRegIdx);
- unsigned WdTmp2 = RegInfo.createVirtualRegister(VecRC);
+ Register WdTmp2 = RegInfo.createVirtualRegister(VecRC);
if (IsFP) {
// Use insve.df to insert to element zero
BuildMI(*BB, MI, DL, TII->get(InsveOp), WdTmp2)
@@ -3413,7 +3415,7 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX(
// Rotate elements the rest of the way for a full rotation.
// sld.df inteprets $rt modulo the number of columns so we only need to negate
// the lane index to do this.
- unsigned LaneTmp2 = RegInfo.createVirtualRegister(GPRRC);
+ Register LaneTmp2 = RegInfo.createVirtualRegister(GPRRC);
BuildMI(*BB, MI, DL, TII->get(Subtarget.isABI_N64() ? Mips::DSUB : Mips::SUB),
LaneTmp2)
.addReg(Subtarget.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO)
@@ -3440,12 +3442,12 @@ MipsSETargetLowering::emitFILL_FW(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Wd = MI.getOperand(0).getReg();
- unsigned Fs = MI.getOperand(1).getReg();
- unsigned Wt1 = RegInfo.createVirtualRegister(
+ Register Wd = MI.getOperand(0).getReg();
+ Register Fs = MI.getOperand(1).getReg();
+ Register Wt1 = RegInfo.createVirtualRegister(
Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass
: &Mips::MSA128WEvensRegClass);
- unsigned Wt2 = RegInfo.createVirtualRegister(
+ Register Wt2 = RegInfo.createVirtualRegister(
Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass
: &Mips::MSA128WEvensRegClass);
@@ -3475,10 +3477,10 @@ MipsSETargetLowering::emitFILL_FD(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Wd = MI.getOperand(0).getReg();
- unsigned Fs = MI.getOperand(1).getReg();
- unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
- unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+ Register Wd = MI.getOperand(0).getReg();
+ Register Fs = MI.getOperand(1).getReg();
+ Register Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+ Register Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
@@ -3509,8 +3511,8 @@ MipsSETargetLowering::emitST_F16_PSEUDO(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Ws = MI.getOperand(0).getReg();
- unsigned Rt = MI.getOperand(1).getReg();
+ Register Ws = MI.getOperand(0).getReg();
+ Register Rt = MI.getOperand(1).getReg();
const MachineMemOperand &MMO = **MI.memoperands_begin();
unsigned Imm = MMO.getOffset();
@@ -3522,11 +3524,11 @@ MipsSETargetLowering::emitST_F16_PSEUDO(MachineInstr &MI,
: (Subtarget.isABI_O32() ? &Mips::GPR32RegClass
: &Mips::GPR64RegClass);
const bool UsingMips32 = RC == &Mips::GPR32RegClass;
- unsigned Rs = RegInfo.createVirtualRegister(&Mips::GPR32RegClass);
+ Register Rs = RegInfo.createVirtualRegister(&Mips::GPR32RegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY_U_H), Rs).addReg(Ws).addImm(0);
if(!UsingMips32) {
- unsigned Tmp = RegInfo.createVirtualRegister(&Mips::GPR64RegClass);
+ Register Tmp = RegInfo.createVirtualRegister(&Mips::GPR64RegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Tmp)
.addImm(0)
.addReg(Rs)
@@ -3564,7 +3566,7 @@ MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Wd = MI.getOperand(0).getReg();
+ Register Wd = MI.getOperand(0).getReg();
// Caution: A load via the GOT can expand to a GPR32 operand, a load via
// spill and reload can expand as a GPR64 operand. Examine the
@@ -3575,7 +3577,7 @@ MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI,
: &Mips::GPR64RegClass);
const bool UsingMips32 = RC == &Mips::GPR32RegClass;
- unsigned Rt = RegInfo.createVirtualRegister(RC);
+ Register Rt = RegInfo.createVirtualRegister(RC);
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? Mips::LH : Mips::LH64), Rt);
@@ -3583,7 +3585,7 @@ MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI,
MIB.add(MI.getOperand(i));
if(!UsingMips32) {
- unsigned Tmp = RegInfo.createVirtualRegister(&Mips::GPR32RegClass);
+ Register Tmp = RegInfo.createVirtualRegister(&Mips::GPR32RegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Tmp).addReg(Rt, 0, Mips::sub_32);
Rt = Tmp;
}
@@ -3658,11 +3660,11 @@ MipsSETargetLowering::emitFPROUND_PSEUDO(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Wd = MI.getOperand(0).getReg();
- unsigned Fs = MI.getOperand(1).getReg();
+ Register Wd = MI.getOperand(0).getReg();
+ Register Fs = MI.getOperand(1).getReg();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- unsigned Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
const TargetRegisterClass *GPRRC =
IsFGR64onMips64 ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
unsigned MFC1Opc = IsFGR64onMips64
@@ -3671,16 +3673,16 @@ MipsSETargetLowering::emitFPROUND_PSEUDO(MachineInstr &MI,
unsigned FILLOpc = IsFGR64onMips64 ? Mips::FILL_D : Mips::FILL_W;
// Perform the register class copy as mentioned above.
- unsigned Rtemp = RegInfo.createVirtualRegister(GPRRC);
+ Register Rtemp = RegInfo.createVirtualRegister(GPRRC);
BuildMI(*BB, MI, DL, TII->get(MFC1Opc), Rtemp).addReg(Fs);
BuildMI(*BB, MI, DL, TII->get(FILLOpc), Wtemp).addReg(Rtemp);
unsigned WPHI = Wtemp;
if (IsFGR64onMips32) {
- unsigned Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
+ Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
BuildMI(*BB, MI, DL, TII->get(Mips::MFHC1_D64), Rtemp2).addReg(Fs);
- unsigned Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
- unsigned Wtemp3 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ Register Wtemp3 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_W), Wtemp2)
.addReg(Wtemp)
.addReg(Rtemp2)
@@ -3693,7 +3695,7 @@ MipsSETargetLowering::emitFPROUND_PSEUDO(MachineInstr &MI,
}
if (IsFGR64) {
- unsigned Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::FEXDO_W), Wtemp2)
.addReg(WPHI)
.addReg(WPHI);
@@ -3817,8 +3819,8 @@ MipsSETargetLowering::emitFEXP2_W_1(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *RC = &Mips::MSA128WRegClass;
- unsigned Ws1 = RegInfo.createVirtualRegister(RC);
- unsigned Ws2 = RegInfo.createVirtualRegister(RC);
+ Register Ws1 = RegInfo.createVirtualRegister(RC);
+ Register Ws2 = RegInfo.createVirtualRegister(RC);
DebugLoc DL = MI.getDebugLoc();
// Splat 1.0 into a vector
@@ -3846,8 +3848,8 @@ MipsSETargetLowering::emitFEXP2_D_1(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *RC = &Mips::MSA128DRegClass;
- unsigned Ws1 = RegInfo.createVirtualRegister(RC);
- unsigned Ws2 = RegInfo.createVirtualRegister(RC);
+ Register Ws1 = RegInfo.createVirtualRegister(RC);
+ Register Ws2 = RegInfo.createVirtualRegister(RC);
DebugLoc DL = MI.getDebugLoc();
// Splat 1.0 into a vector
diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp
index 4e49f5e7d9d1..2126a1bda493 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -628,7 +628,7 @@ unsigned MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
// The first instruction can be a LUi, which is different from other
// instructions (ADDiu, ORI and SLL) in that it does not have a register
// operand.
- unsigned Reg = RegInfo.createVirtualRegister(RC);
+ Register Reg = RegInfo.createVirtualRegister(RC);
if (Inst->Opc == LUi)
BuildMI(MBB, II, DL, get(LUi), Reg).addImm(SignExtend64<16>(Inst->ImmOpnd));
@@ -734,9 +734,9 @@ void MipsSEInstrInfo::expandPseudoMTLoHi(MachineBasicBlock &MBB,
// Add lo/hi registers if the mtlo/hi instructions created have explicit
// def registers.
if (HasExplicitDef) {
- unsigned DstReg = I->getOperand(0).getReg();
- unsigned DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
- unsigned DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi);
+ Register DstReg = I->getOperand(0).getReg();
+ Register DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
+ Register DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi);
LoInst.addReg(DstLo, RegState::Define);
HiInst.addReg(DstHi, RegState::Define);
}
@@ -773,14 +773,14 @@ void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
bool isMicroMips,
bool FP64) const {
- unsigned DstReg = I->getOperand(0).getReg();
- unsigned SrcReg = I->getOperand(1).getReg();
+ Register DstReg = I->getOperand(0).getReg();
+ Register SrcReg = I->getOperand(1).getReg();
unsigned N = I->getOperand(2).getImm();
DebugLoc dl = I->getDebugLoc();
assert(N < 2 && "Invalid immediate");
unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
- unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
+ Register SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
// FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
// in MipsSEFrameLowering.cpp.
@@ -815,7 +815,7 @@ void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
bool isMicroMips, bool FP64) const {
- unsigned DstReg = I->getOperand(0).getReg();
+ Register DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
DebugLoc dl = I->getDebugLoc();
@@ -883,8 +883,8 @@ void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
unsigned RA = Subtarget.isGP64bit() ? Mips::RA_64 : Mips::RA;
unsigned T9 = Subtarget.isGP64bit() ? Mips::T9_64 : Mips::T9;
unsigned ZERO = Subtarget.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
- unsigned OffsetReg = I->getOperand(0).getReg();
- unsigned TargetReg = I->getOperand(1).getReg();
+ Register OffsetReg = I->getOperand(0).getReg();
+ Register TargetReg = I->getOperand(1).getReg();
// addu $ra, $v0, $zero
// addu $sp, $sp, $v1
diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp
index f4b164d5c0ab..a48088c28919 100644
--- a/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -212,11 +212,9 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
// element size), otherwise it is a 16-bit signed immediate.
unsigned OffsetBitSize =
getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1));
- unsigned OffsetAlign = getLoadStoreOffsetAlign(MI.getOpcode());
-
+ const Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode()));
if (OffsetBitSize < 16 && isInt<16>(Offset) &&
- (!isIntN(OffsetBitSize, Offset) ||
- OffsetToAlignment(Offset, OffsetAlign) != 0)) {
+ (!isIntN(OffsetBitSize, Offset) || !isAligned(OffsetAlign, Offset))) {
// If we have an offset that needs to fit into a signed n-bit immediate
// (where n < 16) and doesn't, but does fit into 16-bits then use an ADDiu
MachineBasicBlock &MBB = *MI.getParent();
@@ -224,7 +222,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
const TargetRegisterClass *PtrRC =
ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
- unsigned Reg = RegInfo.createVirtualRegister(PtrRC);
+ Register Reg = RegInfo.createVirtualRegister(PtrRC);
const MipsSEInstrInfo &TII =
*static_cast<const MipsSEInstrInfo *>(
MBB.getParent()->getSubtarget().getInstrInfo());
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index d021b3d021b1..b9245c9fc0eb 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -69,7 +69,7 @@ void MipsSubtarget::anchor() {}
MipsSubtarget::MipsSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
bool little, const MipsTargetMachine &TM,
- unsigned StackAlignOverride)
+ MaybeAlign StackAlignOverride)
: MipsGenSubtargetInfo(TT, CPU, FS), MipsArchVersion(MipsDefault),
IsLittle(little), IsSoftFloat(false), IsSingleFloat(false), IsFPXX(false),
NoABICalls(false), Abs2008(false), IsFP64bit(false), UseOddSPReg(true),
@@ -81,10 +81,9 @@ MipsSubtarget::MipsSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
Os16(Mips_Os16), HasMSA(false), UseTCCInDIV(false), HasSym32(false),
HasEVA(false), DisableMadd4(false), HasMT(false), HasCRC(false),
HasVirt(false), HasGINV(false), UseIndirectJumpsHazard(false),
- StackAlignOverride(StackAlignOverride),
- TM(TM), TargetTriple(TT), TSInfo(),
- InstrInfo(
- MipsInstrInfo::create(initializeSubtargetDependencies(CPU, FS, TM))),
+ StackAlignOverride(StackAlignOverride), TM(TM), TargetTriple(TT),
+ TSInfo(), InstrInfo(MipsInstrInfo::create(
+ initializeSubtargetDependencies(CPU, FS, TM))),
FrameLowering(MipsFrameLowering::create(*this)),
TLInfo(MipsTargetLowering::create(TM, *this)) {
@@ -248,12 +247,12 @@ MipsSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS,
InMips16HardFloat = true;
if (StackAlignOverride)
- stackAlignment = StackAlignOverride;
+ stackAlignment = *StackAlignOverride;
else if (isABI_N32() || isABI_N64())
- stackAlignment = 16;
+ stackAlignment = Align(16);
else {
assert(isABI_O32() && "Unknown ABI for stack alignment!");
- stackAlignment = 8;
+ stackAlignment = Align(8);
}
return *this;
@@ -286,6 +285,6 @@ const RegisterBankInfo *MipsSubtarget::getRegBankInfo() const {
return RegBankInfo.get();
}
-const InstructionSelector *MipsSubtarget::getInstructionSelector() const {
+InstructionSelector *MipsSubtarget::getInstructionSelector() const {
return InstSelector.get();
}
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index aa1200579fc8..0a8c2ef8ae5c 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -189,12 +189,15 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
// Disable use of the `jal` instruction.
bool UseLongCalls = false;
+ // Assume 32-bit GOT.
+ bool UseXGOT = false;
+
/// The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
- unsigned stackAlignment;
+ Align stackAlignment;
/// The overridden stack alignment.
- unsigned StackAlignOverride;
+ MaybeAlign StackAlignOverride;
InstrItineraryData InstrItins;
@@ -227,7 +230,7 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
MipsSubtarget(const Triple &TT, StringRef CPU, StringRef FS, bool little,
- const MipsTargetMachine &TM, unsigned StackAlignOverride);
+ const MipsTargetMachine &TM, MaybeAlign StackAlignOverride);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -323,6 +326,8 @@ public:
bool useLongCalls() const { return UseLongCalls; }
+ bool useXGOT() const { return UseXGOT; }
+
bool enableLongBranchPass() const {
return hasStandardEncoding() || inMicroMipsMode() || allowMixed16_32();
}
@@ -344,7 +349,7 @@ public:
// really use them if in addition we are in mips16 mode
static bool useConstantIslands();
- unsigned getStackAlignment() const { return stackAlignment; }
+ Align getStackAlignment() const { return stackAlignment; }
// Grab relocation model
Reloc::Model getRelocationModel() const;
@@ -391,7 +396,7 @@ public:
const CallLowering *getCallLowering() const override;
const LegalizerInfo *getLegalizerInfo() const override;
const RegisterBankInfo *getRegBankInfo() const override;
- const InstructionSelector *getInstructionSelector() const override;
+ InstructionSelector *getInstructionSelector() const override;
};
} // End llvm namespace
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index c878abb042e4..e58f316791ba 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -117,14 +117,17 @@ MipsTargetMachine::MipsTargetMachine(const Target &T, const Triple &TT,
: LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
CPU, FS, Options, getEffectiveRelocModel(JIT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
- isLittle(isLittle), TLOF(llvm::make_unique<MipsTargetObjectFile>()),
+ isLittle(isLittle), TLOF(std::make_unique<MipsTargetObjectFile>()),
ABI(MipsABIInfo::computeTargetABI(TT, CPU, Options.MCOptions)),
- Subtarget(nullptr), DefaultSubtarget(TT, CPU, FS, isLittle, *this,
- Options.StackAlignmentOverride),
+ Subtarget(nullptr),
+ DefaultSubtarget(TT, CPU, FS, isLittle, *this,
+ MaybeAlign(Options.StackAlignmentOverride)),
NoMips16Subtarget(TT, CPU, FS.empty() ? "-mips16" : FS.str() + ",-mips16",
- isLittle, *this, Options.StackAlignmentOverride),
+ isLittle, *this,
+ MaybeAlign(Options.StackAlignmentOverride)),
Mips16Subtarget(TT, CPU, FS.empty() ? "+mips16" : FS.str() + ",+mips16",
- isLittle, *this, Options.StackAlignmentOverride) {
+ isLittle, *this,
+ MaybeAlign(Options.StackAlignmentOverride)) {
Subtarget = &DefaultSubtarget;
initAsmInfo();
}
@@ -196,8 +199,9 @@ MipsTargetMachine::getSubtargetImpl(const Function &F) const {
// creation will depend on the TM and the code generation flags on the
// function that reside in TargetOptions.
resetTargetOptions(F);
- I = llvm::make_unique<MipsSubtarget>(TargetTriple, CPU, FS, isLittle, *this,
- Options.StackAlignmentOverride);
+ I = std::make_unique<MipsSubtarget>(
+ TargetTriple, CPU, FS, isLittle, *this,
+ MaybeAlign(Options.StackAlignmentOverride));
}
return I.get();
}
diff --git a/lib/Target/Mips/MipsTargetStreamer.h b/lib/Target/Mips/MipsTargetStreamer.h
index 1fa8ebadd643..298d056ce2c3 100644
--- a/lib/Target/Mips/MipsTargetStreamer.h
+++ b/lib/Target/Mips/MipsTargetStreamer.h
@@ -130,6 +130,8 @@ public:
SMLoc IDLoc, const MCSubtargetInfo *STI);
void emitRRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, unsigned Reg2,
SMLoc IDLoc, const MCSubtargetInfo *STI);
+ void emitRRRX(unsigned Opcode, unsigned Reg0, unsigned Reg1, unsigned Reg2,
+ MCOperand Op3, SMLoc IDLoc, const MCSubtargetInfo *STI);
void emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm,
SMLoc IDLoc, const MCSubtargetInfo *STI);
void emitRRIII(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm0,
@@ -154,17 +156,13 @@ public:
unsigned BaseReg, int64_t Offset,
function_ref<unsigned()> GetATReg, SMLoc IDLoc,
const MCSubtargetInfo *STI);
- void emitStoreWithSymOffset(unsigned Opcode, unsigned SrcReg,
- unsigned BaseReg, MCOperand &HiOperand,
- MCOperand &LoOperand, unsigned ATReg, SMLoc IDLoc,
- const MCSubtargetInfo *STI);
+ void emitSCWithSymOffset(unsigned Opcode, unsigned SrcReg, unsigned BaseReg,
+ MCOperand &HiOperand, MCOperand &LoOperand,
+ unsigned ATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
void emitLoadWithImmOffset(unsigned Opcode, unsigned DstReg, unsigned BaseReg,
int64_t Offset, unsigned TmpReg, SMLoc IDLoc,
const MCSubtargetInfo *STI);
- void emitLoadWithSymOffset(unsigned Opcode, unsigned DstReg, unsigned BaseReg,
- MCOperand &HiOperand, MCOperand &LoOperand,
- unsigned ATReg, SMLoc IDLoc,
- const MCSubtargetInfo *STI);
void emitGPRestore(int Offset, SMLoc IDLoc, const MCSubtargetInfo *STI);
void forbidModuleDirective() { ModuleDirectiveAllowed = false; }