path: root/llvm/lib/Target/Sparc
Diffstat (limited to 'llvm/lib/Target/Sparc')
-rw-r--r--  llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp | 14
-rw-r--r--  llvm/lib/Target/Sparc/DelaySlotFiller.cpp | 3
-rw-r--r--  llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp | 2
-rw-r--r--  llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp | 19
-rw-r--r--  llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp | 2
-rw-r--r--  llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h | 3
-rw-r--r--  llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp | 8
-rw-r--r--  llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp | 15
-rw-r--r--  llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h | 2
-rw-r--r--  llvm/lib/Target/Sparc/Sparc.td | 2
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp | 119
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.h | 3
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstr64Bit.td | 33
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrAliases.td | 30
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrFormats.td | 5
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.cpp | 158
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.h | 9
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.td | 18
-rw-r--r--  llvm/lib/Target/Sparc/SparcSubtarget.cpp | 22
-rw-r--r--  llvm/lib/Target/Sparc/SparcSubtarget.h | 50
-rw-r--r--  llvm/lib/Target/Sparc/SparcTargetMachine.cpp | 8
22 files changed, 314 insertions(+), 213 deletions(-)
diff --git a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index 4f94392d4dae..9bfee26db806 100644
--- a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -12,7 +12,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
@@ -31,6 +30,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -76,7 +76,7 @@ class SparcAsmParser : public MCTargetAsmParser {
SMLoc &EndLoc) override;
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
- bool ParseDirective(AsmToken DirectiveID) override;
+ ParseStatus parseDirective(AsmToken DirectiveID) override;
unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
unsigned Kind) override;
@@ -769,25 +769,23 @@ bool SparcAsmParser::ParseInstruction(ParseInstructionInfo &Info,
return false;
}
-bool SparcAsmParser::
-ParseDirective(AsmToken DirectiveID)
-{
+ParseStatus SparcAsmParser::parseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getString();
if (IDVal == ".register") {
// For now, ignore .register directive.
Parser.eatToEndOfStatement();
- return false;
+ return ParseStatus::Success;
}
if (IDVal == ".proc") {
// For compatibility, ignore this directive.
// (It's supposed to be an "optimization" in the Sun assembler)
Parser.eatToEndOfStatement();
- return false;
+ return ParseStatus::Success;
}
// Let the MC layer to handle other directives.
- return true;
+ return ParseStatus::NoMatch;
}
OperandMatchResultTy
diff --git a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
index cc132d46de85..7e129101fefc 100644
--- a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -250,8 +250,7 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
return true;
}
- for (unsigned i = 0, e = candidate->getNumOperands(); i!= e; ++i) {
- const MachineOperand &MO = candidate->getOperand(i);
+ for (const MachineOperand &MO : candidate->operands()) {
if (!MO.isReg())
continue; // skip
diff --git a/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp b/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
index 496c08f76a16..b7581c1979d8 100644
--- a/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
+++ b/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
@@ -302,7 +302,7 @@ DecodeStatus SparcDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
// Calling the auto-generated decoder function.
- if (STI.getFeatureBits()[Sparc::FeatureV9])
+ if (STI.hasFeature(Sparc::FeatureV9))
{
Result = decodeInstruction(DecoderTableSparcV932, Instr, Insn, Address, this, STI);
}
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
index aa89488bbb62..2c0696e8048b 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -41,11 +41,14 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Sparc::fixup_sparc_br19:
return (Value >> 2) & 0x7ffff;
- case Sparc::fixup_sparc_br16_2:
- return (Value >> 2) & 0xc000;
-
- case Sparc::fixup_sparc_br16_14:
- return (Value >> 2) & 0x3fff;
+ case Sparc::fixup_sparc_br16: {
+ // A.3 Branch on Integer Register with Prediction (BPr)
+ // Inst{21-20} = d16hi;
+ // Inst{13-0} = d16lo;
+ unsigned d16hi = (Value >> 16) & 0x3;
+ unsigned d16lo = (Value >> 2) & 0x3fff;
+ return (d16hi << 20) | d16lo;
+ }
case Sparc::fixup_sparc_hix22:
return (~Value >> 10) & 0x3fffff;
@@ -164,8 +167,7 @@ namespace {
{ "fixup_sparc_call30", 2, 30, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_br22", 10, 22, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_br19", 13, 19, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_sparc_br16_2", 10, 2, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_sparc_br16_14", 18, 14, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_13", 19, 13, 0 },
{ "fixup_sparc_hi22", 10, 22, 0 },
{ "fixup_sparc_lo10", 22, 10, 0 },
@@ -211,8 +213,7 @@ namespace {
{ "fixup_sparc_call30", 0, 30, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_br22", 0, 22, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_br19", 0, 19, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_sparc_br16_2", 20, 2, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_sparc_br16_14", 0, 14, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16", 32, 0, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_13", 0, 13, 0 },
{ "fixup_sparc_hi22", 0, 22, 0 },
{ "fixup_sparc_lo10", 0, 10, 0 },
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
index 9c50c41f6bf2..c48beab01229 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
@@ -62,6 +62,8 @@ unsigned SparcELFObjectWriter::getRelocType(MCContext &Ctx,
case Sparc::fixup_sparc_call30: return ELF::R_SPARC_WDISP30;
case Sparc::fixup_sparc_br22: return ELF::R_SPARC_WDISP22;
case Sparc::fixup_sparc_br19: return ELF::R_SPARC_WDISP19;
+ case Sparc::fixup_sparc_br16:
+ return ELF::R_SPARC_WDISP16;
case Sparc::fixup_sparc_pc22: return ELF::R_SPARC_PC22;
case Sparc::fixup_sparc_pc10: return ELF::R_SPARC_PC10;
case Sparc::fixup_sparc_wplt30: return ELF::R_SPARC_WPLT30;
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h b/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
index 701d8513e657..3b9132658989 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
@@ -26,8 +26,7 @@ namespace llvm {
fixup_sparc_br19,
/// fixup_sparc_bpr - 16-bit fixup for bpr
- fixup_sparc_br16_2,
- fixup_sparc_br16_14,
+ fixup_sparc_br16,
/// fixup_sparc_13 - 13-bit fixup
fixup_sparc_13,
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
index fb22ddd91ba0..51a6732d05c6 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
@@ -35,7 +35,7 @@ namespace Sparc {
#include "SparcGenAsmWriter.inc"
bool SparcInstPrinter::isV9(const MCSubtargetInfo &STI) const {
- return (STI.getFeatureBits()[Sparc::FeatureV9]) != 0;
+ return (STI.hasFeature(Sparc::FeatureV9)) != 0;
}
void SparcInstPrinter::printRegName(raw_ostream &OS, MCRegister Reg) const {
@@ -178,6 +178,8 @@ void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
default: break;
case SP::FBCOND:
case SP::FBCONDA:
+ case SP::FBCOND_V9:
+ case SP::FBCONDA_V9:
case SP::BPFCC:
case SP::BPFCCA:
case SP::BPFCCNT:
@@ -195,6 +197,10 @@ void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
// Make sure CC is a cp conditional flag.
CC = (CC < SPCC::CPCC_BEGIN) ? (CC + SPCC::CPCC_BEGIN) : CC;
break;
+ case SP::BPR:
+ case SP::BPRA:
+ case SP::BPRNT:
+ case SP::BPRANT:
case SP::MOVRri:
case SP::MOVRrr:
case SP::FMOVRS:
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
index c4545ff56f74..f98a9dd138d4 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -12,11 +12,11 @@
#include "SparcMCAsmInfo.h"
#include "SparcMCExpr.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/TargetParser/Triple.h"
using namespace llvm;
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
index ee460002fc58..93c6365a8ddd 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
@@ -26,12 +26,12 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/SubtargetFeature.h"
#include <cassert>
#include <cstdint>
@@ -53,7 +53,7 @@ public:
SparcMCCodeEmitter &operator=(const SparcMCCodeEmitter &) = delete;
~SparcMCCodeEmitter() override = default;
- void encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
@@ -87,11 +87,12 @@ public:
} // end anonymous namespace
-void SparcMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
+void SparcMCCodeEmitter::encodeInstruction(const MCInst &MI,
+ SmallVectorImpl<char> &CB,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
unsigned Bits = getBinaryCodeForInstr(MI, Fixups, STI);
- support::endian::write(OS, Bits,
+ support::endian::write(CB, Bits,
Ctx.getAsmInfo()->isLittleEndian() ? support::little
: support::big);
@@ -235,10 +236,8 @@ getBranchOnRegTargetOpValue(const MCInst &MI, unsigned OpNo,
if (MO.isReg() || MO.isImm())
return getMachineOpValue(MI, MO, Fixups, STI);
- Fixups.push_back(MCFixup::create(0, MO.getExpr(),
- (MCFixupKind)Sparc::fixup_sparc_br16_2));
- Fixups.push_back(MCFixup::create(0, MO.getExpr(),
- (MCFixupKind)Sparc::fixup_sparc_br16_14));
+ Fixups.push_back(
+ MCFixup::create(0, MO.getExpr(), (MCFixupKind)Sparc::fixup_sparc_br16));
return 0;
}
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
index d98ad26c96a9..d26a748b6e53 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
@@ -108,8 +108,6 @@ public:
return E->getKind() == MCExpr::Target;
}
- static bool classof(const SparcMCExpr *) { return true; }
-
static VariantKind parseVariantKind(StringRef name);
static bool printVariantKind(raw_ostream &OS, VariantKind Kind);
static Sparc::Fixups getFixupKind(VariantKind Kind);
diff --git a/llvm/lib/Target/Sparc/Sparc.td b/llvm/lib/Target/Sparc/Sparc.td
index da95602309a1..4cc713abe046 100644
--- a/llvm/lib/Target/Sparc/Sparc.td
+++ b/llvm/lib/Target/Sparc/Sparc.td
@@ -34,7 +34,7 @@ def FeatureV9
: SubtargetFeature<"v9", "IsV9", "true",
"Enable SPARC-V9 instructions">;
def FeatureV8Deprecated
- : SubtargetFeature<"deprecated-v8", "V8DeprecatedInsts", "true",
+ : SubtargetFeature<"deprecated-v8", "UseV8DeprecatedInsts", "true",
"Enable deprecated V8 instructions in V9 mode">;
def FeatureVIS
: SubtargetFeature<"vis", "IsVIS", "true",
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 913f133465b9..0aa3c875a14f 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -268,7 +268,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
- SDValue Flag;
+ SDValue Glue;
SmallVector<SDValue, 4> RetOps(1, Chain);
// Make room for the return address offset.
RetOps.push_back(SDValue());
@@ -294,17 +294,17 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
Arg,
DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
- Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
- Flag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
+ Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
VA = RVLocs[++i]; // skip ahead to next loc
Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
- Flag);
+ Glue);
} else
- Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
// Guarantee that all emitted copies are stuck together with flags.
- Flag = Chain.getValue(1);
+ Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
@@ -317,8 +317,8 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
llvm_unreachable("sret virtual register not created in the entry block");
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
- Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
- Flag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
+ Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
}
@@ -326,11 +326,11 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
RetOps[0] = Chain; // Update chain.
RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
- // Add the flag if we have it.
- if (Flag.getNode())
- RetOps.push_back(Flag);
+ // Add the glue if we have it.
+ if (Glue.getNode())
+ RetOps.push_back(Glue);
- return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
+ return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}
// Lower return values for the 64-bit ABI.
@@ -351,7 +351,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
- SDValue Flag;
+ SDValue Glue;
SmallVector<SDValue, 4> RetOps(1, Chain);
// The second operand on the return instruction is the return address offset.
@@ -396,20 +396,20 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
}
}
- Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
// Guarantee that all emitted copies are stuck together with flags.
- Flag = Chain.getValue(1);
+ Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
RetOps[0] = Chain; // Update chain.
// Add the flag if we have it.
- if (Flag.getNode())
- RetOps.push_back(Flag);
+ if (Glue.getNode())
+ RetOps.push_back(Glue);
- return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
+ return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}
SDValue SparcTargetLowering::LowerFormalArguments(
@@ -584,7 +584,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32(
};
unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
- unsigned ArgOffset = CCInfo.getNextStackOffset();
+ unsigned ArgOffset = CCInfo.getStackSize();
if (NumAllocated == 6)
ArgOffset += StackOffset;
else {
@@ -703,7 +703,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_64(
//
// The va_start intrinsic needs to know the offset to the first variable
// argument.
- unsigned ArgOffset = CCInfo.getNextStackOffset();
+ unsigned ArgOffset = CCInfo.getStackSize();
SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
// Skip the 128 bytes of register save area.
FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
@@ -773,8 +773,8 @@ bool SparcTargetLowering::IsEligibleForTailCallOptimization(
// Do not tail call opt if the stack is used to pass parameters.
// 64-bit targets have a slightly higher limit since the ABI requires
// to allocate some space even when all the parameters fit inside registers.
- unsigned StackOffsetLimit = Subtarget->is64Bit() ? 48 : 0;
- if (CCInfo.getNextStackOffset() > StackOffsetLimit)
+ unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
+ if (CCInfo.getStackSize() > StackSizeLimit)
return false;
// Do not tail call opt if either the callee or caller returns
@@ -816,7 +816,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
CCInfo, CLI, DAG.getMachineFunction());
// Get the size of the outgoing arguments stack space requirement.
- unsigned ArgsSize = CCInfo.getNextStackOffset();
+ unsigned ArgsSize = CCInfo.getStackSize();
// Keep stack frames 8-byte aligned.
ArgsSize = (ArgsSize+7) & ~7;
@@ -1012,15 +1012,15 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
- // The InFlag in necessary since all emitted instructions must be
+ // The InGlue is necessary since all emitted instructions must be
// stuck together.
- SDValue InFlag;
+ SDValue InGlue;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Register Reg = RegsToPass[i].first;
if (!isTailCall)
Reg = toCallerWindow(Reg);
- Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
+ InGlue = Chain.getValue(1);
}
bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
@@ -1058,8 +1058,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
- if (InFlag.getNode())
- Ops.push_back(InFlag);
+ if (InGlue.getNode())
+ Ops.push_back(InGlue);
if (isTailCall) {
DAG.getMachineFunction().getFrameInfo().setHasTailCall();
@@ -1067,10 +1067,10 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
}
Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
- InFlag = Chain.getValue(1);
+ InGlue = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InFlag, dl);
- InFlag = Chain.getValue(1);
+ Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
+ InGlue = Chain.getValue(1);
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -1085,24 +1085,24 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
if (RVLocs[i].getLocVT() == MVT::v2i32) {
SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
SDValue Lo = DAG.getCopyFromReg(
- Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
+ Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
Chain = Lo.getValue(1);
- InFlag = Lo.getValue(2);
+ InGlue = Lo.getValue(2);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
DAG.getConstant(0, dl, MVT::i32));
SDValue Hi = DAG.getCopyFromReg(
- Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
+ Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
Chain = Hi.getValue(1);
- InFlag = Hi.getValue(2);
+ InGlue = Hi.getValue(2);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
DAG.getConstant(1, dl, MVT::i32));
InVals.push_back(Vec);
} else {
Chain =
DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
- RVLocs[i].getValVT(), InFlag)
+ RVLocs[i].getValVT(), InGlue)
.getValue(1);
- InFlag = Chain.getValue(2);
+ InGlue = Chain.getValue(2);
InVals.push_back(Chain.getValue(0));
}
}
@@ -1204,7 +1204,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Called functions expect 6 argument words to exist in the stack frame, used
// or not.
unsigned StackReserved = 6 * 8u;
- unsigned ArgsSize = std::max(StackReserved, CCInfo.getNextStackOffset());
+ unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
// Keep stack frames 16-byte aligned.
ArgsSize = alignTo(ArgsSize, 16);
@@ -1977,6 +1977,8 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
case SPISD::BRFCC: return "SPISD::BRFCC";
case SPISD::BRFCC_V9:
return "SPISD::BRFCC_V9";
+ case SPISD::BR_REG:
+ return "SPISD::BR_REG";
case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
@@ -1989,7 +1991,7 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
case SPISD::FTOX: return "SPISD::FTOX";
case SPISD::XTOF: return "SPISD::XTOF";
case SPISD::CALL: return "SPISD::CALL";
- case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
+ case SPISD::RET_GLUE: return "SPISD::RET_GLUE";
case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
case SPISD::FLUSHW: return "SPISD::FLUSHW";
case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
@@ -2029,7 +2031,7 @@ void SparcTargetLowering::computeKnownBitsForTargetNode
Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
// Only known if known in both the LHS and RHS.
- Known = KnownBits::commonBits(Known, Known2);
+ Known = Known.intersectWith(Known2);
break;
}
}
@@ -2200,11 +2202,11 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
withTargetFlags(Op, addTF, DAG));
SDValue Chain = DAG.getEntryNode();
- SDValue InFlag;
+ SDValue InGlue;
Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
- Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
- InFlag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
+ InGlue = Chain.getValue(1);
SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
SDValue Symbol = withTargetFlags(Op, callTF, DAG);
@@ -2217,12 +2219,12 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
Symbol,
DAG.getRegister(SP::O0, PtrVT),
DAG.getRegisterMask(Mask),
- InFlag};
+ InGlue};
Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
- InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InFlag, DL);
- InFlag = Chain.getValue(1);
- SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
+ InGlue = Chain.getValue(1);
+ Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
+ InGlue = Chain.getValue(1);
+ SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
if (model != TLSModel::LocalDynamic)
return Ret;
@@ -2582,7 +2584,7 @@ static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
const SparcTargetLowering &TLI, bool hasHardQuad,
- bool isV9) {
+ bool isV9, bool is64Bit) {
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
SDValue LHS = Op.getOperand(2);
@@ -2599,6 +2601,15 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
// Get the condition flag.
SDValue CompareFlag;
if (LHS.getValueType().isInteger()) {
+ // On V9 processors running in 64-bit mode, if CC compares two `i64`s
+ // and the RHS is zero, we might be able to use a specialized branch.
+ const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
+ if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 && RHSC &&
+ RHSC->isZero() && !ISD::isUnsignedIntSetCC(CC))
+ return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
+ DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
+ LHS);
+
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
if (isV9)
@@ -3144,10 +3155,8 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
SDValue MulResult = TLI.makeLibCall(DAG,
RTLIB::MUL_I128, WideVT,
Args, CallOptions, dl).first;
- SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
- MulResult, DAG.getIntPtrConstant(0, dl));
- SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
- MulResult, DAG.getIntPtrConstant(1, dl));
+ SDValue BottomHalf, TopHalf;
+ std::tie(BottomHalf, TopHalf) = DAG.SplitScalar(MulResult, dl, VT, VT);
if (isSigned) {
SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
@@ -3215,7 +3224,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
hasHardQuad);
case ISD::BR_CC:
- return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9);
+ return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
case ISD::SELECT_CC:
return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
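A reading aid for the LowerBR_CC change above: the new SPISD::BR_REG path is taken only for signed (or equality) comparisons of an i64 value against constant zero on 64-bit V9 targets, which is what the BPr instructions (brz, brlz, ...) can test directly. A standalone restatement of that guard with simplified stand-in types (canUseBranchOnRegister and the enum are illustrative, not LLVM API):

// Simplified stand-in for ISD::CondCode; only the codes needed here.
enum class CondCode { EQ, NE, LT, LE, GT, GE, ULT, ULE, UGT, UGE };

static bool isUnsignedIntSetCC(CondCode CC) {
  return CC == CondCode::ULT || CC == CondCode::ULE ||
         CC == CondCode::UGT || CC == CondCode::UGE;
}

// BPr only compares a register against zero with a signed interpretation,
// so unsigned comparisons still go through the compare-and-branch path.
static bool canUseBranchOnRegister(bool Is64Bit, bool IsV9, bool LHSIsI64,
                                   bool RHSIsConstantZero, CondCode CC) {
  return Is64Bit && IsV9 && LHSIsI64 && RHSIsConstantZero &&
         !isUnsignedIntSetCC(CC);
}

For example, an i64 "x < 0" branch can then select to a single brlz instead of a compare followed by a conditional branch on %xcc.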
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 563a832ee61e..5504dcd464fb 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -31,6 +31,7 @@ namespace llvm {
BPXCC, // Branch to dest on xcc condition, with prediction (64-bit only).
BRFCC, // Branch to dest on fcc condition
BRFCC_V9, // Branch to dest on fcc condition (v9 variant).
+ BR_REG, // Branch to dest using the comparison of a register with zero.
SELECT_ICC, // Select between two values using the current ICC flags.
SELECT_XCC, // Select between two values using the current XCC flags.
SELECT_FCC, // Select between two values using the current FCC flags.
@@ -46,7 +47,7 @@ namespace llvm {
XTOF, // Int64 to FP within a FP register.
CALL, // A call instruction.
- RET_FLAG, // Return with a flag operand.
+ RET_GLUE, // Return with a glue operand.
GLOBAL_BASE_REG, // Global base reg for PIC.
FLUSHW, // FLUSH register windows to stack.
diff --git a/llvm/lib/Target/Sparc/SparcInstr64Bit.td b/llvm/lib/Target/Sparc/SparcInstr64Bit.td
index 77f203fd0d68..0a6479487418 100644
--- a/llvm/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/llvm/lib/Target/Sparc/SparcInstr64Bit.td
@@ -356,15 +356,15 @@ def FMOVQ_XCC : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
// Branch On integer register with Prediction (BPr).
let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in
-multiclass BranchOnReg<bits<3> cond, string OpcStr> {
- def napt : F2_4<cond, 0, 1, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
- !strconcat(OpcStr, " $rs1, $imm16"), []>;
- def apt : F2_4<cond, 1, 1, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
- !strconcat(OpcStr, ",a $rs1, $imm16"), []>;
- def napn : F2_4<cond, 0, 0, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
- !strconcat(OpcStr, ",pn $rs1, $imm16"), []>;
- def apn : F2_4<cond, 1, 0, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
- !strconcat(OpcStr, ",a,pn $rs1, $imm16"), []>;
+multiclass BranchOnReg<list<dag> CCPattern> {
+ def R : F2_4<0, 1, (outs), (ins bprtarget16:$imm16, RegCCOp:$rcond, I64Regs:$rs1),
+ "br$rcond $rs1, $imm16", CCPattern>;
+ def RA : F2_4<1, 1, (outs), (ins bprtarget16:$imm16, RegCCOp:$rcond, I64Regs:$rs1),
+ "br$rcond,a $rs1, $imm16", []>;
+ def RNT : F2_4<0, 0, (outs), (ins bprtarget16:$imm16, RegCCOp:$rcond, I64Regs:$rs1),
+ "br$rcond,pn $rs1, $imm16", []>;
+ def RANT : F2_4<1, 0, (outs), (ins bprtarget16:$imm16, RegCCOp:$rcond, I64Regs:$rs1),
+ "br$rcond,a,pn $rs1, $imm16", []>;
}
multiclass bpr_alias<string OpcStr, Instruction NAPT, Instruction APT> {
@@ -374,19 +374,8 @@ multiclass bpr_alias<string OpcStr, Instruction NAPT, Instruction APT> {
(APT I64Regs:$rs1, bprtarget16:$imm16), 0>;
}
-defm BPZ : BranchOnReg<0b001, "brz">;
-defm BPLEZ : BranchOnReg<0b010, "brlez">;
-defm BPLZ : BranchOnReg<0b011, "brlz">;
-defm BPNZ : BranchOnReg<0b101, "brnz">;
-defm BPGZ : BranchOnReg<0b110, "brgz">;
-defm BPGEZ : BranchOnReg<0b111, "brgez">;
-
-defm : bpr_alias<"brz", BPZnapt, BPZapt >;
-defm : bpr_alias<"brlez", BPLEZnapt, BPLEZapt>;
-defm : bpr_alias<"brlz", BPLZnapt, BPLZapt >;
-defm : bpr_alias<"brnz", BPNZnapt, BPNZapt >;
-defm : bpr_alias<"brgz", BPGZnapt, BPGZapt >;
-defm : bpr_alias<"brgez", BPGEZnapt, BPGEZapt>;
+let Predicates = [Is64Bit] in
+ defm BP : BranchOnReg<[(SPbrreg bb:$imm16, imm:$rcond, i64:$rs1)]>;
// Move integer register on register condition (MOVr).
let Predicates = [Is64Bit], Constraints = "$f = $rd" in {
diff --git a/llvm/lib/Target/Sparc/SparcInstrAliases.td b/llvm/lib/Target/Sparc/SparcInstrAliases.td
index f10021321406..01c3696cc7bc 100644
--- a/llvm/lib/Target/Sparc/SparcInstrAliases.td
+++ b/llvm/lib/Target/Sparc/SparcInstrAliases.td
@@ -295,6 +295,36 @@ multiclass cp_cond_alias<string cond, int condVal> {
// Instruction aliases for register conditional branches and moves.
multiclass reg_cond_alias<string rcond, int condVal> {
+ // br<rcond> $rs1, $imm
+ def : InstAlias<!strconcat(!strconcat("br", rcond), " $rs1, $imm"),
+ (BPR bprtarget16:$imm, condVal, I64Regs:$rs1)>,
+ Requires<[Is64Bit]>;
+
+ // br<rcond>,pt $rs1, $imm
+ def : InstAlias<!strconcat(!strconcat("br", rcond), ",pt $rs1, $imm"),
+ (BPR bprtarget16:$imm, condVal, I64Regs:$rs1)>,
+ Requires<[Is64Bit]>;
+
+ // br<rcond>,pn $rs1, $imm
+ def : InstAlias<!strconcat(!strconcat("br", rcond), ",pn $rs1, $imm"),
+ (BPRNT bprtarget16:$imm, condVal, I64Regs:$rs1)>,
+ Requires<[Is64Bit]>;
+
+ // br<rcond>,a $rs1, $imm
+ def : InstAlias<!strconcat(!strconcat("br", rcond), ",a $rs1, $imm"),
+ (BPRA bprtarget16:$imm, condVal, I64Regs:$rs1)>,
+ Requires<[Is64Bit]>;
+
+ // br<rcond>,a,pt $rs1, $imm
+ def : InstAlias<!strconcat(!strconcat("br", rcond), ",a,pt $rs1, $imm"),
+ (BPRA bprtarget16:$imm, condVal, I64Regs:$rs1)>,
+ Requires<[Is64Bit]>;
+
+ // br<rcond>,a,pn $rs1, $imm
+ def : InstAlias<!strconcat(!strconcat("br", rcond), ",a,pn $rs1, $imm"),
+ (BPRANT bprtarget16:$imm, condVal, I64Regs:$rs1)>,
+ Requires<[Is64Bit]>;
+
defm : regcond_mov_alias<rcond, condVal,
MOVRrr, MOVRri,
FMOVRS, FMOVRD, FMOVRQ>,
diff --git a/llvm/lib/Target/Sparc/SparcInstrFormats.td b/llvm/lib/Target/Sparc/SparcInstrFormats.td
index 522dcd96a112..c67b591ab98a 100644
--- a/llvm/lib/Target/Sparc/SparcInstrFormats.td
+++ b/llvm/lib/Target/Sparc/SparcInstrFormats.td
@@ -83,17 +83,18 @@ class F2_3<bits<3> op2Val, bit annul, bit pred,
let Inst{18-0} = imm19;
}
-class F2_4<bits<3> cond, bit annul, bit pred, dag outs, dag ins,
+class F2_4<bit annul, bit pred, dag outs, dag ins,
string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
: InstSP<outs, ins, asmstr, pattern, itin> {
bits<16> imm16;
bits<5> rs1;
+ bits<3> rcond;
let op = 0; // op = 0
let Inst{29} = annul;
let Inst{28} = 0;
- let Inst{27-25} = cond;
+ let Inst{27-25} = rcond;
let Inst{24-22} = 0b011;
let Inst{21-20} = imm16{15-14};
let Inst{19} = pred;
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 63f662c41f93..90662cd87dcf 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -28,6 +28,14 @@ using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "SparcGenInstrInfo.inc"
+static cl::opt<unsigned> BPccDisplacementBits(
+ "sparc-bpcc-offset-bits", cl::Hidden, cl::init(19),
+ cl::desc("Restrict range of BPcc/FBPfcc instructions (DEBUG)"));
+
+static cl::opt<unsigned>
+ BPrDisplacementBits("sparc-bpr-offset-bits", cl::Hidden, cl::init(16),
+ cl::desc("Restrict range of BPr instructions (DEBUG)"));
+
// Pin the vtable to this file.
void SparcInstrInfo::anchor() {}
@@ -73,11 +81,6 @@ unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
return 0;
}
-static bool IsIntegerCC(unsigned CC)
-{
- return (CC <= SPCC::ICC_VC);
-}
-
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
switch(CC) {
@@ -155,9 +158,7 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
llvm_unreachable("Invalid cond code");
}
-static bool isUncondBranchOpcode(int Opc) {
- return Opc == SP::BA || Opc == SP::BPA;
-}
+static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }
static bool isI32CondBranchOpcode(int Opc) {
return Opc == SP::BCOND || Opc == SP::BPICC || Opc == SP::BPICCA ||
@@ -169,11 +170,19 @@ static bool isI64CondBranchOpcode(int Opc) {
Opc == SP::BPXCCANT;
}
-static bool isFCondBranchOpcode(int Opc) { return Opc == SP::FBCOND; }
+static bool isRegCondBranchOpcode(int Opc) {
+ return Opc == SP::BPR || Opc == SP::BPRA || Opc == SP::BPRNT ||
+ Opc == SP::BPRANT;
+}
+
+static bool isFCondBranchOpcode(int Opc) {
+ return Opc == SP::FBCOND || Opc == SP::FBCONDA || Opc == SP::FBCOND_V9 ||
+ Opc == SP::FBCONDA_V9;
+}
static bool isCondBranchOpcode(int Opc) {
return isI32CondBranchOpcode(Opc) || isI64CondBranchOpcode(Opc) ||
- isFCondBranchOpcode(Opc);
+ isRegCondBranchOpcode(Opc) || isFCondBranchOpcode(Opc);
}
static bool isIndirectBranchOpcode(int Opc) {
@@ -190,9 +199,48 @@ static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
Cond.push_back(MachineOperand::CreateImm(Opc));
Cond.push_back(MachineOperand::CreateImm(CC));
+ // Branches on register contents need another argument to indicate
+ // the register they branch on.
+ if (isRegCondBranchOpcode(Opc)) {
+ Register Reg = LastInst->getOperand(2).getReg();
+ Cond.push_back(MachineOperand::CreateReg(Reg, false));
+ }
+
Target = LastInst->getOperand(0).getMBB();
}
+MachineBasicBlock *
+SparcInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("unexpected opcode!");
+ case SP::BA:
+ case SP::BCOND:
+ case SP::BCONDA:
+ case SP::FBCOND:
+ case SP::FBCONDA:
+ case SP::BPICC:
+ case SP::BPICCA:
+ case SP::BPICCNT:
+ case SP::BPICCANT:
+ case SP::BPXCC:
+ case SP::BPXCCA:
+ case SP::BPXCCNT:
+ case SP::BPXCCANT:
+ case SP::BPFCC:
+ case SP::BPFCCA:
+ case SP::BPFCCNT:
+ case SP::BPFCCANT:
+ case SP::FBCOND_V9:
+ case SP::FBCONDA_V9:
+ case SP::BPR:
+ case SP::BPRA:
+ case SP::BPRNT:
+ case SP::BPRANT:
+ return MI.getOperand(0).getMBB();
+ }
+}
+
bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -283,38 +331,44 @@ unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
const DebugLoc &DL,
int *BytesAdded) const {
assert(TBB && "insertBranch must not be told to insert a fallthrough");
- assert((Cond.size() <= 2) &&
- "Sparc branch conditions should have at most two components!");
- assert(!BytesAdded && "code size not handled");
+ assert((Cond.size() <= 3) &&
+ "Sparc branch conditions should have at most three components!");
if (Cond.empty()) {
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, DL, get(Subtarget.isV9() ? SP::BPA : SP::BA)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
+ if (BytesAdded)
+ *BytesAdded = 8;
return 1;
}
// Conditional branch
unsigned Opc = Cond[0].getImm();
unsigned CC = Cond[1].getImm();
-
- if (IsIntegerCC(CC)) {
- BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);
+ if (isRegCondBranchOpcode(Opc)) {
+ Register Reg = Cond[2].getReg();
+ BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC).addReg(Reg);
} else {
- BuildMI(&MBB, DL, get(SP::FBCOND)).addMBB(TBB).addImm(CC);
+ BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);
}
- if (!FBB)
+
+ if (!FBB) {
+ if (BytesAdded)
+ *BytesAdded = 8;
return 1;
+ }
- BuildMI(&MBB, DL, get(Subtarget.isV9() ? SP::BPA : SP::BA)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
+ if (BytesAdded)
+ *BytesAdded = 16;
return 2;
}
unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
- assert(!BytesRemoved && "code size not handled");
-
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
+ int Removed = 0;
while (I != MBB.begin()) {
--I;
@@ -325,21 +379,62 @@ unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
!isUncondBranchOpcode(I->getOpcode()))
break; // Not a branch
+ Removed += getInstSizeInBytes(*I);
I->eraseFromParent();
I = MBB.end();
++Count;
}
+
+ if (BytesRemoved)
+ *BytesRemoved = Removed;
return Count;
}
bool SparcInstrInfo::reverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const {
- assert(Cond.size() <= 2);
+ assert(Cond.size() <= 3);
SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[1].getImm());
Cond[1].setImm(GetOppositeBranchCondition(CC));
return false;
}
+bool SparcInstrInfo::isBranchOffsetInRange(unsigned BranchOpc,
+ int64_t Offset) const {
+ assert((Offset & 0b11) == 0 && "Malformed branch offset");
+ switch (BranchOpc) {
+ case SP::BA:
+ case SP::BCOND:
+ case SP::BCONDA:
+ case SP::FBCOND:
+ case SP::FBCONDA:
+ return isIntN(22, Offset >> 2);
+
+ case SP::BPICC:
+ case SP::BPICCA:
+ case SP::BPICCNT:
+ case SP::BPICCANT:
+ case SP::BPXCC:
+ case SP::BPXCCA:
+ case SP::BPXCCNT:
+ case SP::BPXCCANT:
+ case SP::BPFCC:
+ case SP::BPFCCA:
+ case SP::BPFCCNT:
+ case SP::BPFCCANT:
+ case SP::FBCOND_V9:
+ case SP::FBCONDA_V9:
+ return isIntN(BPccDisplacementBits, Offset >> 2);
+
+ case SP::BPR:
+ case SP::BPRA:
+ case SP::BPRNT:
+ case SP::BPRANT:
+ return isIntN(BPrDisplacementBits, Offset >> 2);
+ }
+
+ llvm_unreachable("Unknown branch instruction!");
+}
+
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg,
@@ -530,6 +625,23 @@ Register SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
return GlobalBaseReg;
}
+unsigned SparcInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
+ unsigned Opcode = MI.getOpcode();
+
+ if (MI.isInlineAsm()) {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const char *AsmStr = MI.getOperand(0).getSymbolName();
+ return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ }
+
+ // If the instruction has a delay slot, be conservative and also include
+ // it for sizing purposes. This is done so that the BranchRelaxation pass
+ // will not mistakenly mark out-of-range branches as in-range.
+ if (MI.hasDelaySlot())
+ return get(Opcode).getSize() * 2;
+ return get(Opcode).getSize();
+}
+
bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
case TargetOpcode::LOAD_STACK_GUARD: {
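The two hooks added above are what the BranchRelaxation pass consults: getInstSizeInBytes to measure block sizes and isBranchOffsetInRange to decide when a branch must be relaxed. A small self-contained sketch of the range arithmetic, using the default displacement widths of the cl::opts above (helper names are illustrative):

#include <cstdint>

// isIntN-style check: does Value fit in an N-bit signed immediate (N < 64)?
static bool fitsSignedBits(unsigned N, int64_t Value) {
  return Value >= -(INT64_C(1) << (N - 1)) && Value < (INT64_C(1) << (N - 1));
}

// Byte ranges implied by the word displacements checked above:
//   Bcc/FBfcc   : 22-bit displacement -> +/- 2^21 words = +/- 8 MiB
//   BPcc/FBPfcc : 19-bit displacement -> +/- 2^18 words = +/- 1 MiB
//   BPr         : 16-bit displacement -> +/- 2^15 words = +/- 128 KiB
static bool branchOffsetInRange(unsigned DisplacementBits, int64_t ByteOffset) {
  return (ByteOffset & 0b11) == 0 &&
         fitsSignedBits(DisplacementBits, ByteOffset >> 2);
}

Counting the delay slot into the branch's size (get(Opcode).getSize() * 2) deliberately overestimates, so a borderline branch is treated as out of range rather than silently truncated.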
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.h b/llvm/lib/Target/Sparc/SparcInstrInfo.h
index 39cf791c2173..7056d6babe17 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -64,6 +64,8 @@ public:
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
+ MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
+
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -80,6 +82,9 @@ public:
bool
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+ /// Determine if the branch target is in range.
+ bool isBranchOffsetInRange(unsigned BranchOpc, int64_t Offset) const override;
+
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
bool KillSrc) const override;
@@ -99,6 +104,10 @@ public:
Register getGlobalBaseReg(MachineFunction *MF) const;
+ /// GetInstSize - Return the number of bytes of code the specified
+ /// instruction may be. This returns the maximum number of bytes.
+ unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
+
// Lower pseudo instructions after register allocation.
bool expandPostRAPseudo(MachineInstr &MI) const override;
};
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 2c45a7218d04..3d602e7e4376 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -72,7 +72,7 @@ def HasFSMULD : Predicate<"!Subtarget->hasNoFSMULD()">;
// V8, or when it is V9 but the V8 deprecated instructions are efficient enough
// to use when appropriate. In either of these cases, the instruction selector
// will pick deprecated instructions.
-def UseDeprecatedInsts : Predicate<"Subtarget->useDeprecatedV8Instructions()">;
+def UseDeprecatedInsts : Predicate<"Subtarget->useV8DeprecatedInsts()">;
//===----------------------------------------------------------------------===//
// Instruction Pattern Stuff
@@ -224,6 +224,8 @@ def SDTSPcmpfcc :
SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisSameAs<0, 1>]>;
def SDTSPbrcc :
SDTypeProfile<0, 2, [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>]>;
+def SDTSPbrreg :
+SDTypeProfile<0, 3, [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>, SDTCisVT<2, i64>]>;
def SDTSPselectcc :
SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisVT<3, i32>]>;
def SDTSPselectreg :
@@ -253,6 +255,7 @@ def SPbpicc : SDNode<"SPISD::BPICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
def SPbpxcc : SDNode<"SPISD::BPXCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
def SPbrfcc : SDNode<"SPISD::BRFCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
def SPbrfccv9 : SDNode<"SPISD::BRFCC_V9", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+def SPbrreg : SDNode<"SPISD::BR_REG", SDTSPbrreg, [SDNPHasChain, SDNPInGlue]>;
def SPhi : SDNode<"SPISD::Hi", SDTIntUnaryOp>;
def SPlo : SDNode<"SPISD::Lo", SDTIntUnaryOp>;
@@ -288,7 +291,7 @@ def tailcall : SDNode<"SPISD::TAIL_CALL", SDT_SPCall,
SDNPVariadic]>;
def SDT_SPRet : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
-def retflag : SDNode<"SPISD::RET_FLAG", SDT_SPRet,
+def retglue : SDNode<"SPISD::RET_GLUE", SDT_SPRet,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def flushw : SDNode<"SPISD::FLUSHW", SDTNone,
@@ -850,15 +853,8 @@ class BranchPredictAlways<dag ins, string asmstr, list<dag> pattern>
: F2_3<0b001, 0, 1, (outs), ins, asmstr, pattern>;
}
-let cond = 8 in {
- // If we're compiling for v9, prefer BPA rather than BA
- // TODO: Disallow BA emission when FeatureV8Deprecated isn't enabled
- let Predicates = [HasV9], cc = 0b00 in
- def BPA : BranchPredictAlways<(ins bprtarget:$imm19),
- "ba %icc, $imm19", [(br bb:$imm19)]>;
-
+let cond = 8 in
def BA : BranchAlways<(ins brtarget:$imm22), "ba $imm22", [(br bb:$imm22)]>;
-}
let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
@@ -1041,7 +1037,7 @@ let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
def RETL: F3_2<2, 0b111000,
(outs), (ins i32imm:$simm13),
"jmp %o7+$simm13",
- [(retflag simm13:$simm13)],
+ [(retglue simm13:$simm13)],
IIC_jmp_or_call>;
let rd = 0, rs1 = 31 in
diff --git a/llvm/lib/Target/Sparc/SparcSubtarget.cpp b/llvm/lib/Target/Sparc/SparcSubtarget.cpp
index 618a8633f0a9..81c2137ea730 100644
--- a/llvm/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/llvm/lib/Target/Sparc/SparcSubtarget.cpp
@@ -27,28 +27,6 @@ void SparcSubtarget::anchor() { }
SparcSubtarget &SparcSubtarget::initializeSubtargetDependencies(StringRef CPU,
StringRef FS) {
- UseSoftMulDiv = false;
- IsV9 = false;
- IsLeon = false;
- V8DeprecatedInsts = false;
- IsVIS = false;
- IsVIS2 = false;
- IsVIS3 = false;
- HasHardQuad = false;
- UsePopc = false;
- UseSoftFloat = false;
- HasNoFSMULD = false;
- HasNoFMULS = false;
-
- // Leon features
- HasLeonCasa = false;
- HasUmacSmac = false;
- HasPWRPSR = false;
- InsertNOPLoad = false;
- FixAllFDIVSQRT = false;
- DetectRoundChange = false;
- HasLeonCycleCounter = false;
-
// Determine default and user specified characteristics
std::string CPUName = std::string(CPU);
if (CPUName.empty())
diff --git a/llvm/lib/Target/Sparc/SparcSubtarget.h b/llvm/lib/Target/Sparc/SparcSubtarget.h
index 82a4aa510355..8e3d05d5d7e5 100644
--- a/llvm/lib/Target/Sparc/SparcSubtarget.h
+++ b/llvm/lib/Target/Sparc/SparcSubtarget.h
@@ -16,10 +16,10 @@
#include "SparcFrameLowering.h"
#include "SparcISelLowering.h"
#include "SparcInstrInfo.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#define GET_SUBTARGETINFO_HEADER
@@ -31,26 +31,12 @@ class StringRef;
class SparcSubtarget : public SparcGenSubtargetInfo {
Triple TargetTriple;
virtual void anchor();
- bool UseSoftMulDiv;
- bool IsV9;
- bool IsLeon;
- bool V8DeprecatedInsts;
- bool IsVIS, IsVIS2, IsVIS3;
+
bool Is64Bit;
- bool HasHardQuad;
- bool UsePopc;
- bool UseSoftFloat;
- bool HasNoFSMULD;
- bool HasNoFMULS;
-
- // LEON features
- bool HasUmacSmac;
- bool HasLeonCasa;
- bool HasPWRPSR;
- bool InsertNOPLoad;
- bool FixAllFDIVSQRT;
- bool DetectRoundChange;
- bool HasLeonCycleCounter;
+
+#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
+ bool ATTRIBUTE = DEFAULT;
+#include "SparcGenSubtargetInfo.inc"
SparcInstrInfo InstrInfo;
SparcTargetLowering TLInfo;
@@ -77,27 +63,9 @@ public:
bool enableMachineScheduler() const override;
- bool useSoftMulDiv() const { return UseSoftMulDiv; }
- bool isV9() const { return IsV9; }
- bool isLeon() const { return IsLeon; }
- bool isVIS() const { return IsVIS; }
- bool isVIS2() const { return IsVIS2; }
- bool isVIS3() const { return IsVIS3; }
- bool useDeprecatedV8Instructions() const { return V8DeprecatedInsts; }
- bool hasHardQuad() const { return HasHardQuad; }
- bool usePopc() const { return UsePopc; }
- bool useSoftFloat() const { return UseSoftFloat; }
- bool hasNoFSMULD() const { return HasNoFSMULD; }
- bool hasNoFMULS() const { return HasNoFMULS; }
-
- // Leon options
- bool hasUmacSmac() const { return HasUmacSmac; }
- bool hasLeonCasa() const { return HasLeonCasa; }
- bool hasPWRPSR() const { return HasPWRPSR; }
- bool insertNOPLoad() const { return InsertNOPLoad; }
- bool fixAllFDIVSQRT() const { return FixAllFDIVSQRT; }
- bool detectRoundChange() const { return DetectRoundChange; }
- bool hasLeonCycleCounter() const { return HasLeonCycleCounter; }
+#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
+ bool GETTER() const { return ATTRIBUTE; }
+#include "SparcGenSubtargetInfo.inc"
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
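The GET_SUBTARGETINFO_MACRO blocks above replace the hand-maintained feature fields and getters with definitions expanded from the TableGen-generated SparcGenSubtargetInfo.inc, which also makes the explicit initializations removed from SparcSubtarget.cpp unnecessary (defaults now come from the DEFAULT argument). A self-contained sketch of the same X-macro pattern, with an example feature list standing in for the generated invocations:

// Stand-in for the generated list of GET_SUBTARGETINFO_MACRO invocations;
// the real list lives in SparcGenSubtargetInfo.inc and covers every
// SubtargetFeature declared in Sparc.td.
#define EXAMPLE_SPARC_FEATURES(X) \
  X(IsV9, false, isV9) \
  X(UseV8DeprecatedInsts, false, useV8DeprecatedInsts)

class SubtargetFieldsSketch {
  // First expansion: one default-initialized bool field per feature.
#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
  bool ATTRIBUTE = DEFAULT;
  EXAMPLE_SPARC_FEATURES(GET_SUBTARGETINFO_MACRO)
#undef GET_SUBTARGETINFO_MACRO

public:
  // Second expansion: one const getter per feature.
#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
  bool GETTER() const { return ATTRIBUTE; }
  EXAMPLE_SPARC_FEATURES(GET_SUBTARGETINFO_MACRO)
#undef GET_SUBTARGETINFO_MACRO
};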
diff --git a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index 58faaafc29d6..577dc1351de9 100644
--- a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -17,7 +17,6 @@
#include "TargetInfo/SparcTargetInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
-#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/TargetRegistry.h"
#include <optional>
using namespace llvm;
@@ -32,6 +31,10 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeSparcTarget() {
initializeSparcDAGToDAGISelPass(PR);
}
+static cl::opt<bool>
+ BranchRelaxation("sparc-enable-branch-relax", cl::Hidden, cl::init(true),
+ cl::desc("Relax out of range conditional branches"));
+
static std::string computeDataLayout(const Triple &T, bool is64Bit) {
// Sparc is typically big endian, but some are little.
std::string Ret = T.getArch() == Triple::sparcel ? "e" : "E";
@@ -182,6 +185,9 @@ bool SparcPassConfig::addInstSelector() {
}
void SparcPassConfig::addPreEmitPass(){
+ if (BranchRelaxation)
+ addPass(&BranchRelaxationPassID);
+
addPass(createSparcDelaySlotFillerPass());
if (this->getSparcTargetMachine().getSubtargetImpl()->insertNOPLoad())