path: root/llvm/lib/Target/Sparc/SparcISelLowering.cpp
Diffstat (limited to 'llvm/lib/Target/Sparc/SparcISelLowering.cpp')
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp | 231
1 file changed, 177 insertions(+), 54 deletions(-)
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 2cb74e7709c7..913f133465b9 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
@@ -101,9 +102,9 @@ static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
}
// Allocate a full-sized argument for the 64-bit ABI.
-static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT, CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
assert((LocVT == MVT::f32 || LocVT == MVT::f128
|| LocVT.getSizeInBits() == 64) &&
"Can't handle non-64 bits locations");
@@ -133,6 +134,11 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
return true;
}
+ // Bail out if this is a return CC and we run out of registers to place
+ // values into.
+ if (IsReturn)
+ return false;
+
// This argument goes on the stack in an 8-byte slot.
// When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
// the right-aligned float. The first 4 bytes of the stack slot are undefined.
@@ -146,9 +152,9 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
-static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT, CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
unsigned Offset = State.AllocateStack(4, Align(4));
@@ -174,10 +180,43 @@ static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
return true;
}
+ // Bail out if this is a return CC and we run out of registers to place
+ // values into.
+ if (IsReturn)
+ return false;
+
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return true;
}
+static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
#include "SparcGenCallingConv.inc"
// The calling conventions in SparcCallingConv.td are described in terms of the
@@ -191,6 +230,15 @@ static unsigned toCallerWindow(unsigned Reg) {
return Reg;
}
+bool SparcTargetLowering::CanLowerReturn(
+ CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
+ : RetCC_Sparc32);
+}
+
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
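
For context, returning false from CanLowerReturn makes SelectionDAG demote the return value to an sret-style hidden pointer, rather than tripping the "Can only return in registers!" asserts added elsewhere in this patch. A minimal sketch of the distinction, using hypothetical types that are not part of the patch:

// Small fits in the V9 return registers; Big would exhaust them, so the
// new hook reports it as not lowerable in registers and the caller passes
// a hidden result pointer instead.
struct Small { long a, b; };   // 16 bytes: plain register return
struct Big   { long a[8]; };   // 64 bytes: demoted to an sret pointer
Small retSmall();
Big   retBig();
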
@@ -723,7 +771,10 @@ bool SparcTargetLowering::IsEligibleForTailCallOptimization(
return false;
// Do not tail call opt if the stack is used to pass parameters.
- if (CCInfo.getNextStackOffset() != 0)
+ // 64-bit targets have a slightly higher limit since the ABI requires
+ // some space to be allocated even when all parameters fit in registers.
+ unsigned StackOffsetLimit = Subtarget->is64Bit() ? 48 : 0;
+ if (CCInfo.getNextStackOffset() > StackOffsetLimit)
return false;
// Do not tail call opt if either the callee or caller returns
@@ -1018,8 +1069,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
- DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
+ Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InFlag, dl);
InFlag = Chain.getValue(1);
// Assign locations to each value returned by this call.
@@ -1031,6 +1081,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
if (RVLocs[i].getLocVT() == MVT::v2i32) {
SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
SDValue Lo = DAG.getCopyFromReg(
@@ -1091,7 +1142,7 @@ Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
ArrayRef<ISD::OutputArg> Outs) {
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- const CCValAssign &VA = ArgLocs[i];
+ CCValAssign &VA = ArgLocs[i];
MVT ValTy = VA.getLocVT();
// FIXME: What about f32 arguments? C promotes them to f64 when calling
// varargs functions.
@@ -1102,8 +1153,6 @@ static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
continue;
// This floating point argument should be reassigned.
- CCValAssign NewVA;
-
// Determine the offset into the argument array.
Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
@@ -1115,21 +1164,20 @@ static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
unsigned IReg = SP::I0 + Offset/8;
if (ValTy == MVT::f64)
// Full register, just bitconvert into i64.
- NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
- IReg, MVT::i64, CCValAssign::BCvt);
+ VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
+ CCValAssign::BCvt);
else {
assert(ValTy == MVT::f128 && "Unexpected type!");
// Full register, just bitconvert into i128 -- We will lower this into
// two i64s in LowerCall_64.
- NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
- IReg, MVT::i128, CCValAssign::BCvt);
+ VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
+ MVT::i128, CCValAssign::BCvt);
}
} else {
// This needs to go to memory, we're out of integer registers.
- NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
- Offset, VA.getLocVT(), VA.getLocInfo());
+ VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
+ VA.getLocVT(), VA.getLocInfo());
}
- ArgLocs[i] = NewVA;
}
}
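
fixupVariableFloatArgs exists because the V9 varargs convention passes floating-point variadic arguments through the integer argument registers: the callee cannot know an incoming word holds a double, so the caller bit-converts f64 to i64 (and f128 to an i64 pair) before the call. The same reinterpretation, sketched in portable C++:

#include <cstdint>
#include <cstring>

// What BCvt does for a variadic double: same 64 bits, integer register class.
uint64_t varargDoubleBits(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits); // bit-preserving, like ISD::BITCAST
  return bits;
}
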
@@ -1142,20 +1190,21 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
SDValue Chain = CLI.Chain;
auto PtrVT = getPointerTy(DAG.getDataLayout());
- // Sparc target does not yet support tail call optimization.
- CLI.IsTailCall = false;
-
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
+ CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
+ CCInfo, CLI, DAG.getMachineFunction());
+
// Get the size of the outgoing arguments stack space requirement.
// The stack offset computed by CC_Sparc64 includes all arguments.
// Called functions expect 6 argument words to exist in the stack frame, used
// or not.
- unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
+ unsigned StackReserved = 6 * 8u;
+ unsigned ArgsSize = std::max(StackReserved, CCInfo.getNextStackOffset());
// Keep stack frames 16-byte aligned.
ArgsSize = alignTo(ArgsSize, 16);
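
Worked through with hypothetical argument counts: six or fewer register-sized arguments give getNextStackOffset() <= 48, so the reserved minimum wins; a seventh i64 pushes the offset to 56, which rounds up to 64 for 16-byte stack alignment.

constexpr unsigned argsSizeSketch(unsigned nextStackOffset) {
  constexpr unsigned StackReserved = 6 * 8; // six mandatory argument slots
  unsigned Size = nextStackOffset > StackReserved ? nextStackOffset : StackReserved;
  return (Size + 15) & ~15u; // alignTo(Size, 16)
}
static_assert(argsSizeSketch(0) == 48, "empty calls still reserve 48 bytes");
static_assert(argsSizeSketch(56) == 64, "seven i64 args round up to 64");
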
@@ -1164,10 +1213,13 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
if (CLI.IsVarArg)
fixupVariableFloatArgs(ArgLocs, CLI.Outs);
+ assert(!CLI.IsTailCall || ArgsSize == StackReserved);
+
// Adjust the stack pointer to make room for the arguments.
// FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
// with more than 6 arguments.
- Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
+ if (!CLI.IsTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
// Collect the set of registers to pass to the function and their values.
// This will be emitted as a sequence of CopyToReg nodes glued to the call
@@ -1227,10 +1279,16 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
SDValue Lo64 =
DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
- RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
- Hi64));
- RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
- Lo64));
+
+ Register HiReg = VA.getLocReg();
+ Register LoReg = VA.getLocReg() + 1;
+ if (!CLI.IsTailCall) {
+ HiReg = toCallerWindow(HiReg);
+ LoReg = toCallerWindow(LoReg);
+ }
+
+ RegsToPass.push_back(std::make_pair(HiReg, Hi64));
+ RegsToPass.push_back(std::make_pair(LoReg, Lo64));
continue;
}
@@ -1251,7 +1309,11 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
++i;
}
}
- RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
+
+ Register Reg = VA.getLocReg();
+ if (!CLI.IsTailCall)
+ Reg = toCallerWindow(Reg);
+ RegsToPass.push_back(std::make_pair(Reg, Arg));
continue;
}
@@ -1319,13 +1381,16 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(InGlue);
// Now the call itself.
+ if (CLI.IsTailCall) {
+ DAG.getMachineFunction().getFrameInfo().setHasTailCall();
+ return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
+ }
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
InGlue = Chain.getValue(1);
// Revert the stack pointer immediately after the call.
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
- DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
+ Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
InGlue = Chain.getValue(1);
// Now extract the return values. This is more or less the same as
@@ -1346,6 +1411,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
unsigned Reg = toCallerWindow(VA.getLocReg());
// When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
@@ -1405,6 +1471,27 @@ TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWIn
return AtomicExpansionKind::CmpXChg;
}
+/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
+/// rcond condition.
+static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
+ switch (CC) {
+ default:
+ llvm_unreachable("Unknown/unsigned integer condition code!");
+ case ISD::SETEQ:
+ return SPCC::REG_Z;
+ case ISD::SETNE:
+ return SPCC::REG_NZ;
+ case ISD::SETLT:
+ return SPCC::REG_LZ;
+ case ISD::SETGT:
+ return SPCC::REG_GZ;
+ case ISD::SETLE:
+ return SPCC::REG_LEZ;
+ case ISD::SETGE:
+ return SPCC::REG_GEZ;
+ }
+}
+
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
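
The rcond codes above feed the V9 register-contents instructions (BPr branches such as brz/brlz and the MOVr conditional moves), which test a register against zero directly and exist only for signed predicates; that is why unsigned condition codes land in the llvm_unreachable. The mapping in C terms, as an illustration:

// Each SPCC::REG_* is a signed test against zero; V9 encodes exactly these
// six, and nothing for unsigned compares.
bool regZ(long x)   { return x == 0; } // REG_Z   -> brz  / movrz
bool regNz(long x)  { return x != 0; } // REG_NZ  -> brnz / movrnz
bool regLz(long x)  { return x < 0;  } // REG_LZ  -> brlz / movrlz
bool regGz(long x)  { return x > 0;  } // REG_GZ  -> brgz / movrgz
bool regLez(long x) { return x <= 0; } // REG_LEZ -> brlez / movrlez
bool regGez(long x) { return x >= 0; } // REG_GEZ -> brgez / movrgez
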
@@ -1880,12 +1967,21 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
case SPISD::FIRST_NUMBER: break;
case SPISD::CMPICC: return "SPISD::CMPICC";
case SPISD::CMPFCC: return "SPISD::CMPFCC";
+ case SPISD::CMPFCC_V9:
+ return "SPISD::CMPFCC_V9";
case SPISD::BRICC: return "SPISD::BRICC";
- case SPISD::BRXCC: return "SPISD::BRXCC";
+ case SPISD::BPICC:
+ return "SPISD::BPICC";
+ case SPISD::BPXCC:
+ return "SPISD::BPXCC";
case SPISD::BRFCC: return "SPISD::BRFCC";
+ case SPISD::BRFCC_V9:
+ return "SPISD::BRFCC_V9";
case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
+ case SPISD::SELECT_REG:
+ return "SPISD::SELECT_REG";
case SPISD::Hi: return "SPISD::Hi";
case SPISD::Lo: return "SPISD::Lo";
case SPISD::FTOI: return "SPISD::FTOI";
@@ -1942,15 +2038,14 @@ void SparcTargetLowering::computeKnownBitsForTargetNode
// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
ISD::CondCode CC, unsigned &SPCC) {
- if (isNullConstant(RHS) &&
- CC == ISD::SETNE &&
+ if (isNullConstant(RHS) && CC == ISD::SETNE &&
(((LHS.getOpcode() == SPISD::SELECT_ICC ||
LHS.getOpcode() == SPISD::SELECT_XCC) &&
LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
(LHS.getOpcode() == SPISD::SELECT_FCC &&
- LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
- isOneConstant(LHS.getOperand(0)) &&
- isNullConstant(LHS.getOperand(1))) {
+ (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
+ LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
+ isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
SDValue CMPCC = LHS.getOperand(3);
SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
LHS = CMPCC.getOperand(0);
@@ -2125,8 +2220,7 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
InFlag};
Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
- DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
+ Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InFlag, DL);
InFlag = Chain.getValue(1);
SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
@@ -2487,8 +2581,8 @@ static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
- const SparcTargetLowering &TLI,
- bool hasHardQuad) {
+ const SparcTargetLowering &TLI, bool hasHardQuad,
+ bool isV9) {
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
SDValue LHS = Op.getOperand(2);
@@ -2500,23 +2594,29 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
// If this is a br_cc of a "setcc", and if the setcc got lowered into
// a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
LookThroughSetCC(LHS, RHS, CC, SPCC);
+ assert(LHS.getValueType() == RHS.getValueType());
// Get the condition flag.
SDValue CompareFlag;
if (LHS.getValueType().isInteger()) {
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
- // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
- Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
+ if (isV9)
+ // 32-bit compares use the icc flags, 64-bit compares use the xcc flags.
+ Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
+ else
+ // Non-v9 targets don't have xcc.
+ Opc = SPISD::BRICC;
} else {
if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
- Opc = SPISD::BRICC;
+ Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
} else {
- CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
+ unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
+ CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
- Opc = SPISD::BRFCC;
+ Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
}
}
return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
@@ -2524,8 +2624,8 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
- const SparcTargetLowering &TLI,
- bool hasHardQuad) {
+ const SparcTargetLowering &TLI, bool hasHardQuad,
+ bool isV9, bool is64Bit) {
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
@@ -2537,9 +2637,26 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
// If this is a select_cc of a "setcc", and if the setcc got lowered into
// a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
LookThroughSetCC(LHS, RHS, CC, SPCC);
+ assert(LHS.getValueType() == RHS.getValueType());
SDValue CompareFlag;
if (LHS.getValueType().isInteger()) {
+ // On V9 processors running in 64-bit mode, if CC compares two `i64`s
+ // and the RHS is zero, we might be able to use a specialized select.
+ // Selects producing any scalar integer type, f32, or f64 are eligible;
+ // an f128 result is eligible only when we have hardquad.
+ EVT ValType = TrueVal.getValueType();
+ bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
+ ValType == MVT::f64 ||
+ (ValType == MVT::f128 && hasHardQuad);
+ if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
+ isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
+ return DAG.getNode(
+ SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
+ DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
+
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
Opc = LHS.getValueType() == MVT::i32 ?
SPISD::SELECT_ICC : SPISD::SELECT_XCC;
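
The source-level shape the new SELECT_REG path targets, assuming a 64-bit V9 target (hypothetical example, not from the patch's tests): a select whose condition is a signed comparison of an i64 against zero can become a single MOVr instead of a compare, a branch pseudo, and a PHI.

long selectNonNegative(long x, long a, long b) {
  // Expected to lower to something like: movrgez %x, %a, %result
  return x >= 0 ? a : b;
}
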
@@ -2550,7 +2667,8 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
Opc = SPISD::SELECT_ICC;
} else {
- CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
+ unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
+ CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
Opc = SPISD::SELECT_FCC;
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
}
@@ -2600,7 +2718,7 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
// We can't count on greater alignment than the word size.
return DAG.getLoad(
VT, DL, InChain, VAList, MachinePointerInfo(),
- std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
+ Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
}
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
@@ -3075,6 +3193,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
bool hasHardQuad = Subtarget->hasHardQuad();
bool isV9 = Subtarget->isV9();
+ bool is64Bit = Subtarget->is64Bit();
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
@@ -3095,10 +3214,10 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
hasHardQuad);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
hasHardQuad);
- case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
- hasHardQuad);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
- hasHardQuad);
+ case ISD::BR_CC:
+ return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9);
+ case ISD::SELECT_CC:
+ return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
@@ -3175,6 +3294,8 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case SP::SELECT_CC_FP_ICC:
case SP::SELECT_CC_DFP_ICC:
case SP::SELECT_CC_QFP_ICC:
+ if (Subtarget->isV9())
+ return expandSelectCC(MI, BB, SP::BPICC);
return expandSelectCC(MI, BB, SP::BCOND);
case SP::SELECT_CC_Int_XCC:
case SP::SELECT_CC_FP_XCC:
@@ -3185,6 +3306,8 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case SP::SELECT_CC_FP_FCC:
case SP::SELECT_CC_DFP_FCC:
case SP::SELECT_CC_QFP_FCC:
+ if (Subtarget->isV9())
+ return expandSelectCC(MI, BB, SP::FBCOND_V9);
return expandSelectCC(MI, BB, SP::FBCOND);
}
}
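
These custom-inserter changes round out the V9 story: the SELECT_CC pseudos still expand to a branch diamond, but on V9 the branch uses the predicated forms (BPICC for integer, FBCOND_V9 for floating point) rather than the V8 Bicc/FBfcc. The control flow being built, reduced to plain C++:

// expandSelectCC materializes this diamond out of MachineBasicBlocks; the
// BranchOpcode parameter picks the V8 or V9 branch flavor.
long expandedSelect(bool cond, long trueVal, long falseVal) {
  if (cond)          // ThisMBB: conditional branch (BPICC / BCOND / FBCOND*)
    return trueVal;  // taken edge
  return falseVal;   // fall-through edge; both rejoin at a PHI in SinkMBB
}
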