Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 47 ++++++++++++++++++++++++++---------------------
1 file changed, 26 insertions(+), 21 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 8d6edf07bc53..25cc34badda0 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2433,7 +2433,7 @@ unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
- SDValue OpVal(nullptr, 0);
+ SDValue OpVal;
// If ByteSize of the splat is bigger than the element size of the
// build_vector, then we have a case where we are checking for a splat where
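The hunk above leans on SDValue's default constructor, which already initializes the node pointer to nullptr and the result number to 0, so the explicit SDValue(nullptr, 0) spelling was redundant. A minimal sketch of the equivalence, assuming LLVM's SelectionDAG headers (the function name is illustrative only):

    #include "llvm/CodeGen/SelectionDAGNodes.h"

    // SDValue() default-constructs to {nullptr, 0}, so the null check that
    // get_VSPLTI_elt performs later behaves exactly as before.
    void defaultSDValueSketch() {
      llvm::SDValue OpVal; // same state as the old SDValue(nullptr, 0)
      if (!OpVal.getNode()) {
        // OpVal is not yet bound to a node, as before.
      }
    }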
@@ -3508,8 +3508,9 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
int ShuffV[] = {1, 0, 3, 2};
SDValue Shuff =
DAG.getVectorShuffle(MVT::v4i32, dl, SetCC32, SetCC32, ShuffV);
- return DAG.getBitcast(
- MVT::v2i64, DAG.getNode(ISD::AND, dl, MVT::v4i32, Shuff, SetCC32));
+ return DAG.getBitcast(MVT::v2i64,
+ DAG.getNode(CC == ISD::SETEQ ? ISD::AND : ISD::OR,
+ dl, MVT::v4i32, Shuff, SetCC32));
}
// We handle most of these in the usual way.
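Some context on the hunk above: the v2i64 setcc is emulated through a v4i32 compare (SetCC32), and the {1, 0, 3, 2} shuffle swaps each 32-bit word with its partner inside the same 64-bit lane. Combining a lane's two word results with AND therefore yields all-ones only when both halves compared equal, which is correct for SETEQ but not for SETNE, where a mismatch in either half must flag the lane, hence the OR. A scalar model of the combine, illustrative only and not LLVM code:

    // loCmp/hiCmp stand for the two 32-bit word-compare results that make
    // up one 64-bit lane after the {1, 0, 3, 2} shuffle pairs them up.
    bool lane64SetEQ(bool loCmp, bool hiCmp) {
      return loCmp && hiCmp; // SETEQ: both halves must match -> AND
    }
    bool lane64SetNE(bool loCmp, bool hiCmp) {
      return loCmp || hiCmp; // SETNE: either half differing suffices -> OR
    }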
@@ -4078,8 +4079,8 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// virtual ones.
if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
assert(i + 1 < e && "No second half of double precision argument");
- unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
- unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
+ Register RegLo = MF.addLiveIn(VA.getLocReg(), RC);
+ Register RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
if (!Subtarget.isLittleEndian())
@@ -4087,7 +4088,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
ArgValueHi);
} else {
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
ValVT == MVT::i1 ? MVT::i32 : ValVT);
if (ValVT == MVT::i1)
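The unsigned-to-Register substitutions in this hunk, and in the remaining hunks of this diff, are mechanical type-hygiene changes: llvm::Register wraps the raw unsigned register id and converts implicitly in both directions, so the MF.addLiveIn and DAG.getCopyFromReg call sites need no other adjustment. A sketch, assuming LLVM's Register.h (registerSketch is an illustrative name):

    #include "llvm/CodeGen/Register.h"

    // Register round-trips through unsigned implicitly, so swapping the
    // declared type changes no behavior; it only adds classification
    // helpers and catches accidental mixing with other unsigned ids.
    void registerSketch(unsigned RawId) {
      llvm::Register R = RawId;  // implicit conversion from unsigned
      if (R.isVirtual()) {       // helper unavailable on a bare unsigned
        unsigned Back = R;       // implicit conversion back to unsigned
        (void)Back;
      }
    }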
@@ -4179,7 +4180,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// dereferencing the result of va_next.
for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
// Get an existing live-in vreg, or add a new one.
- unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
+ Register VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
@@ -4198,7 +4199,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// on the stack.
for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
// Get an existing live-in vreg, or add a new one.
- unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
+ Register VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
@@ -4384,7 +4385,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
InVals.push_back(Arg);
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), ObjSize * 8);
@@ -4408,7 +4409,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
if (GPR_idx == Num_GPR_Regs)
break;
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Addr = FIN;
@@ -4432,7 +4433,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
case MVT::i64:
if (Flags.isNest()) {
// The 'nest' parameter, if any, is passed in R11.
- unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
@@ -4445,7 +4446,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// passed directly. Clang may use those instead of "byval" aggregate
// types to avoid forcing arguments to memory unnecessarily.
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
@@ -4491,7 +4492,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// This can only ever happen in the presence of f32 array types,
// since otherwise we never run out of FPRs before running out
// of GPRs.
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
@@ -4532,7 +4533,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// passed directly. The latter are used to implement ELFv2 homogenous
// vector aggregates.
if (VR_idx != Num_VR_Regs) {
- unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
+ Register VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++VR_idx;
} else {
@@ -4591,7 +4592,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// the result of va_next.
for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
GPR_idx < Num_GPR_Regs; ++GPR_idx) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
@@ -7059,7 +7060,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
unsigned Offset) {
- const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
+ const Register VReg = MF.addLiveIn(PhysReg, RegClass);
// Since the callers side has left justified the aggregate in the
// register, we can simply store the entire register into the stack
// slot.
@@ -7156,7 +7157,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
(CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
GPRIndex < NumGPArgRegs; ++GPRIndex) {
- const unsigned VReg =
+ const Register VReg =
IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
: MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
@@ -11178,13 +11179,17 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::STRICT_FP_TO_SINT:
case ISD::STRICT_FP_TO_UINT:
case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
+ case ISD::FP_TO_UINT: {
// LowerFP_TO_INT() can only handle f32 and f64.
if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
MVT::ppcf128)
return;
- Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
+ SDValue LoweredValue = LowerFP_TO_INT(SDValue(N, 0), DAG, dl);
+ Results.push_back(LoweredValue);
+ if (N->isStrictFPOpcode())
+ Results.push_back(LoweredValue.getValue(1));
return;
+ }
case ISD::TRUNCATE: {
if (!N->getValueType(0).isVector())
return;
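The braces added around the FP_TO_UINT case exist so a local can be declared; the substantive fix is that strict FP nodes define two results, the converted value and an output chain, and ReplaceNodeResults must supply a replacement for each. A sketch of the pattern under the usual LLVM headers (pushResults is an illustrative name):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    // Strict FP nodes yield (value, chain); push one replacement per
    // result the original node defines.
    void pushResults(llvm::SDNode *N, llvm::SDValue Lowered,
                     llvm::SmallVectorImpl<llvm::SDValue> &Results) {
      Results.push_back(Lowered);               // result 0: converted value
      if (N->isStrictFPOpcode())
        Results.push_back(Lowered.getValue(1)); // result 1: output chain
    }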
@@ -17890,7 +17895,7 @@ Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
"Only support quadword now");
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Type *ValTy = cast<PointerType>(AlignedAddr->getType())->getElementType();
+ Type *ValTy = AlignedAddr->getType()->getPointerElementType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
Function *RMW = Intrinsic::getDeclaration(
M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
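This hunk, and the matching one in emitMaskedAtomicCmpXchgIntrinsic below, track the opaque-pointer migration: casting the pointer type to PointerType just to call getElementType is being phased out, and Type::getPointerElementType is the transitional drop-in that asks the type directly. A sketch, assuming LLVM's IR headers (pointeeType is an illustrative name):

    #include "llvm/IR/Type.h"
    #include "llvm/IR/Value.h"

    // Transitional accessor: queries the Type itself for its pointee
    // instead of first casting to PointerType.
    llvm::Type *pointeeType(llvm::Value *AlignedAddr) {
      // Old: cast<PointerType>(AlignedAddr->getType())->getElementType();
      return AlignedAddr->getType()->getPointerElementType();
    }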
@@ -17915,7 +17920,7 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
"Only support quadword now");
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Type *ValTy = cast<PointerType>(AlignedAddr->getType())->getElementType();
+ Type *ValTy = AlignedAddr->getType()->getPointerElementType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
Function *IntCmpXchg =
Intrinsic::getDeclaration(M, Intrinsic::ppc_cmpxchg_i128);