Diffstat (limited to 'llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp  1991
1 file changed, 1436 insertions, 555 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 667e1a04dc34..da519f99ad7e 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -63,30 +63,48 @@ getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) {
return std::make_pair(NumParts, NumLeftover);
}
+static Type *getFloatTypeForLLT(LLVMContext &Ctx, LLT Ty) {
+ if (!Ty.isScalar())
+ return nullptr;
+
+ switch (Ty.getSizeInBits()) {
+ case 16:
+ return Type::getHalfTy(Ctx);
+ case 32:
+ return Type::getFloatTy(Ctx);
+ case 64:
+ return Type::getDoubleTy(Ctx);
+ case 128:
+ return Type::getFP128Ty(Ctx);
+ default:
+ return nullptr;
+ }
+}
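+// Illustrative usage (widths per the switch above): getFloatTypeForLLT(Ctx,
+// LLT::scalar(64)) returns Type::getDoubleTy(Ctx), while an unhandled width
+// such as LLT::scalar(80) or any vector type returns nullptr, so callers must
+// check the result.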
+
LegalizerHelper::LegalizerHelper(MachineFunction &MF,
GISelChangeObserver &Observer,
MachineIRBuilder &Builder)
- : MIRBuilder(Builder), MRI(MF.getRegInfo()),
- LI(*MF.getSubtarget().getLegalizerInfo()), Observer(Observer) {
- MIRBuilder.setMF(MF);
+ : MIRBuilder(Builder), Observer(Observer), MRI(MF.getRegInfo()),
+ LI(*MF.getSubtarget().getLegalizerInfo()) {
MIRBuilder.setChangeObserver(Observer);
}
LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
GISelChangeObserver &Observer,
MachineIRBuilder &B)
- : MIRBuilder(B), MRI(MF.getRegInfo()), LI(LI), Observer(Observer) {
- MIRBuilder.setMF(MF);
+ : MIRBuilder(B), Observer(Observer), MRI(MF.getRegInfo()), LI(LI) {
MIRBuilder.setChangeObserver(Observer);
}
LegalizerHelper::LegalizeResult
LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
- LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Legalizing: " << MI);
+
+ MIRBuilder.setInstrAndDebugLoc(MI);
if (MI.getOpcode() == TargetOpcode::G_INTRINSIC ||
MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS)
- return LI.legalizeIntrinsic(MI, MRI, MIRBuilder) ? Legalized
- : UnableToLegalize;
+ return LI.legalizeIntrinsic(*this, MI) ? Legalized : UnableToLegalize;
auto Step = LI.getAction(MI, MRI);
switch (Step.Action) {
case Legal:
@@ -101,6 +119,9 @@ LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
case WidenScalar:
LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
return widenScalar(MI, Step.TypeIdx, Step.NewType);
+ case Bitcast:
+ LLVM_DEBUG(dbgs() << ".. Bitcast type\n");
+ return bitcast(MI, Step.TypeIdx, Step.NewType);
case Lower:
LLVM_DEBUG(dbgs() << ".. Lower\n");
return lower(MI, Step.TypeIdx, Step.NewType);
@@ -112,8 +133,7 @@ LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
return moreElementsVector(MI, Step.TypeIdx, Step.NewType);
case Custom:
LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
- return LI.legalizeCustom(MI, MRI, MIRBuilder, Observer) ? Legalized
- : UnableToLegalize;
+ return LI.legalizeCustom(*this, MI) ? Legalized : UnableToLegalize;
default:
LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
return UnableToLegalize;
@@ -172,26 +192,6 @@ bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
return true;
}
-static LLT getGCDType(LLT OrigTy, LLT TargetTy) {
- if (OrigTy.isVector() && TargetTy.isVector()) {
- assert(OrigTy.getElementType() == TargetTy.getElementType());
- int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
- TargetTy.getNumElements());
- return LLT::scalarOrVector(GCD, OrigTy.getElementType());
- }
-
- if (OrigTy.isVector() && !TargetTy.isVector()) {
- assert(OrigTy.getElementType() == TargetTy);
- return TargetTy;
- }
-
- assert(!OrigTy.isVector() && !TargetTy.isVector());
-
- int GCD = greatestCommonDivisor(OrigTy.getSizeInBits(),
- TargetTy.getSizeInBits());
- return LLT::scalar(GCD);
-}
-
void LegalizerHelper::insertParts(Register DstReg,
LLT ResultTy, LLT PartTy,
ArrayRef<Register> PartRegs,
@@ -237,92 +237,222 @@ void LegalizerHelper::insertParts(Register DstReg,
}
}
+/// Return the result registers of G_UNMERGE_VALUES \p MI in \p Regs
+static void getUnmergeResults(SmallVectorImpl<Register> &Regs,
+ const MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
+
+ const int NumResults = MI.getNumOperands() - 1;
+ Regs.resize(NumResults);
+ for (int I = 0; I != NumResults; ++I)
+ Regs[I] = MI.getOperand(I).getReg();
+}
+
+LLT LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
+ LLT NarrowTy, Register SrcReg) {
+ LLT SrcTy = MRI.getType(SrcReg);
+
+ LLT GCDTy = getGCDType(DstTy, getGCDType(SrcTy, NarrowTy));
+ if (SrcTy == GCDTy) {
+ // If the source already evenly divides the result type, we don't need to do
+ // anything.
+ Parts.push_back(SrcReg);
+ } else {
+ // Need to split into common type sized pieces.
+ auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg);
+ getUnmergeResults(Parts, *Unmerge);
+ }
+
+ return GCDTy;
+}
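+// e.g. (illustrative) with DstTy = s64, NarrowTy = s24 and a s48 source,
+// extractGCDType computes GCDTy = gcd(64, gcd(48, 24)) = s8 and unmerges the
+// source into six s8 pieces.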
+
+LLT LegalizerHelper::buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
+ SmallVectorImpl<Register> &VRegs,
+ unsigned PadStrategy) {
+ LLT LCMTy = getLCMType(DstTy, NarrowTy);
+
+ int NumParts = LCMTy.getSizeInBits() / NarrowTy.getSizeInBits();
+ int NumSubParts = NarrowTy.getSizeInBits() / GCDTy.getSizeInBits();
+ int NumOrigSrc = VRegs.size();
+
+ Register PadReg;
+
+ // Get a value we can use to pad the source value if the sources won't evenly
+ // cover the result type.
+ if (NumOrigSrc < NumParts * NumSubParts) {
+ if (PadStrategy == TargetOpcode::G_ZEXT)
+ PadReg = MIRBuilder.buildConstant(GCDTy, 0).getReg(0);
+ else if (PadStrategy == TargetOpcode::G_ANYEXT)
+ PadReg = MIRBuilder.buildUndef(GCDTy).getReg(0);
+ else {
+ assert(PadStrategy == TargetOpcode::G_SEXT);
+
+ // Shift the sign bit of the low register through the high register.
+ auto ShiftAmt =
+ MIRBuilder.buildConstant(LLT::scalar(64), GCDTy.getSizeInBits() - 1);
+ PadReg = MIRBuilder.buildAShr(GCDTy, VRegs.back(), ShiftAmt).getReg(0);
+ }
+ }
+
+ // Registers for the final merge to be produced.
+ SmallVector<Register, 4> Remerge(NumParts);
+
+ // Registers needed for intermediate merges, which will be merged into a
+ // source for Remerge.
+ SmallVector<Register, 4> SubMerge(NumSubParts);
+
+ // Once we've fully read off the end of the original source bits, we can reuse
+ // the same high bits for remaining padding elements.
+ Register AllPadReg;
+
+ // Build merges to the LCM type to cover the original result type.
+ for (int I = 0; I != NumParts; ++I) {
+ bool AllMergePartsArePadding = true;
+
+ // Build the requested merges to the requested type.
+ for (int J = 0; J != NumSubParts; ++J) {
+ int Idx = I * NumSubParts + J;
+ if (Idx >= NumOrigSrc) {
+ SubMerge[J] = PadReg;
+ continue;
+ }
+
+ SubMerge[J] = VRegs[Idx];
+
+ // There are meaningful bits here we can't reuse later.
+ AllMergePartsArePadding = false;
+ }
+
+ // If we've filled up a complete piece with padding bits, we can directly
+ // emit the natural sized constant if applicable, rather than a merge of
+ // smaller constants.
+ if (AllMergePartsArePadding && !AllPadReg) {
+ if (PadStrategy == TargetOpcode::G_ANYEXT)
+ AllPadReg = MIRBuilder.buildUndef(NarrowTy).getReg(0);
+ else if (PadStrategy == TargetOpcode::G_ZEXT)
+ AllPadReg = MIRBuilder.buildConstant(NarrowTy, 0).getReg(0);
+
+ // If this is a sign extension, we can't materialize a trivial constant
+ // with the right type and have to produce a merge.
+ }
+
+ if (AllPadReg) {
+ // Avoid creating additional instructions if we're just adding extra
+ // copies of padding bits.
+ Remerge[I] = AllPadReg;
+ continue;
+ }
+
+ if (NumSubParts == 1)
+ Remerge[I] = SubMerge[0];
+ else
+ Remerge[I] = MIRBuilder.buildMerge(NarrowTy, SubMerge).getReg(0);
+
+ // In the sign extend padding case, re-use the first all-signbit merge.
+ if (AllMergePartsArePadding && !AllPadReg)
+ AllPadReg = Remerge[I];
+ }
+
+ VRegs = std::move(Remerge);
+ return LCMTy;
+}
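+// e.g. (illustrative) for DstTy = s64, NarrowTy = s32, GCDTy = s8 and four
+// original s8 sources: LCMTy = s64, NumParts = 2 and NumSubParts = 4, so the
+// first G_MERGE_VALUES consumes the real pieces and the second is built
+// entirely from padding (a single reused register for the G_ZEXT/G_ANYEXT
+// strategies).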
+
+void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
+ ArrayRef<Register> RemergeRegs) {
+ LLT DstTy = MRI.getType(DstReg);
+
+ // Create the merge to the widened source, and extract the relevant bits into
+ // the result.
+
+ if (DstTy == LCMTy) {
+ MIRBuilder.buildMerge(DstReg, RemergeRegs);
+ return;
+ }
+
+ auto Remerge = MIRBuilder.buildMerge(LCMTy, RemergeRegs);
+ if (DstTy.isScalar() && LCMTy.isScalar()) {
+ MIRBuilder.buildTrunc(DstReg, Remerge);
+ return;
+ }
+
+ if (LCMTy.isVector()) {
+ MIRBuilder.buildExtract(DstReg, Remerge, 0);
+ return;
+ }
+
+ llvm_unreachable("unhandled case");
+}
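+// e.g. (illustrative) remerging to a s48 destination with LCMTy = s64 builds
+// a s64 G_MERGE_VALUES of the pieces followed by a G_TRUNC to s48; a vector
+// LCM type instead uses a G_EXTRACT at offset 0.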
+
static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
+#define RTLIBCASE(LibcallPrefix) \
+ do { \
+ switch (Size) { \
+ case 32: \
+ return RTLIB::LibcallPrefix##32; \
+ case 64: \
+ return RTLIB::LibcallPrefix##64; \
+ case 128: \
+ return RTLIB::LibcallPrefix##128; \
+ default: \
+ llvm_unreachable("unexpected size"); \
+ } \
+ } while (0)
+
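+ // Illustrative expansion: for Size == 64, RTLIBCASE(SDIV_I) pastes the
+ // prefix and size together and returns RTLIB::SDIV_I64.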
+ assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
+
switch (Opcode) {
case TargetOpcode::G_SDIV:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- switch (Size) {
- case 32:
- return RTLIB::SDIV_I32;
- case 64:
- return RTLIB::SDIV_I64;
- case 128:
- return RTLIB::SDIV_I128;
- default:
- llvm_unreachable("unexpected size");
- }
+ RTLIBCASE(SDIV_I);
case TargetOpcode::G_UDIV:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- switch (Size) {
- case 32:
- return RTLIB::UDIV_I32;
- case 64:
- return RTLIB::UDIV_I64;
- case 128:
- return RTLIB::UDIV_I128;
- default:
- llvm_unreachable("unexpected size");
- }
+ RTLIBCASE(UDIV_I);
case TargetOpcode::G_SREM:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32;
+ RTLIBCASE(SREM_I);
case TargetOpcode::G_UREM:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::UREM_I64 : RTLIB::UREM_I32;
+ RTLIBCASE(UREM_I);
case TargetOpcode::G_CTLZ_ZERO_UNDEF:
- assert(Size == 32 && "Unsupported size");
- return RTLIB::CTLZ_I32;
+ RTLIBCASE(CTLZ_I);
case TargetOpcode::G_FADD:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32;
+ RTLIBCASE(ADD_F);
case TargetOpcode::G_FSUB:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32;
+ RTLIBCASE(SUB_F);
case TargetOpcode::G_FMUL:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32;
+ RTLIBCASE(MUL_F);
case TargetOpcode::G_FDIV:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32;
+ RTLIBCASE(DIV_F);
case TargetOpcode::G_FEXP:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::EXP_F64 : RTLIB::EXP_F32;
+ RTLIBCASE(EXP_F);
case TargetOpcode::G_FEXP2:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::EXP2_F64 : RTLIB::EXP2_F32;
+ RTLIBCASE(EXP2_F);
case TargetOpcode::G_FREM:
- return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32;
+ RTLIBCASE(REM_F);
case TargetOpcode::G_FPOW:
- return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32;
+ RTLIBCASE(POW_F);
case TargetOpcode::G_FMA:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
+ RTLIBCASE(FMA_F);
case TargetOpcode::G_FSIN:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::SIN_F128
- : Size == 64 ? RTLIB::SIN_F64 : RTLIB::SIN_F32;
+ RTLIBCASE(SIN_F);
case TargetOpcode::G_FCOS:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::COS_F128
- : Size == 64 ? RTLIB::COS_F64 : RTLIB::COS_F32;
+ RTLIBCASE(COS_F);
case TargetOpcode::G_FLOG10:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::LOG10_F128
- : Size == 64 ? RTLIB::LOG10_F64 : RTLIB::LOG10_F32;
+ RTLIBCASE(LOG10_F);
case TargetOpcode::G_FLOG:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::LOG_F128
- : Size == 64 ? RTLIB::LOG_F64 : RTLIB::LOG_F32;
+ RTLIBCASE(LOG_F);
case TargetOpcode::G_FLOG2:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::LOG2_F128
- : Size == 64 ? RTLIB::LOG2_F64 : RTLIB::LOG2_F32;
+ RTLIBCASE(LOG2_F);
case TargetOpcode::G_FCEIL:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::CEIL_F64 : RTLIB::CEIL_F32;
+ RTLIBCASE(CEIL_F);
case TargetOpcode::G_FFLOOR:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::FLOOR_F64 : RTLIB::FLOOR_F32;
+ RTLIBCASE(FLOOR_F);
+ case TargetOpcode::G_FMINNUM:
+ RTLIBCASE(FMIN_F);
+ case TargetOpcode::G_FMAXNUM:
+ RTLIBCASE(FMAX_F);
+ case TargetOpcode::G_FSQRT:
+ RTLIBCASE(SQRT_F);
+ case TargetOpcode::G_FRINT:
+ RTLIBCASE(RINT_F);
+ case TargetOpcode::G_FNEARBYINT:
+ RTLIBCASE(NEARBYINT_F);
}
llvm_unreachable("Unknown libcall function");
}
@@ -330,7 +460,8 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
/// True if an instruction is in tail position in its caller. Intended for
/// legalizing libcalls as tail calls when possible.
static bool isLibCallInTailPosition(MachineInstr &MI) {
- const Function &F = MI.getParent()->getParent()->getFunction();
+ MachineBasicBlock &MBB = *MI.getParent();
+ const Function &F = MBB.getParent()->getFunction();
// Conservatively require the attributes of the call to match those of
// the return. Ignore NoAlias and NonNull because they don't affect the
@@ -349,23 +480,22 @@ static bool isLibCallInTailPosition(MachineInstr &MI) {
// Only tail call if the following instruction is a standard return.
auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
- MachineInstr *Next = MI.getNextNode();
- if (!Next || TII.isTailCall(*Next) || !Next->isReturn())
+ auto Next = next_nodbg(MI.getIterator(), MBB.instr_end());
+ if (Next == MBB.instr_end() || TII.isTailCall(*Next) || !Next->isReturn())
return false;
return true;
}
LegalizerHelper::LegalizeResult
-llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+llvm::createLibcall(MachineIRBuilder &MIRBuilder, const char *Name,
const CallLowering::ArgInfo &Result,
- ArrayRef<CallLowering::ArgInfo> Args) {
+ ArrayRef<CallLowering::ArgInfo> Args,
+ const CallingConv::ID CC) {
auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
- auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
- const char *Name = TLI.getLibcallName(Libcall);
CallLowering::CallLoweringInfo Info;
- Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+ Info.CallConv = CC;
Info.Callee = MachineOperand::CreateES(Name);
Info.OrigRet = Result;
std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
@@ -375,6 +505,16 @@ llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
return LegalizerHelper::Legalized;
}
+LegalizerHelper::LegalizeResult
+llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+ const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args) {
+ auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
+ const char *Name = TLI.getLibcallName(Libcall);
+ const CallingConv::ID CC = TLI.getLibcallCallingConv(Libcall);
+ return createLibcall(MIRBuilder, Name, Result, Args, CC);
+}
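+// e.g. (illustrative) createLibcall(MIRBuilder, RTLIB::ADD_F64, Result, Args)
+// resolves the libcall to its target-specific name (commonly "__adddf3") and
+// calling convention via TargetLowering, then forwards to the name-based
+// overload above.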
+
// Useful for libcalls where all operands have the same type.
static LegalizerHelper::LegalizeResult
simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
@@ -428,7 +568,7 @@ llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
}
const char *Name = TLI.getLibcallName(RTLibcall);
- MIRBuilder.setInstr(MI);
+ MIRBuilder.setInstrAndDebugLoc(MI);
CallLowering::CallLoweringInfo Info;
Info.CallConv = TLI.getLibcallCallingConv(RTLibcall);
@@ -443,14 +583,16 @@ llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
if (Info.LoweredTailCall) {
assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?");
- // We must have a return following the call to get past
+ // We must have a return following the call (or debug insts) to get past
// isLibCallInTailPosition.
- assert(MI.getNextNode() && MI.getNextNode()->isReturn() &&
- "Expected instr following MI to be a return?");
-
- // We lowered a tail call, so the call is now the return from the block.
- // Delete the old return.
- MI.getNextNode()->eraseFromParent();
+ do {
+ MachineInstr *Next = MI.getNextNode();
+ assert(Next && (Next->isReturn() || Next->isDebugInstr()) &&
+ "Expected instr following MI to be return or debug inst?");
+ // We lowered a tail call, so the call is now the return from the block.
+ // Delete the old return.
+ Next->eraseFromParent();
+ } while (MI.getNextNode());
}
return LegalizerHelper::Legalized;
@@ -492,8 +634,6 @@ LegalizerHelper::libcall(MachineInstr &MI) {
unsigned Size = LLTy.getSizeInBits();
auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
- MIRBuilder.setInstr(MI);
-
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
@@ -523,37 +663,29 @@ LegalizerHelper::libcall(MachineInstr &MI) {
case TargetOpcode::G_FEXP:
case TargetOpcode::G_FEXP2:
case TargetOpcode::G_FCEIL:
- case TargetOpcode::G_FFLOOR: {
- if (Size > 64) {
- LLVM_DEBUG(dbgs() << "Size " << Size << " too large to legalize.\n");
+ case TargetOpcode::G_FFLOOR:
+ case TargetOpcode::G_FMINNUM:
+ case TargetOpcode::G_FMAXNUM:
+ case TargetOpcode::G_FSQRT:
+ case TargetOpcode::G_FRINT:
+ case TargetOpcode::G_FNEARBYINT: {
+ Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
+ if (!HLTy || (Size != 32 && Size != 64 && Size != 128)) {
+ LLVM_DEBUG(dbgs() << "No libcall available for size " << Size << ".\n");
return UnableToLegalize;
}
- Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
if (Status != Legalized)
return Status;
break;
}
- case TargetOpcode::G_FPEXT: {
- // FIXME: Support other floating point types (half, fp128 etc)
- unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
- unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- if (ToSize != 64 || FromSize != 32)
- return UnableToLegalize;
- LegalizeResult Status = conversionLibcall(
- MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx));
- if (Status != Legalized)
- return Status;
- break;
- }
+ case TargetOpcode::G_FPEXT:
case TargetOpcode::G_FPTRUNC: {
- // FIXME: Support other floating point types (half, fp128 etc)
- unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
- unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- if (ToSize != 32 || FromSize != 64)
+ Type *FromTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(1).getReg()));
+ Type *ToTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(0).getReg()));
+ if (!FromTy || !ToTy)
return UnableToLegalize;
- LegalizeResult Status = conversionLibcall(
- MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx));
+ LegalizeResult Status = conversionLibcall(MI, MIRBuilder, ToTy, FromTy);
if (Status != Legalized)
return Status;
break;
@@ -597,8 +729,6 @@ LegalizerHelper::libcall(MachineInstr &MI) {
LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned TypeIdx,
LLT NarrowTy) {
- MIRBuilder.setInstr(MI);
-
uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
uint64_t NarrowSize = NarrowTy.getSizeInBits();
@@ -606,19 +736,34 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
default:
return UnableToLegalize;
case TargetOpcode::G_IMPLICIT_DEF: {
- // FIXME: add support for when SizeOp0 isn't an exact multiple of
- // NarrowSize.
- if (SizeOp0 % NarrowSize != 0)
- return UnableToLegalize;
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+
+ // If SizeOp0 is not an exact multiple of NarrowSize, emit
+ // G_ANYEXT(G_IMPLICIT_DEF). Cast result to vector if needed.
+ // FIXME: Although this would also be legal for the general case, it causes
+ // a lot of regressions in the emitted code (superfluous COPYs, artifact
+ // combines not being hit). This seems to be a problem related to the
+ // artifact combiner.
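+ // e.g. (illustrative) narrowing a scalar s96 G_IMPLICIT_DEF with
+ // NarrowTy = s64 emits:
+ // %0:_(s64) = G_IMPLICIT_DEF
+ // %dst:_(s96) = G_ANYEXT %0:_(s64)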
+ if (SizeOp0 % NarrowSize != 0) {
+ LLT ImplicitTy = NarrowTy;
+ if (DstTy.isVector())
+ ImplicitTy = LLT::vector(DstTy.getNumElements(), ImplicitTy);
+
+ Register ImplicitReg = MIRBuilder.buildUndef(ImplicitTy).getReg(0);
+ MIRBuilder.buildAnyExt(DstReg, ImplicitReg);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
int NumParts = SizeOp0 / NarrowSize;
SmallVector<Register, 2> DstRegs;
for (int i = 0; i < NumParts; ++i)
- DstRegs.push_back(
- MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
+ DstRegs.push_back(MIRBuilder.buildUndef(NarrowTy).getReg(0));
- Register DstReg = MI.getOperand(0).getReg();
- if(MRI.getType(DstReg).isVector())
+ if (DstTy.isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
else
MIRBuilder.buildMerge(DstReg, DstRegs);
@@ -657,49 +802,10 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MI.eraseFromParent();
return Legalized;
}
- case TargetOpcode::G_SEXT: {
- if (TypeIdx != 0)
- return UnableToLegalize;
-
- Register SrcReg = MI.getOperand(1).getReg();
- LLT SrcTy = MRI.getType(SrcReg);
-
- // FIXME: support the general case where the requested NarrowTy may not be
- // the same as the source type. E.g. s128 = sext(s32)
- if ((SrcTy.getSizeInBits() != SizeOp0 / 2) ||
- SrcTy.getSizeInBits() != NarrowTy.getSizeInBits()) {
- LLVM_DEBUG(dbgs() << "Can't narrow sext to type " << NarrowTy << "\n");
- return UnableToLegalize;
- }
-
- // Shift the sign bit of the low register through the high register.
- auto ShiftAmt =
- MIRBuilder.buildConstant(LLT::scalar(64), NarrowTy.getSizeInBits() - 1);
- auto Shift = MIRBuilder.buildAShr(NarrowTy, SrcReg, ShiftAmt);
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {SrcReg, Shift.getReg(0)});
- MI.eraseFromParent();
- return Legalized;
- }
- case TargetOpcode::G_ZEXT: {
- if (TypeIdx != 0)
- return UnableToLegalize;
-
- LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
- uint64_t SizeOp1 = SrcTy.getSizeInBits();
- if (SizeOp0 % SizeOp1 != 0)
- return UnableToLegalize;
-
- // Generate a merge where the bottom bits are taken from the source, and
- // zero everything else.
- Register ZeroReg = MIRBuilder.buildConstant(SrcTy, 0).getReg(0);
- unsigned NumParts = SizeOp0 / SizeOp1;
- SmallVector<Register, 4> Srcs = {MI.getOperand(1).getReg()};
- for (unsigned Part = 1; Part < NumParts; ++Part)
- Srcs.push_back(ZeroReg);
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), Srcs);
- MI.eraseFromParent();
- return Legalized;
- }
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_ANYEXT:
+ return narrowScalarExt(MI, TypeIdx, NarrowTy);
case TargetOpcode::G_TRUNC: {
if (TypeIdx != 1)
return UnableToLegalize;
@@ -710,12 +816,15 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
return UnableToLegalize;
}
- auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1).getReg());
- MIRBuilder.buildCopy(MI.getOperand(0).getReg(), Unmerge.getReg(0));
+ auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1));
+ MIRBuilder.buildCopy(MI.getOperand(0), Unmerge.getReg(0));
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_FREEZE:
+ return reduceOperationWidth(MI, TypeIdx, NarrowTy);
+
case TargetOpcode::G_ADD: {
// FIXME: add support for when SizeOp0 isn't an exact multiple of
// NarrowSize.
@@ -779,7 +888,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
DstRegs.push_back(DstReg);
BorrowIn = BorrowOut;
}
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
+ MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -800,7 +909,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
auto &MMO = **MI.memoperands_begin();
- MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO);
+ MIRBuilder.buildLoad(TmpReg, MI.getOperand(1), MMO);
MIRBuilder.buildAnyExt(DstReg, TmpReg);
MI.eraseFromParent();
return Legalized;
@@ -819,12 +928,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (MMO.getSizeInBits() == NarrowSize) {
MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
} else {
- unsigned ExtLoad = ZExt ? TargetOpcode::G_ZEXTLOAD
- : TargetOpcode::G_SEXTLOAD;
- MIRBuilder.buildInstr(ExtLoad)
- .addDef(TmpReg)
- .addUse(PtrReg)
- .addMemOperand(&MMO);
+ MIRBuilder.buildLoadInstr(MI.getOpcode(), TmpReg, PtrReg, MMO);
}
if (ZExt)
@@ -853,7 +957,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
auto &MMO = **MI.memoperands_begin();
MIRBuilder.buildTrunc(TmpReg, SrcReg);
- MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO);
+ MIRBuilder.buildStore(TmpReg, MI.getOperand(1), MMO);
MI.eraseFromParent();
return Legalized;
}
@@ -885,8 +989,19 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
case TargetOpcode::G_CTTZ:
case TargetOpcode::G_CTTZ_ZERO_UNDEF:
case TargetOpcode::G_CTPOP:
- if (TypeIdx != 0)
- return UnableToLegalize; // TODO
+ if (TypeIdx == 1)
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_CTLZ:
+ case TargetOpcode::G_CTLZ_ZERO_UNDEF:
+ return narrowScalarCTLZ(MI, TypeIdx, NarrowTy);
+ case TargetOpcode::G_CTTZ:
+ case TargetOpcode::G_CTTZ_ZERO_UNDEF:
+ return narrowScalarCTTZ(MI, TypeIdx, NarrowTy);
+ case TargetOpcode::G_CTPOP:
+ return narrowScalarCTPOP(MI, TypeIdx, NarrowTy);
+ default:
+ return UnableToLegalize;
+ }
Observer.changingInstr(MI);
narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
@@ -910,10 +1025,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
return Legalized;
case TargetOpcode::G_PHI: {
unsigned NumParts = SizeOp0 / NarrowSize;
- SmallVector<Register, 2> DstRegs;
- SmallVector<SmallVector<Register, 2>, 2> SrcRegs;
- DstRegs.resize(NumParts);
- SrcRegs.resize(MI.getNumOperands() / 2);
+ SmallVector<Register, 2> DstRegs(NumParts);
+ SmallVector<SmallVector<Register, 2>, 2> SrcRegs(MI.getNumOperands() / 2);
Observer.changingInstr(MI);
for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB();
@@ -931,7 +1044,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1));
}
MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
+ MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
Observer.changedInstr(MI);
MI.eraseFromParent();
return Legalized;
@@ -955,11 +1068,11 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
Observer.changingInstr(MI);
Register LHSL = MRI.createGenericVirtualRegister(NarrowTy);
Register LHSH = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2).getReg());
+ MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2));
Register RHSL = MRI.createGenericVirtualRegister(NarrowTy);
Register RHSH = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3).getReg());
+ MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3));
CmpInst::Predicate Pred =
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
@@ -970,14 +1083,14 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH);
MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH);
MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0);
- MIRBuilder.buildICmp(Pred, MI.getOperand(0).getReg(), Or, Zero);
+ MIRBuilder.buildICmp(Pred, MI.getOperand(0), Or, Zero);
} else {
MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, ResTy, LHSH, RHSH);
MachineInstrBuilder CmpHEQ =
MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy, LHSH, RHSH);
MachineInstrBuilder CmpLU = MIRBuilder.buildICmp(
ICmpInst::getUnsignedPredicate(Pred), ResTy, LHSL, RHSL);
- MIRBuilder.buildSelect(MI.getOperand(0).getReg(), CmpHEQ, CmpLU, CmpH);
+ MIRBuilder.buildSelect(MI.getOperand(0), CmpHEQ, CmpLU, CmpH);
}
Observer.changedInstr(MI);
MI.eraseFromParent();
@@ -987,8 +1100,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (TypeIdx != 0)
return UnableToLegalize;
- if (!MI.getOperand(2).isImm())
- return UnableToLegalize;
int64_t SizeInBits = MI.getOperand(2).getImm();
// So long as the new type has more bits than the bits we're extending we
@@ -998,13 +1109,13 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// We don't lose any non-extension bits by truncating the src and
// sign-extending the dst.
MachineOperand &MO1 = MI.getOperand(1);
- auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1.getReg());
- MO1.setReg(TruncMIB->getOperand(0).getReg());
+ auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1);
+ MO1.setReg(TruncMIB.getReg(0));
MachineOperand &MO2 = MI.getOperand(0);
Register DstExt = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildInstr(TargetOpcode::G_SEXT, {MO2.getReg()}, {DstExt});
+ MIRBuilder.buildSExt(MO2, DstExt);
MO2.setReg(DstExt);
Observer.changedInstr(MI);
return Legalized;
@@ -1031,12 +1142,11 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
}
// Explode the big arguments into smaller chunks.
- MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1).getReg());
+ MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1));
Register AshrCstReg =
MIRBuilder.buildConstant(NarrowTy, NarrowTy.getScalarSizeInBits() - 1)
- ->getOperand(0)
- .getReg();
+ .getReg(0);
Register FullExtensionReg = 0;
Register PartialExtensionReg = 0;
@@ -1051,11 +1161,9 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
DstRegs.push_back(FullExtensionReg);
continue;
}
- DstRegs.push_back(MIRBuilder
- .buildInstr(TargetOpcode::G_ASHR, {NarrowTy},
- {PartialExtensionReg, AshrCstReg})
- ->getOperand(0)
- .getReg());
+ DstRegs.push_back(
+ MIRBuilder.buildAShr(NarrowTy, PartialExtensionReg, AshrCstReg)
+ .getReg(0));
FullExtensionReg = DstRegs.back();
} else {
DstRegs.push_back(
@@ -1063,8 +1171,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
.buildInstr(
TargetOpcode::G_SEXT_INREG, {NarrowTy},
{SrcRegs[i], SizeInBits % NarrowTy.getScalarSizeInBits()})
- ->getOperand(0)
- .getReg());
+ .getReg(0));
PartialExtensionReg = DstRegs.back();
}
}
@@ -1091,28 +1198,57 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
DstRegs.push_back(DstPart.getReg(0));
}
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
+ MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
Observer.changedInstr(MI);
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_PTRMASK: {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+ Observer.changingInstr(MI);
+ narrowScalarSrc(MI, NarrowTy, 2);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
}
}
+Register LegalizerHelper::coerceToScalar(Register Val) {
+ LLT Ty = MRI.getType(Val);
+ if (Ty.isScalar())
+ return Val;
+
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ LLT NewTy = LLT::scalar(Ty.getSizeInBits());
+ if (Ty.isPointer()) {
+ if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
+ return Register();
+ return MIRBuilder.buildPtrToInt(NewTy, Val).getReg(0);
+ }
+
+ Register NewVal = Val;
+
+ assert(Ty.isVector());
+ LLT EltTy = Ty.getElementType();
+ if (EltTy.isPointer())
+ NewVal = MIRBuilder.buildPtrToInt(NewTy, NewVal).getReg(0);
+ return MIRBuilder.buildBitcast(NewTy, NewVal).getReg(0);
+}
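+// e.g. (illustrative) coerceToScalar bitcasts %v:_(<2 x s32>) to s64, uses
+// G_PTRTOINT for pointers in integral address spaces, and returns an invalid
+// Register() for pointers in non-integral address spaces.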
+
void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
unsigned OpIdx, unsigned ExtOpcode) {
MachineOperand &MO = MI.getOperand(OpIdx);
- auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO.getReg()});
- MO.setReg(ExtB->getOperand(0).getReg());
+ auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO});
+ MO.setReg(ExtB.getReg(0));
}
void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy,
unsigned OpIdx) {
MachineOperand &MO = MI.getOperand(OpIdx);
- auto ExtB = MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {NarrowTy},
- {MO.getReg()});
- MO.setReg(ExtB->getOperand(0).getReg());
+ auto ExtB = MIRBuilder.buildTrunc(NarrowTy, MO);
+ MO.setReg(ExtB.getReg(0));
}
void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
@@ -1120,7 +1256,7 @@ void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
MachineOperand &MO = MI.getOperand(OpIdx);
Register DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt});
+ MIRBuilder.buildInstr(TruncOpcode, {MO}, {DstExt});
MO.setReg(DstExt);
}
@@ -1129,7 +1265,7 @@ void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy,
MachineOperand &MO = MI.getOperand(OpIdx);
Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc});
+ MIRBuilder.buildInstr(ExtOpcode, {MO}, {DstTrunc});
MO.setReg(DstTrunc);
}
@@ -1138,7 +1274,7 @@ void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy,
MachineOperand &MO = MI.getOperand(OpIdx);
Register DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildExtract(MO.getReg(), DstExt, 0);
+ MIRBuilder.buildExtract(MO, DstExt, 0);
MO.setReg(DstExt);
}
@@ -1172,6 +1308,19 @@ void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy,
MO.setReg(MoreReg);
}
+void LegalizerHelper::bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx) {
+ MachineOperand &Op = MI.getOperand(OpIdx);
+ Op.setReg(MIRBuilder.buildBitcast(CastTy, Op).getReg(0));
+}
+
+void LegalizerHelper::bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx) {
+ MachineOperand &MO = MI.getOperand(OpIdx);
+ Register CastDst = MRI.createGenericVirtualRegister(CastTy);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+ MIRBuilder.buildBitcast(MO, CastDst);
+ MO.setReg(CastDst);
+}
+
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
LLT WideTy) {
@@ -1300,10 +1449,10 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
if (TypeIdx != 0)
return UnableToLegalize;
- unsigned NumDst = MI.getNumOperands() - 1;
+ int NumDst = MI.getNumOperands() - 1;
Register SrcReg = MI.getOperand(NumDst).getReg();
LLT SrcTy = MRI.getType(SrcReg);
- if (!SrcTy.isScalar())
+ if (SrcTy.isVector())
return UnableToLegalize;
Register Dst0Reg = MI.getOperand(0).getReg();
@@ -1311,26 +1460,90 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
if (!DstTy.isScalar())
return UnableToLegalize;
- unsigned NewSrcSize = NumDst * WideTy.getSizeInBits();
- LLT NewSrcTy = LLT::scalar(NewSrcSize);
- unsigned SizeDiff = WideTy.getSizeInBits() - DstTy.getSizeInBits();
+ if (WideTy.getSizeInBits() >= SrcTy.getSizeInBits()) {
+ if (SrcTy.isPointer()) {
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) {
+ LLVM_DEBUG(
+ dbgs() << "Not casting non-integral address space integer\n");
+ return UnableToLegalize;
+ }
+
+ SrcTy = LLT::scalar(SrcTy.getSizeInBits());
+ SrcReg = MIRBuilder.buildPtrToInt(SrcTy, SrcReg).getReg(0);
+ }
+
+ // Widen SrcTy to WideTy. This does not affect the result, but since the
+ // user requested this size, it is probably better handled by the target
+ // than SrcTy and should reduce the total number of legalization artifacts.
+ if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
+ SrcTy = WideTy;
+ SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0);
+ }
- auto WideSrc = MIRBuilder.buildZExt(NewSrcTy, SrcReg);
+ // There's no unmerge type to target. Directly extract the bits from the
+ // source type.
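+ // e.g. (illustrative) unmerging s32 into two s16 results with WideTy = s64:
+ // the source is any-extended to s64, then %dst0 = G_TRUNC %src64 and
+ // %dst1 = G_TRUNC (G_LSHR %src64, 16).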
+ unsigned DstSize = DstTy.getSizeInBits();
- for (unsigned I = 1; I != NumDst; ++I) {
- auto ShiftAmt = MIRBuilder.buildConstant(NewSrcTy, SizeDiff * I);
- auto Shl = MIRBuilder.buildShl(NewSrcTy, WideSrc, ShiftAmt);
- WideSrc = MIRBuilder.buildOr(NewSrcTy, WideSrc, Shl);
+ MIRBuilder.buildTrunc(Dst0Reg, SrcReg);
+ for (int I = 1; I != NumDst; ++I) {
+ auto ShiftAmt = MIRBuilder.buildConstant(SrcTy, DstSize * I);
+ auto Shr = MIRBuilder.buildLShr(SrcTy, SrcReg, ShiftAmt);
+ MIRBuilder.buildTrunc(MI.getOperand(I), Shr);
+ }
+
+ MI.eraseFromParent();
+ return Legalized;
}
- Observer.changingInstr(MI);
+ // Extend the source to a wider type.
+ LLT LCMTy = getLCMType(SrcTy, WideTy);
- MI.getOperand(NumDst).setReg(WideSrc->getOperand(0).getReg());
- for (unsigned I = 0; I != NumDst; ++I)
- widenScalarDst(MI, WideTy, I);
+ Register WideSrc = SrcReg;
+ if (LCMTy.getSizeInBits() != SrcTy.getSizeInBits()) {
+ // TODO: If this is an integral address space, cast to integer and anyext.
+ if (SrcTy.isPointer()) {
+ LLVM_DEBUG(dbgs() << "Widening pointer source types not implemented\n");
+ return UnableToLegalize;
+ }
- Observer.changedInstr(MI);
+ WideSrc = MIRBuilder.buildAnyExt(LCMTy, WideSrc).getReg(0);
+ }
+
+ auto Unmerge = MIRBuilder.buildUnmerge(WideTy, WideSrc);
+ // Create a sequence of unmerges to the original results. Since we may have
+ // widened the source, we will need to pad the results with dead defs to cover
+ // the source register.
+ // e.g. widen s16 to s32:
+ // %1:_(s16), %2:_(s16), %3:_(s16) = G_UNMERGE_VALUES %0:_(s48)
+ //
+ // =>
+ // %4:_(s64) = G_ANYEXT %0:_(s48)
+ // %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4 ; Requested unmerge
+ // %1:_(s16), %2:_(s16) = G_UNMERGE_VALUES %5 ; unpack to original regs
+ // %3:_(s16), dead %7 = G_UNMERGE_VALUES %6 ; original reg + extra dead def
+
+ const int NumUnmerge = Unmerge->getNumOperands() - 1;
+ const int PartsPerUnmerge = WideTy.getSizeInBits() / DstTy.getSizeInBits();
+
+ for (int I = 0; I != NumUnmerge; ++I) {
+ auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES);
+
+ for (int J = 0; J != PartsPerUnmerge; ++J) {
+ int Idx = I * PartsPerUnmerge + J;
+ if (Idx < NumDst)
+ MIB.addDef(MI.getOperand(Idx).getReg());
+ else {
+ // Create dead def for excess components.
+ MIB.addDef(MRI.createGenericVirtualRegister(DstTy));
+ }
+ }
+
+ MIB.addUse(Unmerge.getReg(I));
+ }
+
+ MI.eraseFromParent();
return Legalized;
}
@@ -1426,9 +1639,45 @@ LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
-LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
- MIRBuilder.setInstr(MI);
+LegalizerHelper::widenScalarAddSubSat(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy) {
+ bool IsSigned = MI.getOpcode() == TargetOpcode::G_SADDSAT ||
+ MI.getOpcode() == TargetOpcode::G_SSUBSAT;
+ // We can convert this to:
+ // 1. Any extend iN to iM
+ // 2. SHL by M-N
+ // 3. [US][ADD|SUB]SAT
+ // 4. L/ASHR by M-N
+ //
+ // It may be more efficient to lower this to a min and a max operation in
+ // the higher precision arithmetic if the promoted operation isn't legal,
+ // but this decision is up to the target's lowering request.
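+ // e.g. (illustrative) G_SADDSAT on s8 widened to s32: both operands are
+ // any-extended and shifted left by 24 so saturation happens at the s8
+ // boundary, then the result is arithmetic-shifted right by 24 and truncated
+ // back to s8.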
+ Register DstReg = MI.getOperand(0).getReg();
+
+ unsigned NewBits = WideTy.getScalarSizeInBits();
+ unsigned SHLAmount = NewBits - MRI.getType(DstReg).getScalarSizeInBits();
+
+ auto LHS = MIRBuilder.buildAnyExt(WideTy, MI.getOperand(1));
+ auto RHS = MIRBuilder.buildAnyExt(WideTy, MI.getOperand(2));
+ auto ShiftK = MIRBuilder.buildConstant(WideTy, SHLAmount);
+ auto ShiftL = MIRBuilder.buildShl(WideTy, LHS, ShiftK);
+ auto ShiftR = MIRBuilder.buildShl(WideTy, RHS, ShiftK);
+
+ auto WideInst = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy},
+ {ShiftL, ShiftR}, MI.getFlags());
+
+ // Use a shift that will preserve the number of sign bits when the trunc is
+ // folded away.
+ auto Result = IsSigned ? MIRBuilder.buildAShr(WideTy, WideInst, ShiftK)
+ : MIRBuilder.buildLShr(WideTy, WideInst, ShiftK);
+ MIRBuilder.buildTrunc(DstReg, Result);
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
@@ -1444,28 +1693,30 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
case TargetOpcode::G_USUBO: {
if (TypeIdx == 1)
return UnableToLegalize; // TODO
- auto LHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
- {MI.getOperand(2).getReg()});
- auto RHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
- {MI.getOperand(3).getReg()});
+ auto LHSZext = MIRBuilder.buildZExt(WideTy, MI.getOperand(2));
+ auto RHSZext = MIRBuilder.buildZExt(WideTy, MI.getOperand(3));
unsigned Opcode = MI.getOpcode() == TargetOpcode::G_UADDO
? TargetOpcode::G_ADD
: TargetOpcode::G_SUB;
// Do the arithmetic in the larger type.
auto NewOp = MIRBuilder.buildInstr(Opcode, {WideTy}, {LHSZext, RHSZext});
LLT OrigTy = MRI.getType(MI.getOperand(0).getReg());
- APInt Mask = APInt::getAllOnesValue(OrigTy.getSizeInBits());
- auto AndOp = MIRBuilder.buildInstr(
- TargetOpcode::G_AND, {WideTy},
- {NewOp, MIRBuilder.buildConstant(WideTy, Mask.getZExtValue())});
+ APInt Mask =
+ APInt::getLowBitsSet(WideTy.getSizeInBits(), OrigTy.getSizeInBits());
+ auto AndOp = MIRBuilder.buildAnd(
+ WideTy, NewOp, MIRBuilder.buildConstant(WideTy, Mask));
// There is no overflow if the AndOp is the same as NewOp.
- MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1).getReg(), NewOp,
- AndOp);
+ MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1), NewOp, AndOp);
// Now trunc the NewOp to the original result.
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), NewOp);
+ MIRBuilder.buildTrunc(MI.getOperand(0), NewOp);
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_SADDSAT:
+ case TargetOpcode::G_SSUBSAT:
+ case TargetOpcode::G_UADDSAT:
+ case TargetOpcode::G_USUBSAT:
+ return widenScalarAddSubSat(MI, TypeIdx, WideTy);
case TargetOpcode::G_CTTZ:
case TargetOpcode::G_CTTZ_ZERO_UNDEF:
case TargetOpcode::G_CTLZ:
@@ -1500,9 +1751,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) {
// The correct result is NewOp - (Difference in widety and current ty).
unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits();
- MIBNewOp = MIRBuilder.buildInstr(
- TargetOpcode::G_SUB, {WideTy},
- {MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff)});
+ MIBNewOp = MIRBuilder.buildSub(
+ WideTy, MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff));
}
MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp);
@@ -1525,10 +1775,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
LLT Ty = MRI.getType(DstReg);
unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
MIRBuilder.buildConstant(ShiftAmtReg, DiffBits);
- MIRBuilder.buildInstr(TargetOpcode::G_LSHR)
- .addDef(ShrReg)
- .addUse(DstExt)
- .addUse(ShiftAmtReg);
+ MIRBuilder.buildLShr(ShrReg, DstExt, ShiftAmtReg);
MIRBuilder.buildTrunc(DstReg, ShrReg);
Observer.changedInstr(MI);
@@ -1552,6 +1799,13 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI);
return Legalized;
}
+ case TargetOpcode::G_FREEZE:
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ Observer.changedInstr(MI);
+ return Legalized;
+
case TargetOpcode::G_ADD:
case TargetOpcode::G_AND:
case TargetOpcode::G_MUL:
@@ -1844,9 +2098,10 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
// TODO: Probably should be zext
widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_SEXT);
Observer.changedInstr(MI);
+ return Legalized;
}
- return Legalized;
+ return UnableToLegalize;
}
case TargetOpcode::G_FADD:
case TargetOpcode::G_FMUL:
@@ -1932,29 +2187,162 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
widenScalarDst(MI, WideTy, 0, TargetOpcode::G_TRUNC);
Observer.changedInstr(MI);
return Legalized;
+ case TargetOpcode::G_PTRMASK: {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ }
+}
+
+static void getUnmergePieces(SmallVectorImpl<Register> &Pieces,
+ MachineIRBuilder &B, Register Src, LLT Ty) {
+ auto Unmerge = B.buildUnmerge(Ty, Src);
+ for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
+ Pieces.push_back(Unmerge.getReg(I));
+}
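+// e.g. (illustrative) getUnmergePieces(Pieces, B, %src:_(s64), s16) appends
+// the four s16 results of a G_UNMERGE_VALUES of %src to Pieces.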
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerBitcast(MachineInstr &MI) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+
+ if (SrcTy.isVector()) {
+ LLT SrcEltTy = SrcTy.getElementType();
+ SmallVector<Register, 8> SrcRegs;
+
+ if (DstTy.isVector()) {
+ int NumDstElt = DstTy.getNumElements();
+ int NumSrcElt = SrcTy.getNumElements();
+
+ LLT DstEltTy = DstTy.getElementType();
+ LLT DstCastTy = DstEltTy; // Intermediate bitcast result type
+ LLT SrcPartTy = SrcEltTy; // Original unmerge result type.
+
+ // If there's an element size mismatch, insert intermediate casts to match
+ // the result element type.
+ if (NumSrcElt < NumDstElt) { // Source element type is larger.
+ // %1:_(<4 x s8>) = G_BITCAST %0:_(<2 x s16>)
+ //
+ // =>
+ //
+ // %2:_(s16), %3:_(s16) = G_UNMERGE_VALUES %0
+ // %4:_(<2 x s8>) = G_BITCAST %2
+ // %5:_(<2 x s8>) = G_BITCAST %3
+ // %1:_(<4 x s8>) = G_CONCAT_VECTORS %4, %5
+ DstCastTy = LLT::vector(NumDstElt / NumSrcElt, DstEltTy);
+ SrcPartTy = SrcEltTy;
+ } else if (NumSrcElt > NumDstElt) { // Source element type is smaller.
+ //
+ // %1:_(<2 x s16>) = G_BITCAST %0:_(<4 x s8>)
+ //
+ // =>
+ //
+ // %2:_(<2 x s8>), %3:_(<2 x s8>) = G_UNMERGE_VALUES %0
+ // %4:_(s16) = G_BITCAST %2
+ // %5:_(s16) = G_BITCAST %3
+ // %1:_(<2 x s16>) = G_BUILD_VECTOR %4, %5
+ SrcPartTy = LLT::vector(NumSrcElt / NumDstElt, SrcEltTy);
+ DstCastTy = DstEltTy;
+ }
+
+ getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcPartTy);
+ for (Register &SrcReg : SrcRegs)
+ SrcReg = MIRBuilder.buildBitcast(DstCastTy, SrcReg).getReg(0);
+ } else
+ getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcEltTy);
+
+ MIRBuilder.buildMerge(Dst, SrcRegs);
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ if (DstTy.isVector()) {
+ SmallVector<Register, 8> SrcRegs;
+ getUnmergePieces(SrcRegs, MIRBuilder, Src, DstTy.getElementType());
+ MIRBuilder.buildMerge(Dst, SrcRegs);
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::bitcast(MachineInstr &MI, unsigned TypeIdx, LLT CastTy) {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_LOAD: {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ Observer.changingInstr(MI);
+ bitcastDst(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_STORE: {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ Observer.changingInstr(MI);
+ bitcastSrc(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_SELECT: {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ if (MRI.getType(MI.getOperand(1).getReg()).isVector()) {
+ LLVM_DEBUG(
+ dbgs() << "bitcast action not implemented for vector select\n");
+ return UnableToLegalize;
+ }
+
+ Observer.changingInstr(MI);
+ bitcastSrc(MI, CastTy, 2);
+ bitcastSrc(MI, CastTy, 3);
+ bitcastDst(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_AND:
+ case TargetOpcode::G_OR:
+ case TargetOpcode::G_XOR: {
+ Observer.changingInstr(MI);
+ bitcastSrc(MI, CastTy, 1);
+ bitcastSrc(MI, CastTy, 2);
+ bitcastDst(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ default:
+ return UnableToLegalize;
}
}
LegalizerHelper::LegalizeResult
LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
using namespace TargetOpcode;
- MIRBuilder.setInstr(MI);
switch(MI.getOpcode()) {
default:
return UnableToLegalize;
+ case TargetOpcode::G_BITCAST:
+ return lowerBitcast(MI);
case TargetOpcode::G_SREM:
case TargetOpcode::G_UREM: {
- Register QuotReg = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
- .addDef(QuotReg)
- .addUse(MI.getOperand(1).getReg())
- .addUse(MI.getOperand(2).getReg());
-
- Register ProdReg = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
- MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
- ProdReg);
+ auto Quot =
+ MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV, {Ty},
+ {MI.getOperand(1), MI.getOperand(2)});
+
+ auto Prod = MIRBuilder.buildMul(Ty, Quot, MI.getOperand(2));
+ MIRBuilder.buildSub(MI.getOperand(0), MI.getOperand(1), Prod);
MI.eraseFromParent();
return Legalized;
}
@@ -1970,36 +2358,30 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(2).getReg();
Register RHS = MI.getOperand(3).getReg();
- MIRBuilder.buildMul(Res, LHS, RHS);
-
unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO
? TargetOpcode::G_SMULH
: TargetOpcode::G_UMULH;
- Register HiPart = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildInstr(Opcode)
- .addDef(HiPart)
- .addUse(LHS)
- .addUse(RHS);
+ Observer.changingInstr(MI);
+ const auto &TII = MIRBuilder.getTII();
+ MI.setDesc(TII.get(TargetOpcode::G_MUL));
+ MI.RemoveOperand(1);
+ Observer.changedInstr(MI);
- Register Zero = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildConstant(Zero, 0);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+
+ auto HiPart = MIRBuilder.buildInstr(Opcode, {Ty}, {LHS, RHS});
+ auto Zero = MIRBuilder.buildConstant(Ty, 0);
// For *signed* multiply, overflow is detected by checking:
// (hi != (lo >> bitwidth-1))
if (Opcode == TargetOpcode::G_SMULH) {
- Register Shifted = MRI.createGenericVirtualRegister(Ty);
- Register ShiftAmt = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
- MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
- .addDef(Shifted)
- .addUse(Res)
- .addUse(ShiftAmt);
+ auto ShiftAmt = MIRBuilder.buildConstant(Ty, Ty.getSizeInBits() - 1);
+ auto Shifted = MIRBuilder.buildAShr(Ty, Res, ShiftAmt);
MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
} else {
MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
}
- MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FNEG: {
@@ -2008,31 +2390,16 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
if (Ty.isVector())
return UnableToLegalize;
Register Res = MI.getOperand(0).getReg();
- Type *ZeroTy;
LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
- switch (Ty.getSizeInBits()) {
- case 16:
- ZeroTy = Type::getHalfTy(Ctx);
- break;
- case 32:
- ZeroTy = Type::getFloatTy(Ctx);
- break;
- case 64:
- ZeroTy = Type::getDoubleTy(Ctx);
- break;
- case 128:
- ZeroTy = Type::getFP128Ty(Ctx);
- break;
- default:
- llvm_unreachable("unexpected floating-point type");
- }
+ Type *ZeroTy = getFloatTypeForLLT(Ctx, Ty);
+ if (!ZeroTy)
+ return UnableToLegalize;
ConstantFP &ZeroForNegation =
*cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
Register SubByReg = MI.getOperand(1).getReg();
- Register ZeroReg = Zero->getOperand(0).getReg();
- MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg},
- MI.getFlags());
+ Register ZeroReg = Zero.getReg(0);
+ MIRBuilder.buildFSub(Res, ZeroReg, SubByReg, MI.getFlags());
MI.eraseFromParent();
return Legalized;
}
@@ -2046,13 +2413,15 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
Register Neg = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
- MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags());
+ MIRBuilder.buildFNeg(Neg, RHS);
+ MIRBuilder.buildFAdd(Res, LHS, Neg, MI.getFlags());
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FMAD:
return lowerFMad(MI);
+ case TargetOpcode::G_FFLOOR:
+ return lowerFFloor(MI);
case TargetOpcode::G_INTRINSIC_ROUND:
return lowerIntrinsicRound(MI);
case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
@@ -2089,7 +2458,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// result values together, before truncating back down to the non-pow-2
// type.
// E.g. v1 = i24 load =>
- // v2 = i32 load (2 byte)
+ // v2 = i32 zextload (2 byte)
// v3 = i32 load (1 byte)
// v4 = i32 shl v3, 16
// v5 = i32 or v4, v2
@@ -2110,11 +2479,11 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
LLT AnyExtTy = LLT::scalar(AnyExtSize);
Register LargeLdReg = MRI.createGenericVirtualRegister(AnyExtTy);
Register SmallLdReg = MRI.createGenericVirtualRegister(AnyExtTy);
- auto LargeLoad =
- MIRBuilder.buildLoad(LargeLdReg, PtrReg, *LargeMMO);
+ auto LargeLoad = MIRBuilder.buildLoadInstr(
+ TargetOpcode::G_ZEXTLOAD, LargeLdReg, PtrReg, *LargeMMO);
- auto OffsetCst =
- MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
+ auto OffsetCst = MIRBuilder.buildConstant(
+ LLT::scalar(PtrTy.getSizeInBits()), LargeSplitSize / 8);
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr =
MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
@@ -2186,8 +2555,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// Generate the PtrAdd and truncating stores.
LLT PtrTy = MRI.getType(PtrReg);
- auto OffsetCst =
- MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
+ auto OffsetCst = MIRBuilder.buildConstant(
+ LLT::scalar(PtrTy.getSizeInBits()), LargeSplitSize / 8);
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr =
MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
@@ -2226,12 +2595,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(2).getReg();
Register RHS = MI.getOperand(3).getReg();
Register CarryIn = MI.getOperand(4).getReg();
+ LLT Ty = MRI.getType(Res);
- Register TmpRes = MRI.createGenericVirtualRegister(Ty);
- Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
-
- MIRBuilder.buildAdd(TmpRes, LHS, RHS);
- MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
+ auto TmpRes = MIRBuilder.buildAdd(Ty, LHS, RHS);
+ auto ZExtCarryIn = MIRBuilder.buildZExt(Ty, CarryIn);
MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn);
MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS);
@@ -2256,17 +2623,15 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(2).getReg();
Register RHS = MI.getOperand(3).getReg();
Register BorrowIn = MI.getOperand(4).getReg();
+ const LLT CondTy = MRI.getType(BorrowOut);
+ const LLT Ty = MRI.getType(Res);
- Register TmpRes = MRI.createGenericVirtualRegister(Ty);
- Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
- Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
- Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
-
- MIRBuilder.buildSub(TmpRes, LHS, RHS);
- MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn);
+ auto TmpRes = MIRBuilder.buildSub(Ty, LHS, RHS);
+ auto ZExtBorrowIn = MIRBuilder.buildZExt(Ty, BorrowIn);
MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn);
- MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LHS_EQ_RHS, LHS, RHS);
- MIRBuilder.buildICmp(CmpInst::ICMP_ULT, LHS_ULT_RHS, LHS, RHS);
+
+ auto LHS_EQ_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, LHS, RHS);
+ auto LHS_ULT_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CondTy, LHS, RHS);
MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS);
MI.eraseFromParent();
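The borrow-out select reads naturally in scalar form (sketch only): when LHS == RHS the subtraction itself cannot borrow, so the incoming borrow decides; otherwise the unsigned compare does.

#include <cstdint>

uint32_t usube(uint32_t LHS, uint32_t RHS, bool BorrowIn, bool &BorrowOut) {
  uint32_t Res = LHS - RHS - (BorrowIn ? 1 : 0);
  BorrowOut = (LHS == RHS) ? BorrowIn : (LHS < RHS);
  return Res;
}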
@@ -2278,6 +2643,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return lowerSITOFP(MI, TypeIdx, Ty);
case G_FPTOUI:
return lowerFPTOUI(MI, TypeIdx, Ty);
+ case G_FPTOSI:
+ return lowerFPTOSI(MI);
+ case G_FPTRUNC:
+ return lowerFPTRUNC(MI, TypeIdx, Ty);
case G_SMIN:
case G_SMAX:
case G_UMIN:
@@ -2288,6 +2657,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
case G_FMINNUM:
case G_FMAXNUM:
return lowerFMinNumMaxNum(MI);
+ case G_MERGE_VALUES:
+ return lowerMergeValues(MI);
case G_UNMERGE_VALUES:
return lowerUnmergeValues(MI);
case TargetOpcode::G_SEXT_INREG: {
@@ -2300,8 +2671,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register TmpRes = MRI.createGenericVirtualRegister(DstTy);
auto MIBSz = MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - SizeInBits);
- MIRBuilder.buildInstr(TargetOpcode::G_SHL, {TmpRes}, {SrcReg, MIBSz->getOperand(0).getReg()});
- MIRBuilder.buildInstr(TargetOpcode::G_ASHR, {DstReg}, {TmpRes, MIBSz->getOperand(0).getReg()});
+ MIRBuilder.buildShl(TmpRes, SrcReg, MIBSz->getOperand(0));
+ MIRBuilder.buildAShr(DstReg, TmpRes, MIBSz->getOperand(0));
MI.eraseFromParent();
return Legalized;
}
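The shl/ashr pair above is the usual in-register sign extension; as a scalar sketch for a 32-bit register (the cast avoids shifting a negative value left; arithmetic right shift of signed values is assumed, as on mainstream compilers):

#include <cstdint>

int32_t sextInReg32(int32_t Src, unsigned SizeInBits) {
  unsigned Shift = 32 - SizeInBits; // DstSize - SizeInBits
  return int32_t(uint32_t(Src) << Shift) >> Shift;
}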
@@ -2318,7 +2689,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
case G_BITREVERSE:
return lowerBitreverse(MI);
case G_READ_REGISTER:
- return lowerReadRegister(MI);
+ case G_WRITE_REGISTER:
+ return lowerReadWriteRegister(MI);
}
}
@@ -2350,99 +2722,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
return Legalized;
}
-LegalizerHelper::LegalizeResult
-LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy) {
- const unsigned Opc = MI.getOpcode();
- const unsigned NumOps = MI.getNumOperands() - 1;
- const unsigned NarrowSize = NarrowTy.getSizeInBits();
- const Register DstReg = MI.getOperand(0).getReg();
- const unsigned Flags = MI.getFlags();
- const LLT DstTy = MRI.getType(DstReg);
- const unsigned Size = DstTy.getSizeInBits();
- const int NumParts = Size / NarrowSize;
- const LLT EltTy = DstTy.getElementType();
- const unsigned EltSize = EltTy.getSizeInBits();
- const unsigned BitsForNumParts = NarrowSize * NumParts;
-
- // Check if we have any leftovers. If we do, then only handle the case where
- // the leftover is one element.
- if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size)
- return UnableToLegalize;
-
- if (BitsForNumParts != Size) {
- Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
- MIRBuilder.buildUndef(AccumDstReg);
-
- // Handle the pieces which evenly divide into the requested type with
- // extract/op/insert sequence.
- for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) {
- SmallVector<SrcOp, 4> SrcOps;
- for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
- Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset);
- SrcOps.push_back(PartOpReg);
- }
-
- Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
-
- Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
- MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset);
- AccumDstReg = PartInsertReg;
- }
-
- // Handle the remaining element sized leftover piece.
- SmallVector<SrcOp, 4> SrcOps;
- for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
- Register PartOpReg = MRI.createGenericVirtualRegister(EltTy);
- MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(),
- BitsForNumParts);
- SrcOps.push_back(PartOpReg);
- }
-
- Register PartDstReg = MRI.createGenericVirtualRegister(EltTy);
- MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
- MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts);
- MI.eraseFromParent();
-
- return Legalized;
- }
-
- SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
-
- extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
-
- if (NumOps >= 2)
- extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src1Regs);
-
- if (NumOps >= 3)
- extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs);
-
- for (int i = 0; i < NumParts; ++i) {
- Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
-
- if (NumOps == 1)
- MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags);
- else if (NumOps == 2) {
- MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i], Src1Regs[i]}, Flags);
- } else if (NumOps == 3) {
- MIRBuilder.buildInstr(Opc, {DstReg},
- {Src0Regs[i], Src1Regs[i], Src2Regs[i]}, Flags);
- }
-
- DstRegs.push_back(DstReg);
- }
-
- if (NarrowTy.isVector())
- MIRBuilder.buildConcatVectors(DstReg, DstRegs);
- else
- MIRBuilder.buildBuildVector(DstReg, DstRegs);
-
- MI.eraseFromParent();
- return Legalized;
-}
-
// Handle splitting vector operations which need to have the same number of
// elements in each type index, but each type index may have a different element
// type.
@@ -2482,7 +2761,6 @@ LegalizerHelper::fewerElementsVectorMultiEltType(
SmallVector<Register, 4> PartRegs, LeftoverRegs;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
- LLT LeftoverTy;
Register SrcReg = MI.getOperand(I).getReg();
LLT SrcTyI = MRI.getType(SrcReg);
LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType());
@@ -2571,9 +2849,8 @@ LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
for (unsigned I = 0; I < NumParts; ++I) {
Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
- MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode())
- .addDef(DstReg)
- .addUse(SrcRegs[I]);
+ MachineInstr *NewInst =
+ MIRBuilder.buildInstr(MI.getOpcode(), {DstReg}, {SrcRegs[I]});
NewInst->setFlags(MI.getFlags());
DstRegs.push_back(DstReg);
@@ -2913,6 +3190,12 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
Register AddrReg = MI.getOperand(1).getReg();
LLT ValTy = MRI.getType(ValReg);
+ // FIXME: Do we need a distinct NarrowMemory legalize action?
+ if (ValTy.getSizeInBits() != 8 * MMO->getSize()) {
+ LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
+ return UnableToLegalize;
+ }
+
int NumParts = -1;
int NumLeftover = -1;
LLT LeftoverTy;
@@ -2981,14 +3264,147 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::reduceOperationWidth(MachineInstr &MI, unsigned int TypeIdx,
+ LLT NarrowTy) {
+ assert(TypeIdx == 0 && "only one type index expected");
+
+ const unsigned Opc = MI.getOpcode();
+ const int NumOps = MI.getNumOperands() - 1;
+ const Register DstReg = MI.getOperand(0).getReg();
+ const unsigned Flags = MI.getFlags();
+ const unsigned NarrowSize = NarrowTy.getSizeInBits();
+ const LLT NarrowScalarTy = LLT::scalar(NarrowSize);
+
+ assert(NumOps <= 3 && "expected instruction with 1 result and 1-3 sources");
+
+ // First, check whether we are narrowing (changing the element type) or
+ // reducing the number of vector elements.
+ const LLT DstTy = MRI.getType(DstReg);
+ const bool IsNarrow = NarrowTy.getScalarType() != DstTy.getScalarType();
+
+ SmallVector<Register, 8> ExtractedRegs[3];
+ SmallVector<Register, 8> Parts;
+
+ unsigned NarrowElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
+
+ // Break down all the sources into NarrowTy pieces we can operate on. This may
+ // involve creating merges to a wider type, padded with undef.
+ for (int I = 0; I != NumOps; ++I) {
+ Register SrcReg = MI.getOperand(I + 1).getReg();
+ LLT SrcTy = MRI.getType(SrcReg);
+
+ // The type to narrow SrcReg to. For narrowing, this is a smaller scalar.
+ // For fewerElements, this is a smaller vector with the same element type.
+ LLT OpNarrowTy;
+ if (IsNarrow) {
+ OpNarrowTy = NarrowScalarTy;
+
+ // When narrowing, we need to cast vectors to scalars for this to work
+ // properly.
+ // FIXME: Can we do without the bitcast here if we're narrowing?
+ if (SrcTy.isVector()) {
+ SrcTy = LLT::scalar(SrcTy.getSizeInBits());
+ SrcReg = MIRBuilder.buildBitcast(SrcTy, SrcReg).getReg(0);
+ }
+ } else {
+ OpNarrowTy = LLT::scalarOrVector(NarrowElts, SrcTy.getScalarType());
+ }
+
+ LLT GCDTy = extractGCDType(ExtractedRegs[I], SrcTy, OpNarrowTy, SrcReg);
+
+ // Build a sequence of NarrowTy pieces in ExtractedRegs for this operand.
+ buildLCMMergePieces(SrcTy, OpNarrowTy, GCDTy, ExtractedRegs[I],
+ TargetOpcode::G_ANYEXT);
+ }
+
+ SmallVector<Register, 8> ResultRegs;
+
+ // Input operands for each sub-instruction.
+ SmallVector<SrcOp, 4> InputRegs(NumOps, Register());
+
+ int NumParts = ExtractedRegs[0].size();
+ const unsigned DstSize = DstTy.getSizeInBits();
+ const LLT DstScalarTy = LLT::scalar(DstSize);
+
+ // Narrowing needs to use scalar types
+ LLT DstLCMTy, NarrowDstTy;
+ if (IsNarrow) {
+ DstLCMTy = getLCMType(DstScalarTy, NarrowScalarTy);
+ NarrowDstTy = NarrowScalarTy;
+ } else {
+ DstLCMTy = getLCMType(DstTy, NarrowTy);
+ NarrowDstTy = NarrowTy;
+ }
+
+ // We widened the source registers to satisfy merge/unmerge size
+ // constraints. We'll have some extra fully undef parts.
+ const int NumRealParts = (DstSize + NarrowSize - 1) / NarrowSize;
+
+ for (int I = 0; I != NumRealParts; ++I) {
+ // Emit this instruction on each of the split pieces.
+ for (int J = 0; J != NumOps; ++J)
+ InputRegs[J] = ExtractedRegs[J][I];
+
+ auto Inst = MIRBuilder.buildInstr(Opc, {NarrowDstTy}, InputRegs, Flags);
+ ResultRegs.push_back(Inst.getReg(0));
+ }
+
+ // Fill out the widened result with undef instead of creating instructions
+ // with undef inputs.
+ int NumUndefParts = NumParts - NumRealParts;
+ if (NumUndefParts != 0)
+ ResultRegs.append(NumUndefParts,
+ MIRBuilder.buildUndef(NarrowDstTy).getReg(0));
+
+ // Extract the possibly padded result. Use a scratch register if we need to do
+ // a final bitcast, otherwise use the original result register.
+ Register MergeDstReg;
+ if (IsNarrow && DstTy.isVector())
+ MergeDstReg = MRI.createGenericVirtualRegister(DstScalarTy);
+ else
+ MergeDstReg = DstReg;
+
+ buildWidenedRemergeToDst(MergeDstReg, DstLCMTy, ResultRegs);
+
+ // Recast to vector if we narrowed a vector
+ if (IsNarrow && DstTy.isVector())
+ MIRBuilder.buildBitcast(DstReg, MergeDstReg);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ int64_t Imm = MI.getOperand(2).getImm();
+
+ LLT DstTy = MRI.getType(DstReg);
+
+ SmallVector<Register, 8> Parts;
+ LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg);
+ LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts);
+
+ for (Register &R : Parts)
+ R = MIRBuilder.buildSExtInReg(NarrowTy, R, Imm).getReg(0);
+
+ buildWidenedRemergeToDst(DstReg, LCMTy, Parts);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
using namespace TargetOpcode;
- MIRBuilder.setInstr(MI);
switch (MI.getOpcode()) {
case G_IMPLICIT_DEF:
return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy);
+ case G_TRUNC:
case G_AND:
case G_OR:
case G_XOR:
@@ -3038,7 +3454,14 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
case G_FMAXNUM_IEEE:
case G_FMINIMUM:
case G_FMAXIMUM:
- return fewerElementsVectorBasic(MI, TypeIdx, NarrowTy);
+ case G_FSHL:
+ case G_FSHR:
+ case G_FREEZE:
+ case G_SADDSAT:
+ case G_SSUBSAT:
+ case G_UADDSAT:
+ case G_USUBSAT:
+ return reduceOperationWidth(MI, TypeIdx, NarrowTy);
case G_SHL:
case G_LSHR:
case G_ASHR:
@@ -3076,6 +3499,8 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
case G_LOAD:
case G_STORE:
return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
+ case G_SEXT_INREG:
+ return fewerElementsVectorSextInReg(MI, TypeIdx, NarrowTy);
default:
return UnableToLegalize;
}
@@ -3087,10 +3512,10 @@ LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
Register InL = MRI.createGenericVirtualRegister(HalfTy);
Register InH = MRI.createGenericVirtualRegister(HalfTy);
- MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
+ MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
if (Amt.isNullValue()) {
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH});
+ MIRBuilder.buildMerge(MI.getOperand(0), {InL, InH});
MI.eraseFromParent();
return Legalized;
}
@@ -3163,7 +3588,7 @@ LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
}
}
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()});
+ MIRBuilder.buildMerge(MI.getOperand(0), {Lo, Hi});
MI.eraseFromParent();
return Legalized;
@@ -3211,7 +3636,7 @@ LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
Register InL = MRI.createGenericVirtualRegister(HalfTy);
Register InH = MRI.createGenericVirtualRegister(HalfTy);
- MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
+ MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
@@ -3302,7 +3727,6 @@ LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
LegalizerHelper::LegalizeResult
LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
LLT MoreTy) {
- MIRBuilder.setInstr(MI);
unsigned Opc = MI.getOpcode();
switch (Opc) {
case TargetOpcode::G_IMPLICIT_DEF:
@@ -3349,6 +3773,7 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
Observer.changedInstr(MI);
return Legalized;
case TargetOpcode::G_INSERT:
+ case TargetOpcode::G_FREEZE:
if (TypeIdx != 0)
return UnableToLegalize;
Observer.changingInstr(MI);
@@ -3479,10 +3904,10 @@ LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
- SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
+ SmallVector<Register, 2> Src1Parts, Src2Parts;
+ SmallVector<Register, 2> DstTmpRegs(DstTmpParts);
extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
- DstTmpRegs.resize(DstTmpParts);
multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
// Take only high half of registers if this is high mul.
@@ -3550,10 +3975,12 @@ LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
}
Register DstReg = MI.getOperand(0).getReg();
- if(MRI.getType(DstReg).isVector())
+ if (MRI.getType(DstReg).isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
- else
+ else if (DstRegs.size() > 1)
MIRBuilder.buildMerge(DstReg, DstRegs);
+ else
+ MIRBuilder.buildCopy(DstReg, DstRegs[0]);
MI.eraseFromParent();
return Legalized;
}
@@ -3657,14 +4084,14 @@ LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy},
{Src0Regs[I], Src1Regs[I]});
- DstRegs.push_back(Inst->getOperand(0).getReg());
+ DstRegs.push_back(Inst.getReg(0));
}
for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
auto Inst = MIRBuilder.buildInstr(
MI.getOpcode(),
{LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]});
- DstLeftoverRegs.push_back(Inst->getOperand(0).getReg());
+ DstLeftoverRegs.push_back(Inst.getReg(0));
}
insertParts(DstReg, DstTy, NarrowTy, DstRegs,
@@ -3675,6 +4102,28 @@ LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarExt(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+
+ LLT DstTy = MRI.getType(DstReg);
+ if (DstTy.isVector())
+ return UnableToLegalize;
+
+ SmallVector<Register, 8> Parts;
+ LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg);
+ LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts, MI.getOpcode());
+ buildWidenedRemergeToDst(DstReg, LCMTy, Parts);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
if (TypeIdx != 0)
@@ -3704,13 +4153,13 @@ LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
auto Select = MIRBuilder.buildSelect(NarrowTy,
CondReg, Src1Regs[I], Src2Regs[I]);
- DstRegs.push_back(Select->getOperand(0).getReg());
+ DstRegs.push_back(Select.getReg(0));
}
for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
auto Select = MIRBuilder.buildSelect(
LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]);
- DstLeftoverRegs.push_back(Select->getOperand(0).getReg());
+ DstLeftoverRegs.push_back(Select.getReg(0));
}
insertParts(DstReg, DstTy, NarrowTy, DstRegs,
@@ -3721,6 +4170,103 @@ LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+ unsigned NarrowSize = NarrowTy.getSizeInBits();
+
+ if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
+ const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF;
+
+ MachineIRBuilder &B = MIRBuilder;
+ auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg);
+ // ctlz(Hi:Lo) -> Hi == 0 ? (NarrowSize + ctlz(Lo)) : ctlz(Hi)
+ auto C_0 = B.buildConstant(NarrowTy, 0);
+ auto HiIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
+ UnmergeSrc.getReg(1), C_0);
+ auto LoCTLZ = IsUndef ?
+ B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0)) :
+ B.buildCTLZ(DstTy, UnmergeSrc.getReg(0));
+ auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize);
+ auto HiIsZeroCTLZ = B.buildAdd(DstTy, LoCTLZ, C_NarrowSize);
+ auto HiCTLZ = B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1));
+ B.buildSelect(DstReg, HiIsZero, HiIsZeroCTLZ, HiCTLZ);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
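The identity in scalar form, for a 64-bit source split into 32-bit halves (sketch; __builtin_clz is a GCC/Clang builtin and is undefined on zero, hence the guard):

#include <cstdint>

unsigned ctlz64(uint64_t V) {
  uint32_t Hi = uint32_t(V >> 32);
  uint32_t Lo = uint32_t(V);
  if (Hi == 0)
    return 32 + (Lo == 0 ? 32 : __builtin_clz(Lo)); // NarrowSize + ctlz(Lo)
  return __builtin_clz(Hi);                         // ctlz(Hi)
}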
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+ unsigned NarrowSize = NarrowTy.getSizeInBits();
+
+ if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
+ const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTTZ_ZERO_UNDEF;
+
+ MachineIRBuilder &B = MIRBuilder;
+ auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg);
+ // cttz(Hi:Lo) -> Lo == 0 ? (cttz(Hi) + NarrowSize) : cttz(Lo)
+ auto C_0 = B.buildConstant(NarrowTy, 0);
+ auto LoIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
+ UnmergeSrc.getReg(0), C_0);
+ auto HiCTTZ = IsUndef ?
+ B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1)) :
+ B.buildCTTZ(DstTy, UnmergeSrc.getReg(1));
+ auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize);
+ auto LoIsZeroCTTZ = B.buildAdd(DstTy, HiCTTZ, C_NarrowSize);
+ auto LoCTTZ = B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0));
+ B.buildSelect(DstReg, LoIsZero, LoIsZeroCTTZ, LoCTTZ);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
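And the mirrored identity for CTTZ (sketch; __builtin_ctz is likewise undefined on zero):

#include <cstdint>

unsigned cttz64(uint64_t V) {
  uint32_t Hi = uint32_t(V >> 32);
  uint32_t Lo = uint32_t(V);
  if (Lo == 0)
    return 32 + (Hi == 0 ? 32 : __builtin_ctz(Hi)); // cttz(Hi) + NarrowSize
  return __builtin_ctz(Lo);                         // cttz(Lo)
}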
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
+ unsigned NarrowSize = NarrowTy.getSizeInBits();
+
+ if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
+ auto UnmergeSrc = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1));
+
+ auto LoCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(0));
+ auto HiCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(1));
+ MIRBuilder.buildAdd(DstReg, HiCTPOP, LoCTPOP);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
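CTPOP needs no select at all, since the halves' counts simply add (sketch):

#include <cstdint>

unsigned ctpop64(uint64_t V) {
  return __builtin_popcount(uint32_t(V >> 32)) // ctpop(Hi)
       + __builtin_popcount(uint32_t(V));      // ctpop(Lo)
}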
+
+LegalizerHelper::LegalizeResult
LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
unsigned Opc = MI.getOpcode();
auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
@@ -3739,18 +4285,20 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
case TargetOpcode::G_CTLZ: {
+ Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
- unsigned Len = Ty.getSizeInBits();
- if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) {
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+ unsigned Len = SrcTy.getSizeInBits();
+
+ if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {DstTy, SrcTy}})) {
// If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
- auto MIBCtlzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF,
- {Ty}, {SrcReg});
- auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
- auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
- auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
- SrcReg, MIBZero);
- MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
- MIBCtlzZU);
+ auto CtlzZU = MIRBuilder.buildCTLZ_ZERO_UNDEF(DstTy, SrcReg);
+ auto ZeroSrc = MIRBuilder.buildConstant(SrcTy, 0);
+ auto ICmp = MIRBuilder.buildICmp(
+ CmpInst::ICMP_EQ, SrcTy.changeElementSize(1), SrcReg, ZeroSrc);
+ auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
+ MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CtlzZU);
MI.eraseFromParent();
return Legalized;
}
@@ -3768,16 +4316,14 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register Op = SrcReg;
unsigned NewLen = PowerOf2Ceil(Len);
for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
- auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
- auto MIBOp = MIRBuilder.buildInstr(
- TargetOpcode::G_OR, {Ty},
- {Op, MIRBuilder.buildInstr(TargetOpcode::G_LSHR, {Ty},
- {Op, MIBShiftAmt})});
- Op = MIBOp->getOperand(0).getReg();
+ auto MIBShiftAmt = MIRBuilder.buildConstant(SrcTy, 1ULL << i);
+ auto MIBOp = MIRBuilder.buildOr(
+ SrcTy, Op, MIRBuilder.buildLShr(SrcTy, Op, MIBShiftAmt));
+ Op = MIBOp.getReg(0);
}
- auto MIBPop = MIRBuilder.buildInstr(TargetOpcode::G_CTPOP, {Ty}, {Op});
- MIRBuilder.buildInstr(TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
- {MIRBuilder.buildConstant(Ty, Len), MIBPop});
+ auto MIBPop = MIRBuilder.buildCTPOP(DstTy, Op);
+ MIRBuilder.buildSub(MI.getOperand(0), MIRBuilder.buildConstant(DstTy, Len),
+ MIBPop);
MI.eraseFromParent();
return Legalized;
}
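The smear-then-popcount fallback in scalar form for 32 bits (sketch): ORing in successively shifted copies sets every bit below the leading one, so Len minus the popcount is the leading zero count.

#include <cstdint>

unsigned ctlz32ViaCtpop(uint32_t X) {
  for (unsigned Shift = 1; Shift <= 16; Shift <<= 1)
    X |= X >> Shift;                 // all bits below the MSB are now set
  return 32 - __builtin_popcount(X); // Len - ctpop; X == 0 gives 32
}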
@@ -3789,19 +4335,21 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
case TargetOpcode::G_CTTZ: {
+ Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
- unsigned Len = Ty.getSizeInBits();
- if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+
+ unsigned Len = SrcTy.getSizeInBits();
+ if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {DstTy, SrcTy}})) {
// If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
// zero.
- auto MIBCttzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF,
- {Ty}, {SrcReg});
- auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
- auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
- auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
- SrcReg, MIBZero);
- MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
- MIBCttzZU);
+ auto CttzZU = MIRBuilder.buildCTTZ_ZERO_UNDEF(DstTy, SrcReg);
+ auto Zero = MIRBuilder.buildConstant(SrcTy, 0);
+ auto ICmp = MIRBuilder.buildICmp(
+ CmpInst::ICMP_EQ, DstTy.changeElementSize(1), SrcReg, Zero);
+ auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
+ MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CttzZU);
MI.eraseFromParent();
return Legalized;
}
@@ -3810,24 +4358,70 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// { return 32 - nlz(~x & (x-1)); }
// Ref: "Hacker's Delight" by Henry Warren
auto MIBCstNeg1 = MIRBuilder.buildConstant(Ty, -1);
- auto MIBNot =
- MIRBuilder.buildInstr(TargetOpcode::G_XOR, {Ty}, {SrcReg, MIBCstNeg1});
- auto MIBTmp = MIRBuilder.buildInstr(
- TargetOpcode::G_AND, {Ty},
- {MIBNot, MIRBuilder.buildInstr(TargetOpcode::G_ADD, {Ty},
- {SrcReg, MIBCstNeg1})});
+ auto MIBNot = MIRBuilder.buildXor(Ty, SrcReg, MIBCstNeg1);
+ auto MIBTmp = MIRBuilder.buildAnd(
+ Ty, MIBNot, MIRBuilder.buildAdd(Ty, SrcReg, MIBCstNeg1));
if (!isSupported({TargetOpcode::G_CTPOP, {Ty, Ty}}) &&
isSupported({TargetOpcode::G_CTLZ, {Ty, Ty}})) {
auto MIBCstLen = MIRBuilder.buildConstant(Ty, Len);
- MIRBuilder.buildInstr(
- TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
- {MIBCstLen,
- MIRBuilder.buildInstr(TargetOpcode::G_CTLZ, {Ty}, {MIBTmp})});
+ MIRBuilder.buildSub(MI.getOperand(0), MIBCstLen,
+ MIRBuilder.buildCTLZ(Ty, MIBTmp));
MI.eraseFromParent();
return Legalized;
}
MI.setDesc(TII.get(TargetOpcode::G_CTPOP));
- MI.getOperand(1).setReg(MIBTmp->getOperand(0).getReg());
+ MI.getOperand(1).setReg(MIBTmp.getReg(0));
+ return Legalized;
+ }
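The Hacker's Delight identity used here, as a one-liner sketch: ~X & (X - 1) sets exactly the bits below the lowest set bit of X, so its population count is cttz(X).

#include <cstdint>

unsigned cttz32ViaCtpop(uint32_t X) {
  return __builtin_popcount(~X & (X - 1)); // yields 32 when X == 0
}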
+ case TargetOpcode::G_CTPOP: {
+ unsigned Size = Ty.getSizeInBits();
+ MachineIRBuilder &B = MIRBuilder;
+
+ // Count set bits in blocks of 2 bits. The default approach would be
+ // B2Count = { val & 0x55555555 } + { (val >> 1) & 0x55555555 }
+ // We use the following formula instead:
+ // B2Count = val - { (val >> 1) & 0x55555555 }
+ // since it gives the same result in blocks of 2 with one instruction fewer.
+ auto C_1 = B.buildConstant(Ty, 1);
+ auto B2Set1LoTo1Hi = B.buildLShr(Ty, MI.getOperand(1).getReg(), C_1);
+ APInt B2Mask1HiTo0 = APInt::getSplat(Size, APInt(8, 0x55));
+ auto C_B2Mask1HiTo0 = B.buildConstant(Ty, B2Mask1HiTo0);
+ auto B2Count1Hi = B.buildAnd(Ty, B2Set1LoTo1Hi, C_B2Mask1HiTo0);
+ auto B2Count = B.buildSub(Ty, MI.getOperand(1).getReg(), B2Count1Hi);
+
+ // To get the count in blocks of 4, add the values from adjacent blocks of 2.
+ // B4Count = { B2Count & 0x33333333 } + { (B2Count >> 2) & 0x33333333 }
+ auto C_2 = B.buildConstant(Ty, 2);
+ auto B4Set2LoTo2Hi = B.buildLShr(Ty, B2Count, C_2);
+ APInt B4Mask2HiTo0 = APInt::getSplat(Size, APInt(8, 0x33));
+ auto C_B4Mask2HiTo0 = B.buildConstant(Ty, B4Mask2HiTo0);
+ auto B4HiB2Count = B.buildAnd(Ty, B4Set2LoTo2Hi, C_B4Mask2HiTo0);
+ auto B4LoB2Count = B.buildAnd(Ty, B2Count, C_B4Mask2HiTo0);
+ auto B4Count = B.buildAdd(Ty, B4HiB2Count, B4LoB2Count);
+
+ // For the count in blocks of 8 bits we don't have to mask the high 4 bits
+ // before the addition, since the count value sits in the range {0,...,8}
+ // and 4 bits are enough to hold it. After the addition the high 4 bits
+ // still hold the count of set bits in the high 4-bit block; set them to
+ // zero to get the 8-bit result.
+ // B8Count = { B4Count + (B4Count >> 4) } & 0x0F0F0F0F
+ auto C_4 = B.buildConstant(Ty, 4);
+ auto B8HiB4Count = B.buildLShr(Ty, B4Count, C_4);
+ auto B8CountDirty4Hi = B.buildAdd(Ty, B8HiB4Count, B4Count);
+ APInt B8Mask4HiTo0 = APInt::getSplat(Size, APInt(8, 0x0F));
+ auto C_B8Mask4HiTo0 = B.buildConstant(Ty, B8Mask4HiTo0);
+ auto B8Count = B.buildAnd(Ty, B8CountDirty4Hi, C_B8Mask4HiTo0);
+
+ assert(Size <= 128 && "Scalar size is too large for CTPOP lower algorithm");
+ // 8 bits can hold the CTPOP result of a 128-bit int or smaller. Multiplying
+ // by this bitmask sets the 8 MSBs of ResTmp to the sum of all the 8-bit
+ // B8Count blocks.
+ auto MulMask = B.buildConstant(Ty, APInt::getSplat(Size, APInt(8, 0x01)));
+ auto ResTmp = B.buildMul(Ty, B8Count, MulMask);
+
+ // Shift the count result from the high 8 bits down to the low bits.
+ auto C_SizeM8 = B.buildConstant(Ty, Size - 8);
+ B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8);
+
+ MI.eraseFromParent();
return Legalized;
}
}
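The same bit-parallel algorithm in plain C++ for 32 bits, including the multiply trick that sums the per-byte counts into the top byte (sketch):

#include <cstdint>

unsigned ctpop32(uint32_t V) {
  V = V - ((V >> 1) & 0x55555555);                // counts in 2-bit blocks
  V = (V & 0x33333333) + ((V >> 2) & 0x33333333); // counts in 4-bit blocks
  V = (V + (V >> 4)) & 0x0F0F0F0F;                // counts in 8-bit blocks
  return (V * 0x01010101) >> 24;                  // byte sum lands in the MSB
}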
@@ -3888,6 +4482,7 @@ LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0);
MIRBuilder.buildAdd(Dst, V, R);
+ MI.eraseFromParent();
return Legalized;
}
@@ -3960,6 +4555,7 @@ LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S,
MIRBuilder.buildConstant(S64, 0));
MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
+ MI.eraseFromParent();
return Legalized;
}
@@ -4010,6 +4606,195 @@ LegalizerHelper::lowerFPTOUI(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOSI(MachineInstr &MI) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+ const LLT S64 = LLT::scalar(64);
+ const LLT S32 = LLT::scalar(32);
+
+ // FIXME: Only f32 to i64 conversions are supported.
+ if (SrcTy.getScalarType() != S32 || DstTy.getScalarType() != S64)
+ return UnableToLegalize;
+
+ // Expand f32 -> i64 conversion
+ // This algorithm comes from compiler-rt's implementation of fixsfdi:
+ // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
+
+ unsigned SrcEltBits = SrcTy.getScalarSizeInBits();
+
+ auto ExponentMask = MIRBuilder.buildConstant(SrcTy, 0x7F800000);
+ auto ExponentLoBit = MIRBuilder.buildConstant(SrcTy, 23);
+
+ auto AndExpMask = MIRBuilder.buildAnd(SrcTy, Src, ExponentMask);
+ auto ExponentBits = MIRBuilder.buildLShr(SrcTy, AndExpMask, ExponentLoBit);
+
+ auto SignMask = MIRBuilder.buildConstant(SrcTy,
+ APInt::getSignMask(SrcEltBits));
+ auto AndSignMask = MIRBuilder.buildAnd(SrcTy, Src, SignMask);
+ auto SignLowBit = MIRBuilder.buildConstant(SrcTy, SrcEltBits - 1);
+ auto Sign = MIRBuilder.buildAShr(SrcTy, AndSignMask, SignLowBit);
+ Sign = MIRBuilder.buildSExt(DstTy, Sign);
+
+ auto MantissaMask = MIRBuilder.buildConstant(SrcTy, 0x007FFFFF);
+ auto AndMantissaMask = MIRBuilder.buildAnd(SrcTy, Src, MantissaMask);
+ auto K = MIRBuilder.buildConstant(SrcTy, 0x00800000);
+
+ auto R = MIRBuilder.buildOr(SrcTy, AndMantissaMask, K);
+ R = MIRBuilder.buildZExt(DstTy, R);
+
+ auto Bias = MIRBuilder.buildConstant(SrcTy, 127);
+ auto Exponent = MIRBuilder.buildSub(SrcTy, ExponentBits, Bias);
+ auto SubExponent = MIRBuilder.buildSub(SrcTy, Exponent, ExponentLoBit);
+ auto ExponentSub = MIRBuilder.buildSub(SrcTy, ExponentLoBit, Exponent);
+
+ auto Shl = MIRBuilder.buildShl(DstTy, R, SubExponent);
+ auto Srl = MIRBuilder.buildLShr(DstTy, R, ExponentSub);
+
+ const LLT S1 = LLT::scalar(1);
+ auto CmpGt = MIRBuilder.buildICmp(CmpInst::ICMP_SGT,
+ S1, Exponent, ExponentLoBit);
+
+ R = MIRBuilder.buildSelect(DstTy, CmpGt, Shl, Srl);
+
+ auto XorSign = MIRBuilder.buildXor(DstTy, R, Sign);
+ auto Ret = MIRBuilder.buildSub(DstTy, XorSign, Sign);
+
+ auto ZeroSrcTy = MIRBuilder.buildConstant(SrcTy, 0);
+
+ auto ExponentLt0 = MIRBuilder.buildICmp(CmpInst::ICMP_SLT,
+ S1, Exponent, ZeroSrcTy);
+
+ auto ZeroDstTy = MIRBuilder.buildConstant(DstTy, 0);
+ MIRBuilder.buildSelect(Dst, ExponentLt0, ZeroDstTy, Ret);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
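The same bit math as scalar C++, valid for finite inputs whose truncated value fits in i64 (sketch; out-of-range exponents would shift out of bounds here, where the MIR expansion computes both shifts and selects):

#include <cstdint>
#include <cstring>

int64_t fptosiF32ToI64(float F) {
  uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof(Bits));
  int32_t Exponent = int32_t((Bits & 0x7F800000) >> 23) - 127; // unbias
  int64_t Sign = (Bits & 0x80000000) ? -1 : 0;  // all-ones mask or zero
  int64_t R = (Bits & 0x007FFFFF) | 0x00800000; // mantissa | implicit 1
  if (Exponent < 0)
    return 0;                                   // |F| < 1 truncates to 0
  R = Exponent > 23 ? R << (Exponent - 23) : R >> (23 - Exponent);
  return (R ^ Sign) - Sign;                     // conditional negation
}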
+
+// f64 -> f16 conversion using round-to-nearest-even rounding mode.
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFPTRUNC_F64_TO_F16(MachineInstr &MI) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+
+ if (MRI.getType(Src).isVector()) // TODO: Handle vectors directly.
+ return UnableToLegalize;
+
+ const unsigned ExpMask = 0x7ff;
+ const unsigned ExpBiasf64 = 1023;
+ const unsigned ExpBiasf16 = 15;
+ const LLT S32 = LLT::scalar(32);
+ const LLT S1 = LLT::scalar(1);
+
+ auto Unmerge = MIRBuilder.buildUnmerge(S32, Src);
+ Register U = Unmerge.getReg(0);
+ Register UH = Unmerge.getReg(1);
+
+ auto E = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 20));
+ E = MIRBuilder.buildAnd(S32, E, MIRBuilder.buildConstant(S32, ExpMask));
+
+ // Subtract the fp64 exponent bias (1023) to get the real exponent and
+ // add the f16 bias (15) to get the biased exponent for the f16 format.
+ E = MIRBuilder.buildAdd(
+ S32, E, MIRBuilder.buildConstant(S32, -ExpBiasf64 + ExpBiasf16));
+
+ auto M = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 8));
+ M = MIRBuilder.buildAnd(S32, M, MIRBuilder.buildConstant(S32, 0xffe));
+
+ auto MaskedSig = MIRBuilder.buildAnd(S32, UH,
+ MIRBuilder.buildConstant(S32, 0x1ff));
+ MaskedSig = MIRBuilder.buildOr(S32, MaskedSig, U);
+
+ auto Zero = MIRBuilder.buildConstant(S32, 0);
+ auto SigCmpNE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, MaskedSig, Zero);
+ auto Lo40Set = MIRBuilder.buildZExt(S32, SigCmpNE0);
+ M = MIRBuilder.buildOr(S32, M, Lo40Set);
+
+ // (M != 0 ? 0x0200 : 0) | 0x7c00;
+ auto Bits0x200 = MIRBuilder.buildConstant(S32, 0x0200);
+ auto CmpM_NE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, M, Zero);
+ auto SelectCC = MIRBuilder.buildSelect(S32, CmpM_NE0, Bits0x200, Zero);
+
+ auto Bits0x7c00 = MIRBuilder.buildConstant(S32, 0x7c00);
+ auto I = MIRBuilder.buildOr(S32, SelectCC, Bits0x7c00);
+
+ // N = M | (E << 12);
+ auto EShl12 = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 12));
+ auto N = MIRBuilder.buildOr(S32, M, EShl12);
+
+ // B = clamp(1-E, 0, 13);
+ auto One = MIRBuilder.buildConstant(S32, 1);
+ auto OneSubExp = MIRBuilder.buildSub(S32, One, E);
+ auto B = MIRBuilder.buildSMax(S32, OneSubExp, Zero);
+ B = MIRBuilder.buildSMin(S32, B, MIRBuilder.buildConstant(S32, 13));
+
+ auto SigSetHigh = MIRBuilder.buildOr(S32, M,
+ MIRBuilder.buildConstant(S32, 0x1000));
+
+ auto D = MIRBuilder.buildLShr(S32, SigSetHigh, B);
+ auto D0 = MIRBuilder.buildShl(S32, D, B);
+
+ auto D0_NE_SigSetHigh = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1,
+ D0, SigSetHigh);
+ auto D1 = MIRBuilder.buildZExt(S32, D0_NE_SigSetHigh);
+ D = MIRBuilder.buildOr(S32, D, D1);
+
+ auto CmpELtOne = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, S1, E, One);
+ auto V = MIRBuilder.buildSelect(S32, CmpELtOne, D, N);
+
+ auto VLow3 = MIRBuilder.buildAnd(S32, V, MIRBuilder.buildConstant(S32, 7));
+ V = MIRBuilder.buildLShr(S32, V, MIRBuilder.buildConstant(S32, 2));
+
+ auto VLow3Eq3 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, VLow3,
+ MIRBuilder.buildConstant(S32, 3));
+ auto V0 = MIRBuilder.buildZExt(S32, VLow3Eq3);
+
+ auto VLow3Gt5 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1, VLow3,
+ MIRBuilder.buildConstant(S32, 5));
+ auto V1 = MIRBuilder.buildZExt(S32, VLow3Gt5);
+
+ V1 = MIRBuilder.buildOr(S32, V0, V1);
+ V = MIRBuilder.buildAdd(S32, V, V1);
+
+ auto CmpEGt30 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1,
+ E, MIRBuilder.buildConstant(S32, 30));
+ V = MIRBuilder.buildSelect(S32, CmpEGt30,
+ MIRBuilder.buildConstant(S32, 0x7c00), V);
+
+ auto CmpEGt1039 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1,
+ E, MIRBuilder.buildConstant(S32, 1039));
+ V = MIRBuilder.buildSelect(S32, CmpEGt1039, I, V);
+
+ // Extract the sign bit.
+ auto Sign = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 16));
+ Sign = MIRBuilder.buildAnd(S32, Sign, MIRBuilder.buildConstant(S32, 0x8000));
+
+ // Insert the sign bit.
+ V = MIRBuilder.buildOr(S32, Sign, V);
+
+ MIRBuilder.buildTrunc(Dst, V);
+ MI.eraseFromParent();
+ return Legalized;
+}
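A sanity check on the magic constants above (editorial note, not patch code): E was re-biased by -1023 + 15, so a double exponent field of 0x7ff (Inf/NaN) shows up as 1039, which is what the CmpEGt1039 select tests, and E > 30 exceeds the largest finite f16 exponent.

static_assert(0x7ff - 1023 + 15 == 1039,
              "a double Inf/NaN exponent re-biases to 1039");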
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFPTRUNC(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+ const LLT S64 = LLT::scalar(64);
+ const LLT S16 = LLT::scalar(16);
+
+ if (DstTy.getScalarType() == S16 && SrcTy.getScalarType() == S64)
+ return lowerFPTRUNC_F64_TO_F16(MI);
+
+ return UnableToLegalize;
+}
+
static CmpInst::Predicate minMaxToCompare(unsigned Opc) {
switch (Opc) {
case TargetOpcode::G_SMIN:
@@ -4063,7 +4848,7 @@ LegalizerHelper::lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
MachineInstr *Or;
if (Src0Ty == Src1Ty) {
- auto And1 = MIRBuilder.buildAnd(Src1Ty, Src0, SignBitMask);
+ auto And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask);
Or = MIRBuilder.buildOr(Dst, And0, And1);
} else if (Src0Size > Src1Size) {
auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size);
@@ -4136,6 +4921,39 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) {
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
Register DstReg = MI.getOperand(0).getReg();
+ Register X = MI.getOperand(1).getReg();
+ const unsigned Flags = MI.getFlags();
+ const LLT Ty = MRI.getType(DstReg);
+ const LLT CondTy = Ty.changeElementSize(1);
+
+ // round(x) =>
+ // t = trunc(x);
+ // d = fabs(x - t);
+ // o = copysign(1.0f, x);
+ // return t + (d >= 0.5 ? o : 0.0);
+
+ auto T = MIRBuilder.buildIntrinsicTrunc(Ty, X, Flags);
+
+ auto Diff = MIRBuilder.buildFSub(Ty, X, T, Flags);
+ auto AbsDiff = MIRBuilder.buildFAbs(Ty, Diff, Flags);
+ auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
+ auto One = MIRBuilder.buildFConstant(Ty, 1.0);
+ auto Half = MIRBuilder.buildFConstant(Ty, 0.5);
+ auto SignOne = MIRBuilder.buildFCopysign(Ty, One, X);
+
+ auto Cmp = MIRBuilder.buildFCmp(CmpInst::FCMP_OGE, CondTy, AbsDiff, Half,
+ Flags);
+ auto Sel = MIRBuilder.buildSelect(Ty, Cmp, SignOne, Zero, Flags);
+
+ MIRBuilder.buildFAdd(DstReg, T, Sel, Flags);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
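The recipe from the comment in scalar form (sketch):

#include <cmath>

float roundScalar(float X) {
  float T = std::trunc(X);
  float D = std::fabs(X - T);
  float O = std::copysign(1.0f, X);
  return T + (D >= 0.5f ? O : 0.0f);
}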
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFFloor(MachineInstr &MI) {
+ Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
unsigned Flags = MI.getFlags();
LLT Ty = MRI.getType(DstReg);
@@ -4145,8 +4963,8 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
// if (src < 0.0 && src != result)
// result += -1.0.
- auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
auto Trunc = MIRBuilder.buildIntrinsicTrunc(Ty, SrcReg, Flags);
+ auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
auto Lt0 = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, CondTy,
SrcReg, Zero, Flags);
@@ -4155,7 +4973,48 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
auto And = MIRBuilder.buildAnd(CondTy, Lt0, NeTrunc);
auto AddVal = MIRBuilder.buildSITOFP(Ty, And);
- MIRBuilder.buildFAdd(DstReg, Trunc, AddVal);
+ MIRBuilder.buildFAdd(DstReg, Trunc, AddVal, Flags);
+ MI.eraseFromParent();
+ return Legalized;
+}
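And the floor lowering as a scalar sketch: truncate, then subtract one when truncation rounded a negative non-integral value toward zero.

#include <cmath>

float floorScalar(float X) {
  float T = std::trunc(X);
  return T + ((X < 0.0f && X != T) ? -1.0f : 0.0f);
}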
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerMergeValues(MachineInstr &MI) {
+ const unsigned NumOps = MI.getNumOperands();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register Src0Reg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(Src0Reg);
+ unsigned PartSize = SrcTy.getSizeInBits();
+
+ LLT WideTy = LLT::scalar(DstTy.getSizeInBits());
+ Register ResultReg = MIRBuilder.buildZExt(WideTy, Src0Reg).getReg(0);
+
+ for (unsigned I = 2; I != NumOps; ++I) {
+ const unsigned Offset = (I - 1) * PartSize;
+
+ Register SrcReg = MI.getOperand(I).getReg();
+ auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg);
+
+ Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg :
+ MRI.createGenericVirtualRegister(WideTy);
+
+ auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset);
+ auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt);
+ MIRBuilder.buildOr(NextResult, ResultReg, Shl);
+ ResultReg = NextResult;
+ }
+
+ if (DstTy.isPointer()) {
+ if (MIRBuilder.getDataLayout().isNonIntegralAddressSpace(
+ DstTy.getAddressSpace())) {
+ LLVM_DEBUG(dbgs() << "Not casting nonintegral address space\n");
+ return UnableToLegalize;
+ }
+
+ MIRBuilder.buildIntToPtr(DstReg, ResultReg);
+ }
+
MI.eraseFromParent();
return Legalized;
}
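What the merge loop computes, shown for two i16 parts merged into an i32 (sketch): each part is zero-extended, shifted to its offset, and ORed into the accumulator.

#include <cstdint>

uint32_t mergeI16Pair(uint16_t P0, uint16_t P1) {
  uint32_t Result = uint32_t(P0); // part 0 at offset 0 (plain zext)
  Result |= uint32_t(P1) << 16;   // part 1 at offset (I - 1) * PartSize
  return Result;
}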
@@ -4163,34 +5022,31 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
const unsigned NumDst = MI.getNumOperands() - 1;
- const Register SrcReg = MI.getOperand(NumDst).getReg();
- LLT SrcTy = MRI.getType(SrcReg);
-
+ Register SrcReg = MI.getOperand(NumDst).getReg();
Register Dst0Reg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(Dst0Reg);
+ if (DstTy.isPointer())
+ return UnableToLegalize; // TODO
+ SrcReg = coerceToScalar(SrcReg);
+ if (!SrcReg)
+ return UnableToLegalize;
// Expand scalarizing unmerge as bitcast to integer and shift.
- if (!DstTy.isVector() && SrcTy.isVector() &&
- SrcTy.getElementType() == DstTy) {
- LLT IntTy = LLT::scalar(SrcTy.getSizeInBits());
- Register Cast = MIRBuilder.buildBitcast(IntTy, SrcReg).getReg(0);
-
- MIRBuilder.buildTrunc(Dst0Reg, Cast);
-
- const unsigned DstSize = DstTy.getSizeInBits();
- unsigned Offset = DstSize;
- for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
- auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
- auto Shift = MIRBuilder.buildLShr(IntTy, Cast, ShiftAmt);
- MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
- }
+ LLT IntTy = MRI.getType(SrcReg);
- MI.eraseFromParent();
- return Legalized;
+ MIRBuilder.buildTrunc(Dst0Reg, SrcReg);
+
+ const unsigned DstSize = DstTy.getSizeInBits();
+ unsigned Offset = DstSize;
+ for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
+ auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
+ auto Shift = MIRBuilder.buildLShr(IntTy, SrcReg, ShiftAmt);
+ MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
}
- return UnableToLegalize;
+ MI.eraseFromParent();
+ return Legalized;
}
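The inverse expansion, an i32 unmerged into two i16 pieces (sketch):

#include <cstdint>

void unmergeI32(uint32_t Src, uint16_t &D0, uint16_t &D1) {
  D0 = uint16_t(Src);       // trunc at offset 0
  D1 = uint16_t(Src >> 16); // lshr by DstSize, then trunc
}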
LegalizerHelper::LegalizeResult
@@ -4251,16 +5107,19 @@ LegalizerHelper::lowerShuffleVector(MachineInstr &MI) {
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) {
+ const auto &MF = *MI.getMF();
+ const auto &TFI = *MF.getSubtarget().getFrameLowering();
+ if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
+ return UnableToLegalize;
+
Register Dst = MI.getOperand(0).getReg();
Register AllocSize = MI.getOperand(1).getReg();
- unsigned Align = MI.getOperand(2).getImm();
-
- const auto &MF = *MI.getMF();
- const auto &TLI = *MF.getSubtarget().getTargetLowering();
+ Align Alignment = assumeAligned(MI.getOperand(2).getImm());
LLT PtrTy = MRI.getType(Dst);
LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
+ const auto &TLI = *MF.getSubtarget().getTargetLowering();
Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
auto SPTmp = MIRBuilder.buildCopy(PtrTy, SPReg);
SPTmp = MIRBuilder.buildCast(IntPtrTy, SPTmp);
@@ -4269,8 +5128,8 @@ LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) {
// have to generate an extra instruction to negate the alloc and then use
// G_PTR_ADD to add the negative offset.
auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize);
- if (Align) {
- APInt AlignMask(IntPtrTy.getSizeInBits(), Align, true);
+ if (Alignment > Align(1)) {
+ APInt AlignMask(IntPtrTy.getSizeInBits(), Alignment.value(), true);
AlignMask.negate();
auto AlignCst = MIRBuilder.buildConstant(IntPtrTy, AlignMask);
Alloc = MIRBuilder.buildAnd(IntPtrTy, Alloc, AlignCst);
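The stack-pointer arithmetic for the downward-growing case, as integer math (sketch; a power-of-two alignment is assumed, so masking with -Alignment rounds down):

#include <cstdint>

uint64_t dynStackAlloc(uint64_t SP, uint64_t AllocSize, uint64_t Alignment) {
  uint64_t Alloc = SP - AllocSize;
  if (Alignment > 1)
    Alloc &= ~(Alignment - 1); // same as Alloc & -Alignment
  return Alloc;                // used as the result and as the new SP value
}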
@@ -4326,34 +5185,47 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerInsert(MachineInstr &MI) {
LLT DstTy = MRI.getType(Src);
LLT InsertTy = MRI.getType(InsertSrc);
- if (InsertTy.isScalar() &&
- (DstTy.isScalar() ||
- (DstTy.isVector() && DstTy.getElementType() == InsertTy))) {
- LLT IntDstTy = DstTy;
- if (!DstTy.isScalar()) {
- IntDstTy = LLT::scalar(DstTy.getSizeInBits());
- Src = MIRBuilder.buildBitcast(IntDstTy, Src).getReg(0);
- }
+ if (InsertTy.isVector() ||
+ (DstTy.isVector() && DstTy.getElementType() != InsertTy))
+ return UnableToLegalize;
- Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0);
- if (Offset != 0) {
- auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset);
- ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0);
- }
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ if ((DstTy.isPointer() &&
+ DL.isNonIntegralAddressSpace(DstTy.getAddressSpace())) ||
+ (InsertTy.isPointer() &&
+ DL.isNonIntegralAddressSpace(InsertTy.getAddressSpace()))) {
+ LLVM_DEBUG(dbgs() << "Not casting non-integral address space integer\n");
+ return UnableToLegalize;
+ }
- APInt MaskVal = ~APInt::getBitsSet(DstTy.getSizeInBits(), Offset,
- InsertTy.getSizeInBits());
+ LLT IntDstTy = DstTy;
- auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal);
- auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask);
- auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc);
+ if (!DstTy.isScalar()) {
+ IntDstTy = LLT::scalar(DstTy.getSizeInBits());
+ Src = MIRBuilder.buildCast(IntDstTy, Src).getReg(0);
+ }
- MIRBuilder.buildBitcast(Dst, Or);
- MI.eraseFromParent();
- return Legalized;
+ if (!InsertTy.isScalar()) {
+ const LLT IntInsertTy = LLT::scalar(InsertTy.getSizeInBits());
+ InsertSrc = MIRBuilder.buildPtrToInt(IntInsertTy, InsertSrc).getReg(0);
}
- return UnableToLegalize;
+ Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0);
+ if (Offset != 0) {
+ auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset);
+ ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0);
+ }
+
+ APInt MaskVal = APInt::getBitsSetWithWrap(
+ DstTy.getSizeInBits(), Offset + InsertTy.getSizeInBits(), Offset);
+
+ auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal);
+ auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask);
+ auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc);
+
+ MIRBuilder.buildCast(Dst, Or);
+ MI.eraseFromParent();
+ return Legalized;
}
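The mask-and-or insert in scalar form, for an i8 inserted into an i32 at bit offset 8 (sketch):

#include <cstdint>

uint32_t insertI8At8(uint32_t Src, uint8_t InsertSrc) {
  uint32_t Ext = uint32_t(InsertSrc) << 8; // zext + shl by Offset
  uint32_t Mask = ~(uint32_t(0xFF) << 8);  // clear bits [15:8]
  return (Src & Mask) | Ext;               // masked src | shifted insert
}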
LegalizerHelper::LegalizeResult
@@ -4397,7 +5269,7 @@ LegalizerHelper::lowerBswap(MachineInstr &MI) {
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
const LLT Ty = MRI.getType(Src);
- unsigned SizeInBytes = Ty.getSizeInBytes();
+ unsigned SizeInBytes = (Ty.getScalarSizeInBits() + 7) / 8;
unsigned BaseShiftAmt = (SizeInBytes - 1) * 8;
// Swap most and least significant byte, set remaining bytes in Res to zero.
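The full shift-based byte swap this expands to, written out for 32 bits (sketch):

#include <cstdint>

uint32_t bswap32(uint32_t X) {
  return (X << 24) | ((X & 0x0000FF00u) << 8) |
         ((X >> 8) & 0x0000FF00u) | (X >> 24);
}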
@@ -4470,20 +5342,29 @@ LegalizerHelper::lowerBitreverse(MachineInstr &MI) {
}
LegalizerHelper::LegalizeResult
-LegalizerHelper::lowerReadRegister(MachineInstr &MI) {
- Register Dst = MI.getOperand(0).getReg();
- const LLT Ty = MRI.getType(Dst);
- const MDString *RegStr = cast<MDString>(
- cast<MDNode>(MI.getOperand(1).getMetadata())->getOperand(0));
-
+LegalizerHelper::lowerReadWriteRegister(MachineInstr &MI) {
MachineFunction &MF = MIRBuilder.getMF();
const TargetSubtargetInfo &STI = MF.getSubtarget();
const TargetLowering *TLI = STI.getTargetLowering();
- Register Reg = TLI->getRegisterByName(RegStr->getString().data(), Ty, MF);
- if (!Reg.isValid())
+
+ bool IsRead = MI.getOpcode() == TargetOpcode::G_READ_REGISTER;
+ int NameOpIdx = IsRead ? 1 : 0;
+ int ValRegIndex = IsRead ? 0 : 1;
+
+ Register ValReg = MI.getOperand(ValRegIndex).getReg();
+ const LLT Ty = MRI.getType(ValReg);
+ const MDString *RegStr = cast<MDString>(
+ cast<MDNode>(MI.getOperand(NameOpIdx).getMetadata())->getOperand(0));
+
+ Register PhysReg = TLI->getRegisterByName(RegStr->getString().data(), Ty, MF);
+ if (!PhysReg.isValid())
return UnableToLegalize;
- MIRBuilder.buildCopy(Dst, Reg);
+ if (IsRead)
+ MIRBuilder.buildCopy(ValReg, PhysReg);
+ else
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+
MI.eraseFromParent();
return Legalized;
}