Diffstat (limited to 'llvm/lib/CodeGen/GlobalISel')
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp                 60
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp            2
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/CallLowering.cpp           109
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp         576
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp      8
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp         263
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp           517
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp      667
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp       15
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp      2
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp      38
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/Legalizer.cpp               72
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp       1991
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp           22
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/Localizer.cpp               65
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp   113
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp       177
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp            9
-rw-r--r--   llvm/lib/CodeGen/GlobalISel/Utils.cpp                  266
19 files changed, 3660 insertions, 1312 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
index e6abfcdb92cb8..c4d8777615d27 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
@@ -52,6 +52,7 @@ bool CSEConfigFull::shouldCSEOpc(unsigned Opc) {
case TargetOpcode::G_SREM:
case TargetOpcode::G_CONSTANT:
case TargetOpcode::G_FCONSTANT:
+ case TargetOpcode::G_IMPLICIT_DEF:
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ANYEXT:
@@ -64,7 +65,7 @@ bool CSEConfigFull::shouldCSEOpc(unsigned Opc) {
}
bool CSEConfigConstantOnly::shouldCSEOpc(unsigned Opc) {
- return Opc == TargetOpcode::G_CONSTANT;
+ return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_IMPLICIT_DEF;
}
std::unique_ptr<CSEConfigBase>
@@ -216,9 +217,6 @@ void GISelCSEInfo::handleRecordedInsts() {
}
bool GISelCSEInfo::shouldCSE(unsigned Opc) const {
- // Only GISel opcodes are CSEable
- if (!isPreISelGenericOpcode(Opc))
- return false;
assert(CSEOpt.get() && "CSEConfig not set");
return CSEOpt->shouldCSEOpc(Opc);
}
@@ -260,6 +258,39 @@ void GISelCSEInfo::releaseMemory() {
#endif
}
+Error GISelCSEInfo::verify() {
+#ifndef NDEBUG
+ handleRecordedInsts();
+ // For each instruction in map from MI -> UMI,
+ // Profile(MI) and make sure UMI is found for that profile.
+ for (auto &It : InstrMapping) {
+ FoldingSetNodeID TmpID;
+ GISelInstProfileBuilder(TmpID, *MRI).addNodeID(It.first);
+ void *InsertPos;
+ UniqueMachineInstr *FoundNode =
+ CSEMap.FindNodeOrInsertPos(TmpID, InsertPos);
+ if (FoundNode != It.second)
+ return createStringError(std::errc::not_supported,
+ "CSEMap mismatch, InstrMapping has MIs without "
+ "corresponding Nodes in CSEMap");
+ }
+
+ // For every node in the CSEMap, make sure that the InstrMapping
+ // points to it.
+ for (auto It = CSEMap.begin(), End = CSEMap.end(); It != End; ++It) {
+ const UniqueMachineInstr &UMI = *It;
+ if (!InstrMapping.count(UMI.MI))
+ return createStringError(std::errc::not_supported,
+ "Node in CSE without InstrMapping", UMI.MI);
+
+ if (InstrMapping[UMI.MI] != &UMI)
+ return createStringError(std::make_error_code(std::errc::not_supported),
+ "Mismatch in CSE mapping");
+ }
+#endif
+ return Error::success();
+}
+
void GISelCSEInfo::print() {
LLVM_DEBUG(for (auto &It
: OpcodeHitTable) {
@@ -286,7 +317,7 @@ GISelInstProfileBuilder::addNodeIDOpcode(unsigned Opc) const {
}
const GISelInstProfileBuilder &
-GISelInstProfileBuilder::addNodeIDRegType(const LLT &Ty) const {
+GISelInstProfileBuilder::addNodeIDRegType(const LLT Ty) const {
uint64_t Val = Ty.getUniqueRAWLLTData();
ID.AddInteger(Val);
return *this;
@@ -311,13 +342,13 @@ GISelInstProfileBuilder::addNodeIDImmediate(int64_t Imm) const {
}
const GISelInstProfileBuilder &
-GISelInstProfileBuilder::addNodeIDRegNum(unsigned Reg) const {
+GISelInstProfileBuilder::addNodeIDRegNum(Register Reg) const {
ID.AddInteger(Reg);
return *this;
}
const GISelInstProfileBuilder &
-GISelInstProfileBuilder::addNodeIDRegType(const unsigned Reg) const {
+GISelInstProfileBuilder::addNodeIDRegType(const Register Reg) const {
addNodeIDMachineOperand(MachineOperand::CreateReg(Reg, false));
return *this;
}
@@ -344,12 +375,14 @@ const GISelInstProfileBuilder &GISelInstProfileBuilder::addNodeIDMachineOperand(
LLT Ty = MRI.getType(Reg);
if (Ty.isValid())
addNodeIDRegType(Ty);
- auto *RB = MRI.getRegBankOrNull(Reg);
- if (RB)
- addNodeIDRegType(RB);
- auto *RC = MRI.getRegClassOrNull(Reg);
- if (RC)
- addNodeIDRegType(RC);
+
+ if (const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(Reg)) {
+ if (const auto *RB = RCOrRB.dyn_cast<const RegisterBank *>())
+ addNodeIDRegType(RB);
+ else if (const auto *RC = RCOrRB.dyn_cast<const TargetRegisterClass *>())
+ addNodeIDRegType(RC);
+ }
+
assert(!MO.isImplicit() && "Unhandled case");
} else if (MO.isImm())
ID.AddInteger(MO.getImm());
@@ -369,6 +402,7 @@ GISelCSEInfo &
GISelCSEAnalysisWrapper::get(std::unique_ptr<CSEConfigBase> CSEOpt,
bool Recompute) {
if (!AlreadyComputed || Recompute) {
+ Info.releaseMemory();
Info.setCSEConfig(std::move(CSEOpt));
Info.analyze(*MF);
AlreadyComputed = true;
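
The new GISelCSEInfo::verify() above enforces a two-way invariant between the CSE structures: every instruction recorded in InstrMapping must profile to the node the map points at, and every node in the CSEMap must have an InstrMapping entry pointing back at it. A minimal standalone sketch of that invariant over plain STL maps (names and types here are illustrative stand-ins, not the LLVM FoldingSet machinery):

    #include <cassert>
    #include <string>
    #include <unordered_map>

    // Forward map: instruction -> canonical node id; reverse map: node id -> instruction.
    using InstrMap = std::unordered_map<std::string, int>;
    using NodeMap = std::unordered_map<int, std::string>;

    // True if the two maps describe the same one-to-one relation, mirroring the
    // two loops in GISelCSEInfo::verify().
    static bool consistent(const InstrMap &InstrMapping, const NodeMap &CSEMap) {
      for (const auto &[MI, Node] : InstrMapping) {
        auto It = CSEMap.find(Node);
        if (It == CSEMap.end() || It->second != MI)
          return false; // "InstrMapping has MIs without corresponding Nodes"
      }
      for (const auto &[Node, MI] : CSEMap) {
        auto It = InstrMapping.find(MI);
        if (It == InstrMapping.end() || It->second != Node)
          return false; // "Node in CSE without InstrMapping"
      }
      return true;
    }

    int main() {
      InstrMap Fwd{{"g_constant_42", 1}};
      NodeMap Rev{{1, "g_constant_42"}};
      assert(consistent(Fwd, Rev));
      Rev[2] = "g_add"; // node with no InstrMapping entry
      assert(!consistent(Fwd, Rev));
    }
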
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
index 51a74793f0290..88173dc4d302c 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -129,7 +129,7 @@ CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
if (DstOps.size() == 1) {
const DstOp &Op = DstOps[0];
if (Op.getDstOpKind() == DstOp::DstType::Ty_Reg)
- return buildCopy(Op.getReg(), MIB->getOperand(0).getReg());
+ return buildCopy(Op.getReg(), MIB.getReg(0));
}
return MIB;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 4c2dbdd905f3d..a7146515c4c96 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/Target/TargetMachine.h"
#define DEBUG_TYPE "call-lowering"
@@ -29,48 +30,50 @@ using namespace llvm;
void CallLowering::anchor() {}
-bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
ArrayRef<Register> ResRegs,
ArrayRef<ArrayRef<Register>> ArgRegs,
Register SwiftErrorVReg,
std::function<unsigned()> GetCalleeReg) const {
CallLoweringInfo Info;
- auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
+ const DataLayout &DL = MIRBuilder.getDataLayout();
// First step is to marshall all the function's parameters into the correct
// physregs and memory locations. Gather the sequence of argument types that
// we'll pass to the assigner function.
unsigned i = 0;
- unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
- for (auto &Arg : CS.args()) {
+ unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
+ for (auto &Arg : CB.args()) {
ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
i < NumFixedArgs};
- setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
+ setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
Info.OrigArgs.push_back(OrigArg);
++i;
}
- if (const Function *F = CS.getCalledFunction())
+ // Try looking through a bitcast from one function type to another.
+ // Commonly happens with calls to objc_msgSend().
+ const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
+ if (const Function *F = dyn_cast<Function>(CalleeV))
Info.Callee = MachineOperand::CreateGA(F, 0);
else
Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
- Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
+ Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
if (!Info.OrigRet.Ty->isVoidTy())
- setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS);
+ setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
- Info.KnownCallees =
- CS.getInstruction()->getMetadata(LLVMContext::MD_callees);
- Info.CallConv = CS.getCallingConv();
+ MachineFunction &MF = MIRBuilder.getMF();
+ Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
+ Info.CallConv = CB.getCallingConv();
Info.SwiftErrorVReg = SwiftErrorVReg;
- Info.IsMustTailCall = CS.isMustTailCall();
- Info.IsTailCall = CS.isTailCall() &&
- isInTailCallPosition(CS, MIRBuilder.getMF().getTarget()) &&
- (MIRBuilder.getMF()
- .getFunction()
- .getFnAttribute("disable-tail-calls")
- .getValueAsString() != "true");
- Info.IsVarArg = CS.getFunctionType()->isVarArg();
+ Info.IsMustTailCall = CB.isMustTailCall();
+ Info.IsTailCall =
+ CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
+ (MF.getFunction()
+ .getFnAttribute("disable-tail-calls")
+ .getValueAsString() != "true");
+ Info.IsVarArg = CB.getFunctionType()->isVarArg();
return lowerCall(MIRBuilder, Info);
}
@@ -94,10 +97,12 @@ void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
Flags.setSwiftError();
if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
Flags.setByVal();
+ if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
+ Flags.setPreallocated();
if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
Flags.setInAlloca();
- if (Flags.isByVal() || Flags.isInAlloca()) {
+ if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
@@ -105,16 +110,16 @@ void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
- unsigned FrameAlign;
- if (FuncInfo.getParamAlignment(OpIdx - 2))
- FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2);
+ Align FrameAlign;
+ if (auto ParamAlign = FuncInfo.getParamAlign(OpIdx - 2))
+ FrameAlign = *ParamAlign;
else
- FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
- Flags.setByValAlign(Align(FrameAlign));
+ FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
+ Flags.setByValAlign(FrameAlign);
}
if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
Flags.setNest();
- Flags.setOrigAlign(Align(DL.getABITypeAlignment(Arg.Ty)));
+ Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}
template void
@@ -123,9 +128,9 @@ CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
const Function &FuncInfo) const;
template void
-CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
+CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
const DataLayout &DL,
- const CallInst &FuncInfo) const;
+ const CallBase &FuncInfo) const;
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
MachineIRBuilder &MIRBuilder) const {
@@ -157,7 +162,7 @@ void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
MachineIRBuilder &MIRBuilder) const {
assert(DstRegs.size() > 1 && "Nothing to unpack");
- const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
+ const DataLayout &DL = MIRBuilder.getDataLayout();
SmallVector<LLT, 8> LLTs;
SmallVector<uint64_t, 8> Offsets;
@@ -189,11 +194,11 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
unsigned NumArgs = Args.size();
for (unsigned i = 0; i != NumArgs; ++i) {
- MVT CurVT = MVT::getVT(Args[i].Ty);
- if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
- Args[i].Flags[0], CCInfo)) {
- if (!CurVT.isValid())
- return false;
+ EVT CurVT = EVT::getEVT(Args[i].Ty);
+ if (!CurVT.isSimple() ||
+ Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(),
+ CCValAssign::Full, Args[i], Args[i].Flags[0],
+ CCInfo)) {
MVT NewVT = TLI->getRegisterTypeForCallingConv(
F.getContext(), F.getCallingConv(), EVT(CurVT));
@@ -239,7 +244,7 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
if (Part == 0) {
Flags.setSplit();
} else {
- Flags.setOrigAlign(Align::None());
+ Flags.setOrigAlign(Align(1));
if (Part == NumParts - 1)
Flags.setSplitEnd();
}
@@ -272,7 +277,7 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
if (PartIdx == 0) {
Flags.setSplit();
} else {
- Flags.setOrigAlign(Align::None());
+ Flags.setOrigAlign(Align(1));
if (PartIdx == NumParts - 1)
Flags.setSplitEnd();
}
@@ -293,15 +298,21 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
if (VA.needsCustom()) {
- j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
+ unsigned NumArgRegs =
+ Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
+ if (!NumArgRegs)
+ return false;
+ j += NumArgRegs;
continue;
}
// FIXME: Pack registers if we have more than one.
Register ArgReg = Args[i].Regs[0];
- MVT OrigVT = MVT::getVT(Args[i].Ty);
- MVT VAVT = VA.getValVT();
+ EVT OrigVT = EVT::getEVT(Args[i].Ty);
+ EVT VAVT = VA.getValVT();
+ const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
+
if (VA.isRegLoc()) {
if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
@@ -323,7 +334,7 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
continue;
}
- const LLT VATy(VAVT);
+ const LLT VATy(VAVT.getSimpleVT());
Register NewReg =
MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
@@ -331,7 +342,6 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
// or do an unmerge to get the lower block of elements.
if (VATy.isVector() &&
VATy.getNumElements() > OrigVT.getVectorNumElements()) {
- const LLT OrigTy(OrigVT);
// Just handle the case where the VA type is 2 * original type.
if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
LLVM_DEBUG(dbgs()
@@ -371,7 +381,7 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
unsigned Offset = VA.getLocMemOffset();
MachinePointerInfo MPO;
Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
- Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
+ Handler.assignValueToAddress(Args[i], StackAddr, Size, MPO, VA);
} else {
// FIXME: Support byvals and other weirdness
return false;
@@ -456,10 +466,19 @@ bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
}
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
- CCValAssign &VA) {
+ CCValAssign &VA,
+ unsigned MaxSizeBits) {
LLT LocTy{VA.getLocVT()};
- if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
+ LLT ValTy = MRI.getType(ValReg);
+ if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
return ValReg;
+
+ if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
+ if (MaxSizeBits <= ValTy.getSizeInBits())
+ return ValReg;
+ LocTy = LLT::scalar(MaxSizeBits);
+ }
+
switch (VA.getLocInfo()) {
default: break;
case CCValAssign::Full:
@@ -469,7 +488,7 @@ Register CallLowering::ValueHandler::extendRegister(Register ValReg,
return ValReg;
case CCValAssign::AExt: {
auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
- return MIB->getOperand(0).getReg();
+ return MIB.getReg(0);
}
case CCValAssign::SExt: {
Register NewReg = MRI.createGenericVirtualRegister(LocTy);
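
The extendRegister() hunk above adds a MaxSizeBits cap: when only the low MaxSizeBits of the location register need to hold a defined value, the value is extended to the capped scalar type instead of the full location width, and not extended at all if it already covers the cap. A small scalar model of that width choice (plain C++, illustrative only; the real code then emits the G_ANYEXT/G_SEXT/G_ZEXT dictated by the CCValAssign):

    #include <cassert>
    #include <cstdio>

    // Width the value is widened to before being placed in a LocBits-wide
    // location, given that only MaxSizeBits of it must be defined (0 = no cap).
    static unsigned extensionWidth(unsigned ValBits, unsigned LocBits,
                                   unsigned MaxSizeBits) {
      if (LocBits == ValBits)
        return ValBits;                 // already the right size: no extension
      if (MaxSizeBits && MaxSizeBits < LocBits) {
        if (MaxSizeBits <= ValBits)
          return ValBits;               // the value already covers the cap
        return MaxSizeBits;             // extend only up to the cap
      }
      return LocBits;                   // extend to the full location width
    }

    int main() {
      assert(extensionWidth(8, 32, 0) == 32);   // plain s8 -> s32 extension
      assert(extensionWidth(8, 32, 16) == 16);  // capped at s16
      assert(extensionWidth(24, 32, 16) == 24); // value wider than the cap
      std::puts("ok");
    }
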
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index a103e8e4e6e00..194961ae3b216 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -9,6 +9,8 @@
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -17,11 +19,13 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#define DEBUG_TYPE "gi-combiner"
using namespace llvm;
+using namespace MIPatternMatch;
// Option to allow testing of the combiner while no targets know about indexed
// addressing.
@@ -33,9 +37,10 @@ static cl::opt<bool>
CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
MachineIRBuilder &B, GISelKnownBits *KB,
- MachineDominatorTree *MDT)
+ MachineDominatorTree *MDT,
+ const LegalizerInfo *LI)
: Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
- KB(KB), MDT(MDT) {
+ KB(KB), MDT(MDT), LI(LI) {
(void)this->KB;
}
@@ -74,36 +79,7 @@ bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
return false;
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
-
- // Give up if either DstReg or SrcReg is a physical register.
- if (Register::isPhysicalRegister(DstReg) ||
- Register::isPhysicalRegister(SrcReg))
- return false;
-
- // Give up the types don't match.
- LLT DstTy = MRI.getType(DstReg);
- LLT SrcTy = MRI.getType(SrcReg);
- // Give up if one has a valid LLT, but the other doesn't.
- if (DstTy.isValid() != SrcTy.isValid())
- return false;
- // Give up if the types don't match.
- if (DstTy.isValid() && SrcTy.isValid() && DstTy != SrcTy)
- return false;
-
- // Get the register banks and classes.
- const RegisterBank *DstBank = MRI.getRegBankOrNull(DstReg);
- const RegisterBank *SrcBank = MRI.getRegBankOrNull(SrcReg);
- const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
- const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
-
- // Replace if the register constraints match.
- if ((SrcRC == DstRC) && (SrcBank == DstBank))
- return true;
- // Replace if DstReg has no constraints.
- if (!DstBank && !DstRC)
- return true;
-
- return false;
+ return canReplaceReg(DstReg, SrcReg, MRI);
}
void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
Register DstReg = MI.getOperand(0).getReg();
@@ -294,7 +270,7 @@ namespace {
/// Select a preference between two uses. CurrentUse is the current preference
/// while *ForCandidate is attributes of the candidate under consideration.
PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
- const LLT &TyForCandidate,
+ const LLT TyForCandidate,
unsigned OpcodeForCandidate,
MachineInstr *MIForCandidate) {
if (!CurrentUse.Ty.isValid()) {
@@ -428,10 +404,23 @@ bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
? TargetOpcode::G_SEXT
: TargetOpcode::G_ZEXT;
Preferred = {LLT(), PreferredOpcode, nullptr};
- for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
+ for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) {
if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
- UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
+ (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
+ // Check for legality.
+ if (LI) {
+ LegalityQuery::MemDesc MMDesc;
+ const auto &MMO = **MI.memoperands_begin();
+ MMDesc.SizeInBits = MMO.getSizeInBits();
+ MMDesc.AlignInBits = MMO.getAlign().value() * 8;
+ MMDesc.Ordering = MMO.getOrdering();
+ LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
+ LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
+ if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action !=
+ LegalizeActions::Legal)
+ continue;
+ }
Preferred = ChoosePreferredUse(Preferred,
MRI.getType(UseMI.getOperand(0).getReg()),
UseMI.getOpcode(), &UseMI);
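
The extending-loads combine above is now gated on the LegalizerInfo when one is available: the use's destination type, the load's source type, and the memory operand's size/alignment/ordering are packed into a LegalityQuery, and the fold is skipped unless the resulting extending load would be Legal. A toy version of that gate, with a hand-written rule table standing in for LegalizerInfo (all names here are invented for illustration):

    #include <cassert>
    #include <map>
    #include <string>
    #include <utility>

    enum class Action { Legal, Unsupported };

    // Key: (opcode, result width in bits). Real queries also carry the source
    // type and a memory-operand descriptor.
    using Key = std::pair<std::string, unsigned>;

    static bool shouldCombine(const std::map<Key, Action> &Rules,
                              const std::string &Opc, unsigned ResultBits) {
      auto It = Rules.find({Opc, ResultBits});
      return It != Rules.end() && It->second == Action::Legal;
    }

    int main() {
      std::map<Key, Action> Rules = {
          {{"G_SEXTLOAD", 32}, Action::Legal},
          {{"G_SEXTLOAD", 64}, Action::Unsupported},
      };
      assert(shouldCombine(Rules, "G_SEXTLOAD", 32));   // fold allowed
      assert(!shouldCombine(Rules, "G_SEXTLOAD", 64));  // combine skipped
    }
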
@@ -498,7 +487,7 @@ void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
Register UseDstReg = UseMI->getOperand(0).getReg();
MachineOperand &UseSrcMO = UseMI->getOperand(1);
- const LLT &UseDstTy = MRI.getType(UseDstReg);
+ const LLT UseDstTy = MRI.getType(UseDstReg);
if (UseDstReg != ChosenDstReg) {
if (Preferred.Ty == UseDstTy) {
// If the use has the same type as the preferred use, then merge
@@ -559,7 +548,10 @@ void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
Observer.changedInstr(MI);
}
-bool CombinerHelper::isPredecessor(MachineInstr &DefMI, MachineInstr &UseMI) {
+bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
+ const MachineInstr &UseMI) {
+ assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
+ "shouldn't consider debug uses");
assert(DefMI.getParent() == UseMI.getParent());
if (&DefMI == &UseMI)
return false;
@@ -572,7 +564,10 @@ bool CombinerHelper::isPredecessor(MachineInstr &DefMI, MachineInstr &UseMI) {
llvm_unreachable("Block must contain instructions");
}
-bool CombinerHelper::dominates(MachineInstr &DefMI, MachineInstr &UseMI) {
+bool CombinerHelper::dominates(const MachineInstr &DefMI,
+ const MachineInstr &UseMI) {
+ assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
+ "shouldn't consider debug uses");
if (MDT)
return MDT->dominates(&DefMI, &UseMI);
else if (DefMI.getParent() != UseMI.getParent())
@@ -581,6 +576,24 @@ bool CombinerHelper::dominates(MachineInstr &DefMI, MachineInstr &UseMI) {
return isPredecessor(DefMI, UseMI);
}
+bool CombinerHelper::matchSextAlreadyExtended(MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
+ Register SrcReg = MI.getOperand(1).getReg();
+ unsigned SrcSignBits = KB->computeNumSignBits(SrcReg);
+ unsigned NumSextBits =
+ MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits() -
+ MI.getOperand(2).getImm();
+ return SrcSignBits >= NumSextBits;
+}
+
+bool CombinerHelper::applySextAlreadyExtended(MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
+ MachineIRBuilder MIB(MI);
+ MIB.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
+ MI.eraseFromParent();
+ return true;
+}
+
bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
Register &Base, Register &Offset) {
auto &MF = *MI.getParent()->getParent();
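
matchSextAlreadyExtended above compares the number of known sign bits of the G_SEXT_INREG source against the number of high bits the instruction would rewrite. For a concrete value, the quantity computeNumSignBits() reports is simply the count of leading bits that duplicate the sign bit, sign bit included; a minimal model of that count (not the KnownBits machinery, which also handles partially known values):

    #include <cassert>
    #include <cstdint>

    // Leading bits equal to the sign bit, including the sign bit itself.
    static unsigned numSignBits(uint32_t V) {
      unsigned SignBit = V >> 31;
      unsigned N = 1;
      for (int I = 30; I >= 0 && ((V >> I) & 1) == SignBit; --I)
        ++N;
      return N;
    }

    int main() {
      assert(numSignBits(0xFFFFFFFFu) == 32); // every bit is the sign bit
      assert(numSignBits(5) == 29);           // 29 leading zeros
      assert(numSignBits(0xFFFF8000u) == 17); // a sign-extended 16-bit value
    }
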
@@ -599,7 +612,7 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
- for (auto &Use : MRI.use_instructions(Base)) {
+ for (auto &Use : MRI.use_nodbg_instructions(Base)) {
if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
continue;
@@ -626,7 +639,8 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
// forming an indexed one.
bool MemOpDominatesAddrUses = true;
- for (auto &PtrAddUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
+ for (auto &PtrAddUse :
+ MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
if (!dominates(MI, PtrAddUse)) {
MemOpDominatesAddrUses = false;
break;
@@ -661,7 +675,7 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
Addr = MI.getOperand(1).getReg();
MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
- if (!AddrDef || MRI.hasOneUse(Addr))
+ if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
return false;
Base = AddrDef->getOperand(1).getReg();
@@ -699,7 +713,7 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
// FIXME: check whether all uses of the base pointer are constant PtrAdds.
// That might allow us to end base's liveness here by adjusting the constant.
- for (auto &UseMI : MRI.use_instructions(Addr)) {
+ for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
if (!dominates(MI, UseMI)) {
LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
return false;
@@ -811,7 +825,7 @@ bool CombinerHelper::matchElideBrByInvertingCond(MachineInstr &MI) {
MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
if (!CmpMI || CmpMI->getOpcode() != TargetOpcode::G_ICMP ||
- !MRI.hasOneUse(CmpMI->getOperand(0).getReg()))
+ !MRI.hasOneNonDBGUse(CmpMI->getOperand(0).getReg()))
return false;
return true;
}
@@ -854,38 +868,32 @@ static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
// Returns a list of types to use for memory op lowering in MemOps. A partial
// port of findOptimalMemOpLowering in TargetLowering.
-static bool findGISelOptimalMemOpLowering(
- std::vector<LLT> &MemOps, unsigned Limit, uint64_t Size, unsigned DstAlign,
- unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
- bool AllowOverlap, unsigned DstAS, unsigned SrcAS,
- const AttributeList &FuncAttributes, const TargetLowering &TLI) {
- // If 'SrcAlign' is zero, that means the memory operation does not need to
- // load the value, i.e. memset or memcpy from constant string. Otherwise,
- // it's the inferred alignment of the source. 'DstAlign', on the other hand,
- // is the specified alignment of the memory operation. If it is zero, that
- // means it's possible to change the alignment of the destination.
- // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
- // not need to be loaded.
- if (SrcAlign != 0 && SrcAlign < DstAlign)
+static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
+ unsigned Limit, const MemOp &Op,
+ unsigned DstAS, unsigned SrcAS,
+ const AttributeList &FuncAttributes,
+ const TargetLowering &TLI) {
+ if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
return false;
- LLT Ty = TLI.getOptimalMemOpLLT(Size, DstAlign, SrcAlign, IsMemset,
- ZeroMemset, MemcpyStrSrc, FuncAttributes);
+ LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
if (Ty == LLT()) {
// Use the largest scalar type whose alignment constraints are satisfied.
// We only need to check DstAlign here as SrcAlign is always greater or
// equal to DstAlign (or zero).
Ty = LLT::scalar(64);
- while (DstAlign && DstAlign < Ty.getSizeInBytes() &&
- !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, DstAlign))
- Ty = LLT::scalar(Ty.getSizeInBytes());
+ if (Op.isFixedDstAlign())
+ while (Op.getDstAlign() < Ty.getSizeInBytes() &&
+ !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
+ Ty = LLT::scalar(Ty.getSizeInBytes());
assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
// FIXME: check for the largest legal type we can load/store to.
}
unsigned NumMemOps = 0;
- while (Size != 0) {
+ uint64_t Size = Op.size();
+ while (Size) {
unsigned TySize = Ty.getSizeInBytes();
while (TySize > Size) {
// For now, only use non-vector load / store's for the left-over pieces.
@@ -903,9 +911,10 @@ static bool findGISelOptimalMemOpLowering(
bool Fast;
// Need to get a VT equivalent for allowMisalignedMemoryAccesses().
MVT VT = getMVTForLLT(Ty);
- if (NumMemOps && AllowOverlap && NewTySize < Size &&
+ if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
TLI.allowsMisalignedMemoryAccesses(
- VT, DstAS, DstAlign, MachineMemOperand::MONone, &Fast) &&
+ VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
+ MachineMemOperand::MONone, &Fast) &&
Fast)
TySize = Size;
else {
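
Independent of the switch to the MemOp descriptor, the surrounding loop keeps its shape: cover the operation size with the widest type found above, then repeatedly halve the type for the left-over tail (or, when overlap is allowed and misaligned access is fast, finish with one overlapping wide access). A compact standalone model of the non-overlapping part of that selection, using byte sizes instead of LLTs and ignoring alignment (a sketch, not the real policy):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Cover Size bytes with power-of-two chunks no larger than MaxChunk.
    static std::vector<unsigned> chunkSizes(uint64_t Size, unsigned MaxChunk) {
      std::vector<unsigned> Chunks;
      unsigned Ty = MaxChunk;
      while (Size) {
        while (Ty > Size)
          Ty /= 2;            // narrower scalar for the left-over piece
        Chunks.push_back(Ty);
        Size -= Ty;
      }
      return Chunks;
    }

    int main() {
      assert((chunkSizes(13, 8) == std::vector<unsigned>{8, 4, 1}));
      assert((chunkSizes(16, 8) == std::vector<unsigned>{8, 8}));
    }
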
@@ -926,8 +935,8 @@ static bool findGISelOptimalMemOpLowering(
static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
if (Ty.isVector())
- return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
- Ty.getNumElements());
+ return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
+ Ty.getNumElements());
return IntegerType::get(C, Ty.getSizeInBits());
}
@@ -942,12 +951,14 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
APInt SplatVal = APInt::getSplat(NumBits, Scalar);
return MIB.buildConstant(Ty, SplatVal).getReg(0);
}
- // FIXME: for vector types create a G_BUILD_VECTOR.
- if (Ty.isVector())
- return Register();
// Extend the byte value to the larger type, and then multiply by a magic
// value 0x010101... in order to replicate it across every byte.
+ // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
+ if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
+ return MIB.buildConstant(Ty, 0).getReg(0);
+ }
+
LLT ExtType = Ty.getScalarType();
auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
if (NumBits > 8) {
@@ -956,13 +967,16 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
}
- assert(ExtType == Ty && "Vector memset value type not supported yet");
+ // For vector types create a G_BUILD_VECTOR.
+ if (Ty.isVector())
+ Val = MIB.buildSplatVector(Ty, Val).getReg(0);
+
return Val;
}
-bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
- unsigned KnownLen, unsigned Align,
- bool IsVolatile) {
+bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
+ Register Val, unsigned KnownLen,
+ Align Alignment, bool IsVolatile) {
auto &MF = *MI.getParent()->getParent();
const auto &TLI = *MF.getSubtarget().getTargetLowering();
auto &DL = MF.getDataLayout();
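
getMemsetValue() above now special-cases a zero value and otherwise keeps the classic splat trick: zero-extend the byte and multiply it by 0x0101...01 to replicate it into every byte of the wider scalar (and, new in this patch, splat that scalar with buildSplatVector for vector destinations). The scalar part of the trick in plain C++:

    #include <cassert>
    #include <cstdint>

    // Replicate a byte across a NumBits-wide scalar via the 0x0101...01 multiply.
    static uint64_t splatByte(uint8_t B, unsigned NumBits) {
      assert(NumBits % 8 == 0 && NumBits <= 64);
      uint64_t Magic = ~0ULL / 0xFF;            // 0x0101010101010101
      if (NumBits < 64)
        Magic &= (1ULL << NumBits) - 1;
      return uint64_t(B) * Magic;
    }

    int main() {
      assert(splatByte(0xAB, 32) == 0xABABABABULL);
      assert(splatByte(0x00, 64) == 0);         // zero needs no multiply at all
    }
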
@@ -987,24 +1001,25 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val
auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
- if (!findGISelOptimalMemOpLowering(
- MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), 0,
- /*IsMemset=*/true,
- /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
- /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(), ~0u,
- MF.getFunction().getAttributes(), TLI))
+ if (!findGISelOptimalMemOpLowering(MemOps, Limit,
+ MemOp::Set(KnownLen, DstAlignCanChange,
+ Alignment,
+ /*IsZeroMemset=*/IsZeroVal,
+ /*IsVolatile=*/IsVolatile),
+ DstPtrInfo.getAddrSpace(), ~0u,
+ MF.getFunction().getAttributes(), TLI))
return false;
if (DstAlignCanChange) {
// Get an estimate of the type from the LLT.
Type *IRTy = getTypeForLLT(MemOps[0], C);
- unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
- if (NewAlign > Align) {
- Align = NewAlign;
+ Align NewAlign = DL.getABITypeAlign(IRTy);
+ if (NewAlign > Alignment) {
+ Alignment = NewAlign;
unsigned FI = FIDef->getOperand(1).getIndex();
// Give the stack frame object a larger alignment if needed.
- if (MFI.getObjectAlignment(FI) < Align)
- MFI.setObjectAlignment(FI, Align);
+ if (MFI.getObjectAlign(FI) < Alignment)
+ MFI.setObjectAlignment(FI, Alignment);
}
}
@@ -1072,10 +1087,9 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val
return true;
}
-
bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
Register Src, unsigned KnownLen,
- unsigned DstAlign, unsigned SrcAlign,
+ Align DstAlign, Align SrcAlign,
bool IsVolatile) {
auto &MF = *MI.getParent()->getParent();
const auto &TLI = *MF.getSubtarget().getTargetLowering();
@@ -1087,7 +1101,7 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
bool DstAlignCanChange = false;
MachineFrameInfo &MFI = MF.getFrameInfo();
bool OptSize = shouldLowerMemFuncForSize(MF);
- unsigned Alignment = MinAlign(DstAlign, SrcAlign);
+ Align Alignment = commonAlignment(DstAlign, SrcAlign);
MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -1106,32 +1120,30 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
if (!findGISelOptimalMemOpLowering(
- MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
- SrcAlign,
- /*IsMemset=*/false,
- /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
- /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
- SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
+ MemOps, Limit,
+ MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
+ IsVolatile),
+ DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
+ MF.getFunction().getAttributes(), TLI))
return false;
if (DstAlignCanChange) {
// Get an estimate of the type from the LLT.
Type *IRTy = getTypeForLLT(MemOps[0], C);
- unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
+ Align NewAlign = DL.getABITypeAlign(IRTy);
// Don't promote to an alignment that would require dynamic stack
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Alignment &&
- DL.exceedsNaturalStackAlignment(Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
+ NewAlign = NewAlign / 2;
if (NewAlign > Alignment) {
Alignment = NewAlign;
unsigned FI = FIDef->getOperand(1).getIndex();
// Give the stack frame object a larger alignment if needed.
- if (MFI.getObjectAlignment(FI) < Alignment)
+ if (MFI.getObjectAlign(FI) < Alignment)
MFI.setObjectAlignment(FI, Alignment);
}
}
@@ -1156,7 +1168,7 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
// Construct MMOs for the accesses.
auto *LoadMMO =
MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
- auto *StoreMMO =
+ auto *StoreMMO =
MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
// Create the load.
@@ -1182,9 +1194,9 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
}
bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
- Register Src, unsigned KnownLen,
- unsigned DstAlign, unsigned SrcAlign,
- bool IsVolatile) {
+ Register Src, unsigned KnownLen,
+ Align DstAlign, Align SrcAlign,
+ bool IsVolatile) {
auto &MF = *MI.getParent()->getParent();
const auto &TLI = *MF.getSubtarget().getTargetLowering();
auto &DL = MF.getDataLayout();
@@ -1195,7 +1207,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
bool DstAlignCanChange = false;
MachineFrameInfo &MFI = MF.getFrameInfo();
bool OptSize = shouldLowerMemFuncForSize(MF);
- unsigned Alignment = MinAlign(DstAlign, SrcAlign);
+ Align Alignment = commonAlignment(DstAlign, SrcAlign);
MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -1213,32 +1225,30 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
// to a bug in its findOptimalMemOpLowering implementation. For now do the
// same thing here.
if (!findGISelOptimalMemOpLowering(
- MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
- SrcAlign,
- /*IsMemset=*/false,
- /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
- /*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
- SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
+ MemOps, Limit,
+ MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
+ /*IsVolatile*/ true),
+ DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
+ MF.getFunction().getAttributes(), TLI))
return false;
if (DstAlignCanChange) {
// Get an estimate of the type from the LLT.
Type *IRTy = getTypeForLLT(MemOps[0], C);
- unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
+ Align NewAlign = DL.getABITypeAlign(IRTy);
// Don't promote to an alignment that would require dynamic stack
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Alignment &&
- DL.exceedsNaturalStackAlignment(Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
+ NewAlign = NewAlign / 2;
if (NewAlign > Alignment) {
Alignment = NewAlign;
unsigned FI = FIDef->getOperand(1).getIndex();
// Give the stack frame object a larger alignment if needed.
- if (MFI.getObjectAlignment(FI) < Alignment)
+ if (MFI.getObjectAlign(FI) < Alignment)
MFI.setObjectAlignment(FI, Alignment);
}
}
@@ -1304,8 +1314,8 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
if (IsVolatile)
return false;
- unsigned DstAlign = MemOp->getBaseAlignment();
- unsigned SrcAlign = 0;
+ Align DstAlign = MemOp->getBaseAlign();
+ Align SrcAlign;
Register Dst = MI.getOperand(1).getReg();
Register Src = MI.getOperand(2).getReg();
Register Len = MI.getOperand(3).getReg();
@@ -1313,7 +1323,7 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
if (ID != Intrinsic::memset) {
assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
MemOp = *(++MMOIt);
- SrcAlign = MemOp->getBaseAlignment();
+ SrcAlign = MemOp->getBaseAlign();
}
// See if this is a constant length copy
@@ -1385,6 +1395,338 @@ bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
return true;
}
+bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
+ unsigned &ShiftVal) {
+ assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
+ auto MaybeImmVal =
+ getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+ if (!MaybeImmVal || !isPowerOf2_64(MaybeImmVal->Value))
+ return false;
+ ShiftVal = Log2_64(MaybeImmVal->Value);
+ return true;
+}
+
+bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
+ unsigned &ShiftVal) {
+ assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
+ MachineIRBuilder MIB(MI);
+ LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
+ auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
+ Observer.changingInstr(MI);
+ MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
+ MI.getOperand(2).setReg(ShiftCst.getReg(0));
+ Observer.changedInstr(MI);
+ return true;
+}
+
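
The G_MUL combine added above turns a multiply by a power-of-two constant into a left shift by the constant's base-2 logarithm, reusing the original instruction by swapping its opcode and replacing the constant operand. The arithmetic it relies on, as a standalone check (C++20 <bit> standing in for LLVM's isPowerOf2_64/Log2_64):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // x * C == x << log2(C) whenever C is a power of two.
    static uint64_t mulViaShift(uint64_t X, uint64_t C) {
      assert(std::has_single_bit(C) && "constant must be a power of two");
      unsigned ShiftVal = std::countr_zero(C);  // log2 of a power of two
      return X << ShiftVal;
    }

    int main() {
      assert(mulViaShift(7, 8) == 7 * 8);
      assert(mulViaShift(123, 1) == 123);       // shift by zero
    }
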
+bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
+ unsigned TargetShiftSize,
+ unsigned &ShiftVal) {
+ assert((MI.getOpcode() == TargetOpcode::G_SHL ||
+ MI.getOpcode() == TargetOpcode::G_LSHR ||
+ MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
+
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ if (Ty.isVector()) // TODO:
+ return false;
+
+ // Don't narrow further than the requested size.
+ unsigned Size = Ty.getSizeInBits();
+ if (Size <= TargetShiftSize)
+ return false;
+
+ auto MaybeImmVal =
+ getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+ if (!MaybeImmVal)
+ return false;
+
+ ShiftVal = MaybeImmVal->Value;
+ return ShiftVal >= Size / 2 && ShiftVal < Size;
+}
+
+bool CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
+ const unsigned &ShiftVal) {
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ LLT Ty = MRI.getType(SrcReg);
+ unsigned Size = Ty.getSizeInBits();
+ unsigned HalfSize = Size / 2;
+ assert(ShiftVal >= HalfSize);
+
+ LLT HalfTy = LLT::scalar(HalfSize);
+
+ Builder.setInstr(MI);
+ auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
+ unsigned NarrowShiftAmt = ShiftVal - HalfSize;
+
+ if (MI.getOpcode() == TargetOpcode::G_LSHR) {
+ Register Narrowed = Unmerge.getReg(1);
+
+ // dst = G_LSHR s64:x, C for C >= 32
+ // =>
+ // lo, hi = G_UNMERGE_VALUES x
+ // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
+
+ if (NarrowShiftAmt != 0) {
+ Narrowed = Builder.buildLShr(HalfTy, Narrowed,
+ Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
+ }
+
+ auto Zero = Builder.buildConstant(HalfTy, 0);
+ Builder.buildMerge(DstReg, { Narrowed, Zero });
+ } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
+ Register Narrowed = Unmerge.getReg(0);
+ // dst = G_SHL s64:x, C for C >= 32
+ // =>
+ // lo, hi = G_UNMERGE_VALUES x
+ // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
+ if (NarrowShiftAmt != 0) {
+ Narrowed = Builder.buildShl(HalfTy, Narrowed,
+ Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
+ }
+
+ auto Zero = Builder.buildConstant(HalfTy, 0);
+ Builder.buildMerge(DstReg, { Zero, Narrowed });
+ } else {
+ assert(MI.getOpcode() == TargetOpcode::G_ASHR);
+ auto Hi = Builder.buildAShr(
+ HalfTy, Unmerge.getReg(1),
+ Builder.buildConstant(HalfTy, HalfSize - 1));
+
+ if (ShiftVal == HalfSize) {
+ // (G_ASHR i64:x, 32) ->
+ // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
+ Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
+ } else if (ShiftVal == Size - 1) {
+ // Don't need a second shift.
+ // (G_ASHR i64:x, 63) ->
+ // %narrowed = (G_ASHR hi_32(x), 31)
+ // G_MERGE_VALUES %narrowed, %narrowed
+ Builder.buildMerge(DstReg, { Hi, Hi });
+ } else {
+ auto Lo = Builder.buildAShr(
+ HalfTy, Unmerge.getReg(1),
+ Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
+
+ // (G_ASHR i64:x, C) ->, for C >= 32
+ // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
+ Builder.buildMerge(DstReg, { Lo, Hi });
+ }
+ }
+
+ MI.eraseFromParent();
+ return true;
+}
+
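
applyCombineShiftToUnmerge above narrows a wide shift whose amount is at least half the bit width into a G_UNMERGE_VALUES of the two halves, one half-width shift, and a G_MERGE_VALUES with a zero half (or, for G_ASHR, a sign half). The G_LSHR case modelled on plain 64/32-bit integers (a sketch of the identity, not the MIR building):

    #include <cassert>
    #include <cstdint>

    // A 64-bit logical right shift by C >= 32 only needs the high half,
    // shifted by C - 32, with zeros merged in as the new high half.
    static uint64_t lshr64ViaHalves(uint64_t X, unsigned C) {
      assert(C >= 32 && C < 64);
      uint32_t Hi = uint32_t(X >> 32);     // lo, hi = G_UNMERGE_VALUES x
      uint32_t Narrowed = Hi >> (C - 32);  // G_LSHR hi, C - 32
      return uint64_t(Narrowed);           // G_MERGE_VALUES narrowed, 0
    }

    int main() {
      const uint64_t X = 0x123456789abcdef0ULL;
      for (unsigned C = 32; C < 64; ++C)
        assert(lshr64ViaHalves(X, C) == (X >> C));
    }
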
+bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
+ unsigned TargetShiftAmount) {
+ unsigned ShiftAmt;
+ if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
+ applyCombineShiftToUnmerge(MI, ShiftAmt);
+ return true;
+ }
+
+ return false;
+}
+
+bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
+ return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
+ return MO.isReg() &&
+ getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
+ });
+}
+
+bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
+ return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
+ return !MO.isReg() ||
+ getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
+ });
+}
+
+bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
+ ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
+ return all_of(Mask, [](int Elt) { return Elt < 0; });
+}
+
+bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_STORE);
+ return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
+ MRI);
+}
+
+bool CombinerHelper::eraseInst(MachineInstr &MI) {
+ MI.eraseFromParent();
+ return true;
+}
+
+bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
+ const MachineOperand &MOP2) {
+ if (!MOP1.isReg() || !MOP2.isReg())
+ return false;
+ MachineInstr *I1 = getDefIgnoringCopies(MOP1.getReg(), MRI);
+ if (!I1)
+ return false;
+ MachineInstr *I2 = getDefIgnoringCopies(MOP2.getReg(), MRI);
+ if (!I2)
+ return false;
+
+ // Handle a case like this:
+ //
+ // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
+ //
+ // Even though %0 and %1 are produced by the same instruction they are not
+ // the same values.
+ if (I1 == I2)
+ return MOP1.getReg() == MOP2.getReg();
+
+ // If we have an instruction which loads or stores, we can't guarantee that
+ // it is identical.
+ //
+ // For example, we may have
+ //
+ // %x1 = G_LOAD %addr (load N from @somewhere)
+ // ...
+ // call @foo
+ // ...
+ // %x2 = G_LOAD %addr (load N from @somewhere)
+ // ...
+ // %or = G_OR %x1, %x2
+ //
+ // It's possible that @foo will modify whatever lives at the address we're
+ // loading from. To be safe, let's just assume that all loads and stores
+ // are different (unless we have something which is guaranteed to not
+ // change.)
+ if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))
+ return false;
+
+ // Check for physical registers on the instructions first to avoid cases
+ // like this:
+ //
+ // %a = COPY $physreg
+ // ...
+ // SOMETHING implicit-def $physreg
+ // ...
+ // %b = COPY $physreg
+ //
+ // These copies are not equivalent.
+ if (any_of(I1->uses(), [](const MachineOperand &MO) {
+ return MO.isReg() && MO.getReg().isPhysical();
+ })) {
+ // Check if we have a case like this:
+ //
+ // %a = COPY $physreg
+ // %b = COPY %a
+ //
+ // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
+ // From that, we know that they must have the same value, since they must
+ // have come from the same COPY.
+ return I1->isIdenticalTo(*I2);
+ }
+
+ // We don't have any physical registers, so we don't necessarily need the
+ // same vreg defs.
+ //
+ // On the off-chance that there's some target instruction feeding into the
+ // instruction, let's use produceSameValue instead of isIdenticalTo.
+ return Builder.getTII().produceSameValue(*I1, *I2, &MRI);
+}
+
+bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
+ if (!MOP.isReg())
+ return false;
+ // MIPatternMatch doesn't let us look through G_ZEXT etc.
+ auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI);
+ return ValAndVReg && ValAndVReg->Value == C;
+}
+
+bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
+ unsigned OpIdx) {
+ assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
+ Register OldReg = MI.getOperand(0).getReg();
+ Register Replacement = MI.getOperand(OpIdx).getReg();
+ assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
+ MI.eraseFromParent();
+ replaceRegWith(MRI, OldReg, Replacement);
+ return true;
+}
+
+bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_SELECT);
+ // Match (cond ? x : x)
+ return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
+ canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
+ MRI);
+}
+
+bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
+ return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
+ canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
+ MRI);
+}
+
+bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
+ return matchConstantOp(MI.getOperand(OpIdx), 0) &&
+ canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
+ MRI);
+}
+
+bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
+ assert(MI.getNumDefs() == 1 && "Expected only one def?");
+ Builder.setInstr(MI);
+ Builder.buildFConstant(MI.getOperand(0), C);
+ MI.eraseFromParent();
+ return true;
+}
+
+bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
+ assert(MI.getNumDefs() == 1 && "Expected only one def?");
+ Builder.setInstr(MI);
+ Builder.buildConstant(MI.getOperand(0), C);
+ MI.eraseFromParent();
+ return true;
+}
+
+bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
+ assert(MI.getNumDefs() == 1 && "Expected only one def?");
+ Builder.setInstr(MI);
+ Builder.buildUndef(MI.getOperand(0));
+ MI.eraseFromParent();
+ return true;
+}
+
+bool CombinerHelper::matchSimplifyAddToSub(
+ MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
+ Register LHS = MI.getOperand(1).getReg();
+ Register RHS = MI.getOperand(2).getReg();
+ Register &NewLHS = std::get<0>(MatchInfo);
+ Register &NewRHS = std::get<1>(MatchInfo);
+
+ // Helper lambda to check for opportunities for
+ // ((0-A) + B) -> B - A
+ // (A + (0-B)) -> A - B
+ auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
+ int64_t Cst;
+ if (!mi_match(MaybeSub, MRI, m_GSub(m_ICst(Cst), m_Reg(NewRHS))) ||
+ Cst != 0)
+ return false;
+ NewLHS = MaybeNewLHS;
+ return true;
+ };
+
+ return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
+}
+
+bool CombinerHelper::applySimplifyAddToSub(
+ MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
+ Builder.setInstr(MI);
+ Register SubLHS, SubRHS;
+ std::tie(SubLHS, SubRHS) = MatchInfo;
+ Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
+ MI.eraseFromParent();
+ return true;
+}
+
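
matchSimplifyAddToSub/applySimplifyAddToSub above rewrite ((0 - A) + B) and (A + (0 - B)) into a plain subtraction once mi_match confirms one addend is a G_SUB from a zero constant. A scalar model of the match, with a tiny "defined by a sub" record standing in for chasing the vreg definition in MachineRegisterInfo:

    #include <cassert>
    #include <optional>

    struct Sub { long LHS, RHS; };      // stands in for a G_SUB definition

    struct Value {
      long Concrete;                    // runtime value, for checking only
      std::optional<Sub> DefinedBySub;  // set if the value came from a sub
    };

    // If either addend is (0 - X), return the operands of the equivalent sub.
    static std::optional<Sub> matchAddToSub(const Value &A, const Value &B) {
      if (A.DefinedBySub && A.DefinedBySub->LHS == 0) // ((0 - X) + B) -> B - X
        return Sub{B.Concrete, A.DefinedBySub->RHS};
      if (B.DefinedBySub && B.DefinedBySub->LHS == 0) // (A + (0 - X)) -> A - X
        return Sub{A.Concrete, B.DefinedBySub->RHS};
      return std::nullopt;
    }

    int main() {
      Value NegSeven{-7, Sub{0, 7}};    // %a = G_SUB 0, 7
      Value Five{5, std::nullopt};
      auto S = matchAddToSub(NegSeven, Five);
      assert(S && S->LHS - S->RHS == NegSeven.Concrete + Five.Concrete);
    }
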
bool CombinerHelper::tryCombine(MachineInstr &MI) {
if (tryCombineCopy(MI))
return true;
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp b/llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
index 62b903c30b89d..bdaa6378e901d 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
@@ -38,3 +38,11 @@ RAIIDelegateInstaller::RAIIDelegateInstaller(MachineFunction &MF,
}
RAIIDelegateInstaller::~RAIIDelegateInstaller() { MF.resetDelegate(Delegate); }
+
+RAIIMFObserverInstaller::RAIIMFObserverInstaller(MachineFunction &MF,
+ GISelChangeObserver &Observer)
+ : MF(MF) {
+ MF.setObserver(&Observer);
+}
+
+RAIIMFObserverInstaller::~RAIIMFObserverInstaller() { MF.setObserver(nullptr); }
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 64023ecfad82e..0e9c6e4fab9f9 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -11,6 +11,7 @@
//
//===------------------
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -24,54 +25,50 @@ using namespace llvm;
char llvm::GISelKnownBitsAnalysis::ID = 0;
-INITIALIZE_PASS_BEGIN(GISelKnownBitsAnalysis, DEBUG_TYPE,
- "Analysis for ComputingKnownBits", false, true)
-INITIALIZE_PASS_END(GISelKnownBitsAnalysis, DEBUG_TYPE,
- "Analysis for ComputingKnownBits", false, true)
+INITIALIZE_PASS(GISelKnownBitsAnalysis, DEBUG_TYPE,
+ "Analysis for ComputingKnownBits", false, true)
-GISelKnownBits::GISelKnownBits(MachineFunction &MF)
+GISelKnownBits::GISelKnownBits(MachineFunction &MF, unsigned MaxDepth)
: MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
- DL(MF.getFunction().getParent()->getDataLayout()) {}
+ DL(MF.getFunction().getParent()->getDataLayout()), MaxDepth(MaxDepth) {}
-Align GISelKnownBits::inferAlignmentForFrameIdx(int FrameIdx, int Offset,
- const MachineFunction &MF) {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
- return commonAlignment(Align(MFI.getObjectAlignment(FrameIdx)), Offset);
- // TODO: How to handle cases with Base + Offset?
-}
-
-MaybeAlign GISelKnownBits::inferPtrAlignment(const MachineInstr &MI) {
- if (MI.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
- int FrameIdx = MI.getOperand(1).getIndex();
- return inferAlignmentForFrameIdx(FrameIdx, 0, *MI.getMF());
+Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) {
+ const MachineInstr *MI = MRI.getVRegDef(R);
+ switch (MI->getOpcode()) {
+ case TargetOpcode::COPY:
+ return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
+ case TargetOpcode::G_FRAME_INDEX: {
+ int FrameIdx = MI->getOperand(1).getIndex();
+ return MF.getFrameInfo().getObjectAlign(FrameIdx);
+ }
+ case TargetOpcode::G_INTRINSIC:
+ case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
+ default:
+ return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1);
}
- return None;
-}
-
-void GISelKnownBits::computeKnownBitsForFrameIndex(Register R, KnownBits &Known,
- const APInt &DemandedElts,
- unsigned Depth) {
- const MachineInstr &MI = *MRI.getVRegDef(R);
- computeKnownBitsForAlignment(Known, inferPtrAlignment(MI));
-}
-
-void GISelKnownBits::computeKnownBitsForAlignment(KnownBits &Known,
- MaybeAlign Alignment) {
- if (Alignment)
- // The low bits are known zero if the pointer is aligned.
- Known.Zero.setLowBits(Log2(Alignment));
}
KnownBits GISelKnownBits::getKnownBits(MachineInstr &MI) {
+ assert(MI.getNumExplicitDefs() == 1 &&
+ "expected single return generic instruction");
return getKnownBits(MI.getOperand(0).getReg());
}
KnownBits GISelKnownBits::getKnownBits(Register R) {
- KnownBits Known;
- LLT Ty = MRI.getType(R);
+ const LLT Ty = MRI.getType(R);
APInt DemandedElts =
Ty.isVector() ? APInt::getAllOnesValue(Ty.getNumElements()) : APInt(1, 1);
+ return getKnownBits(R, DemandedElts);
+}
+
+KnownBits GISelKnownBits::getKnownBits(Register R, const APInt &DemandedElts,
+ unsigned Depth) {
+ // For now, we only maintain the cache during one request.
+ assert(ComputeKnownBitsCache.empty() && "Cache should have been cleared");
+
+ KnownBits Known;
computeKnownBitsImpl(R, Known, DemandedElts);
+ ComputeKnownBitsCache.clear();
return Known;
}
@@ -87,6 +84,17 @@ APInt GISelKnownBits::getKnownZeroes(Register R) {
APInt GISelKnownBits::getKnownOnes(Register R) { return getKnownBits(R).One; }
+LLVM_ATTRIBUTE_UNUSED static void
+dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
+ dbgs() << "[" << Depth << "] Compute known bits: " << MI << "[" << Depth
+ << "] Computed for: " << MI << "[" << Depth << "] Known: 0x"
+ << (Known.Zero | Known.One).toString(16, false) << "\n"
+ << "[" << Depth << "] Zero: 0x" << Known.Zero.toString(16, false)
+ << "\n"
+ << "[" << Depth << "] One: 0x" << Known.One.toString(16, false)
+ << "\n";
+}
+
void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
const APInt &DemandedElts,
unsigned Depth) {
@@ -104,12 +112,28 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
}
unsigned BitWidth = DstTy.getSizeInBits();
+ auto CacheEntry = ComputeKnownBitsCache.find(R);
+ if (CacheEntry != ComputeKnownBitsCache.end()) {
+ Known = CacheEntry->second;
+ LLVM_DEBUG(dbgs() << "Cache hit at ");
+ LLVM_DEBUG(dumpResult(MI, Known, Depth));
+ assert(Known.getBitWidth() == BitWidth && "Cache entry size doesn't match");
+ return;
+ }
Known = KnownBits(BitWidth); // Don't know anything
if (DstTy.isVector())
return; // TODO: Handle vectors.
- if (Depth == getMaxDepth())
+ // Depth may get bigger than max depth if it gets passed to a different
+ // GISelKnownBits object.
+ // This may happen when say a generic part uses a GISelKnownBits object
+ // with some max depth, but then we hit TL.computeKnownBitsForTargetInstr
+ // which creates a new GISelKnownBits object with a different and smaller
+ // depth. If we just check for equality, we would never exit if the depth
+ // that is passed down to the target specific GISelKnownBits object is
+ // already bigger than its max depth.
+ if (Depth >= getMaxDepth())
return;
if (!DemandedElts)
@@ -122,20 +146,53 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,
Depth);
break;
- case TargetOpcode::COPY: {
- MachineOperand Dst = MI.getOperand(0);
- MachineOperand Src = MI.getOperand(1);
- // Look through trivial copies but don't look through trivial copies of the
- // form `%1:(s32) = OP %0:gpr32` known-bits analysis is currently unable to
- // determine the bit width of a register class.
- //
- // We can't use NoSubRegister by name as it's defined by each target but
- // it's always defined to be 0 by tablegen.
- if (Dst.getSubReg() == 0 /*NoSubRegister*/ && Src.getReg().isVirtual() &&
- Src.getSubReg() == 0 /*NoSubRegister*/ &&
- MRI.getType(Src.getReg()).isValid()) {
- // Don't increment Depth for this one since we didn't do any work.
- computeKnownBitsImpl(Src.getReg(), Known, DemandedElts, Depth);
+ case TargetOpcode::COPY:
+ case TargetOpcode::G_PHI:
+ case TargetOpcode::PHI: {
+ Known.One = APInt::getAllOnesValue(BitWidth);
+ Known.Zero = APInt::getAllOnesValue(BitWidth);
+ // Destination registers should not have subregisters at this
+ // point of the pipeline, otherwise the main live-range will be
+ // defined more than once, which is against SSA.
+ assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?");
+ // Record in the cache that we know nothing for MI.
+ // This will get updated later and in the meantime, if we reach that
+ // phi again, because of a loop, we will cut the search thanks to this
+ // cache entry.
+ // We could actually build up more information on the phi by not cutting
+ // the search, but that additional information is more a side effect
+ // than an intended choice.
+ // Therefore, for now, save on compile time until we derive a proper way
+ // to derive known bits for PHIs within loops.
+ ComputeKnownBitsCache[R] = KnownBits(BitWidth);
+ // PHI's operand are a mix of registers and basic blocks interleaved.
+ // We only care about the register ones.
+ for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
+ const MachineOperand &Src = MI.getOperand(Idx);
+ Register SrcReg = Src.getReg();
+ // Look through trivial copies and phis but don't look through trivial
+ // copies or phis of the form `%1:(s32) = OP %0:gpr32`, known-bits
+ // analysis is currently unable to determine the bit width of a
+ // register class.
+ //
+ // We can't use NoSubRegister by name as it's defined by each target but
+ // it's always defined to be 0 by tablegen.
+ if (SrcReg.isVirtual() && Src.getSubReg() == 0 /*NoSubRegister*/ &&
+ MRI.getType(SrcReg).isValid()) {
+ // For COPYs we don't do anything, don't increase the depth.
+ computeKnownBitsImpl(SrcReg, Known2, DemandedElts,
+ Depth + (Opcode != TargetOpcode::COPY));
+ Known.One &= Known2.One;
+ Known.Zero &= Known2.Zero;
+ // If we reach a point where we don't know anything
+ // just stop looking through the operands.
+ if (Known.One == 0 && Known.Zero == 0)
+ break;
+ } else {
+ // We know nothing.
+ Known = KnownBits(BitWidth);
+ break;
+ }
}
break;
}
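
The G_PHI/PHI handling above seeds the known-bits cache with a "know nothing" entry for the phi before walking its operands, so a loop that feeds the phi back into itself terminates at the cache hit instead of recursing forever, at the cost of a conservative answer. The same cycle-cutting pattern on a toy value graph and a toy lattice (number of low bits known to be zero; all names invented for illustration):

    #include <algorithm>
    #include <cassert>
    #include <unordered_map>
    #include <vector>

    struct Node {
      std::vector<int> Preds; // incoming values; empty == leaf with 4 known zeros
    };

    static unsigned lowZeros(const std::vector<Node> &G, int N,
                             std::unordered_map<int, unsigned> &Cache) {
      if (auto It = Cache.find(N); It != Cache.end())
        return It->second;
      Cache[N] = 0;                         // conservative seed cuts cycles
      if (G[N].Preds.empty())
        return Cache[N] = 4;
      unsigned Meet = ~0u;
      for (int P : G[N].Preds)
        Meet = std::min(Meet, lowZeros(G, P, Cache));
      return Cache[N] = Meet;
    }

    int main() {
      // Node 0 is a "phi" over a leaf and itself (a loop); the seed entry makes
      // the final answer the conservative 0 rather than looping forever.
      std::vector<Node> G = {{{1, 0}}, {{}}};
      std::unordered_map<int, unsigned> Cache;
      assert(lowZeros(G, 0, Cache) == 0);
    }
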
@@ -148,22 +205,17 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
break;
}
case TargetOpcode::G_FRAME_INDEX: {
- computeKnownBitsForFrameIndex(R, Known, DemandedElts);
+ int FrameIdx = MI.getOperand(1).getIndex();
+ TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
break;
}
case TargetOpcode::G_SUB: {
- // If low bits are known to be zero in both operands, then we know they are
- // going to be 0 in the result. Both addition and complement operations
- // preserve the low zero bits.
- computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+ computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
Depth + 1);
- unsigned KnownZeroLow = Known2.countMinTrailingZeros();
- if (KnownZeroLow == 0)
- break;
computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
Depth + 1);
- KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
- Known.Zero.setLowBits(KnownZeroLow);
+ Known = KnownBits::computeForAddSub(/*Add*/ false, /*NSW*/ false, Known,
+ Known2);
break;
}
case TargetOpcode::G_XOR: {
@@ -172,11 +224,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
Depth + 1);
- // Output known-0 bits are known if clear or set in both the LHS & RHS.
- APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
- // Output known-1 are known to be set if set in only one of the LHS, RHS.
- Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
- Known.Zero = KnownZeroOut;
+ Known ^= Known2;
break;
}
case TargetOpcode::G_PTR_ADD: {
@@ -187,24 +235,12 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
LLVM_FALLTHROUGH;
}
case TargetOpcode::G_ADD: {
- // Output known-0 bits are known if clear or set in both the low clear bits
- // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
- // low 3 bits clear.
- // Output known-0 bits are also known if the top bits of each input are
- // known to be clear. For example, if one input has the top 10 bits clear
- // and the other has the top 8 bits clear, we know the top 7 bits of the
- // output must be clear.
- computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+ computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
Depth + 1);
- unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
- unsigned KnownZeroLow = Known2.countMinTrailingZeros();
computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
Depth + 1);
- KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
- KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
- Known.Zero.setLowBits(KnownZeroLow);
- if (KnownZeroHigh > 1)
- Known.Zero.setHighBits(KnownZeroHigh - 1);
+ Known =
+ KnownBits::computeForAddSub(/*Add*/ true, /*NSW*/ false, Known, Known2);
break;
}
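
The deleted comments in the G_SUB and G_ADD cases spelled out why low known-zero bits survive an add: no carry can appear below the lowest possibly-set bit of either operand (the old example: 8 + (X << 3) keeps its low 3 bits clear). KnownBits::computeForAddSub now encapsulates that reasoning and more; as a reference, here is just the low-bits argument as a self-contained sketch, independent of LLVM's types:

#include <cstdint>

// Number of low bits known to be zero in a sum, given the known-zero masks of
// the two addends. Carries cannot appear below the smaller of the two counts,
// e.g. 8 + (x << 3) always has its low 3 bits clear.
unsigned knownLowZerosOfSum(uint64_t KnownZeroLHS, uint64_t KnownZeroRHS) {
  auto trailingOnes = [](uint64_t M) {
    unsigned N = 0;
    for (; M & 1; M >>= 1)
      ++N;
    return N;
  };
  unsigned L = trailingOnes(KnownZeroLHS);
  unsigned R = trailingOnes(KnownZeroRHS);
  return L < R ? L : R;
}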
case TargetOpcode::G_AND: {
@@ -214,10 +250,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
Depth + 1);
- // Output known-1 bits are only known if set in both the LHS & RHS.
- Known.One &= Known2.One;
- // Output known-0 are known to be clear if zero in either the LHS | RHS.
- Known.Zero |= Known2.Zero;
+ Known &= Known2;
break;
}
case TargetOpcode::G_OR: {
@@ -227,10 +260,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
Depth + 1);
- // Output known-0 bits are only known if clear in both the LHS & RHS.
- Known.Zero &= Known2.Zero;
- // Output known-1 are known to be set if set in either the LHS | RHS.
- Known.One |= Known2.One;
+ Known |= Known2;
break;
}
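
The G_XOR, G_AND and G_OR hunks above drop the hand-written bit manipulation in favour of KnownBits' overloaded operators. The rules being encapsulated are exactly the ones the deleted comments stated; here they are as a self-contained sketch (again with a stand-in struct, not LLVM's KnownBits):

#include <cstdint>

struct KB { uint64_t Zero, One; }; // set bit => known 0 / known 1

// AND: known 1 only where both inputs are known 1; known 0 where either is.
KB knownAnd(KB A, KB B) { return {A.Zero | B.Zero, A.One & B.One}; }

// OR: known 0 only where both inputs are known 0; known 1 where either is.
KB knownOr(KB A, KB B) { return {A.Zero & B.Zero, A.One | B.One}; }

// XOR: known 0 where the inputs are known equal, known 1 where they are
// known to differ.
KB knownXor(KB A, KB B) {
  return {(A.Zero & B.Zero) | (A.One & B.One),
          (A.Zero & B.One) | (A.One & B.Zero)};
}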
case TargetOpcode::G_MUL: {
@@ -287,7 +317,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
case TargetOpcode::G_ANYEXT: {
computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
Depth + 1);
- Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
+ Known = Known.zext(BitWidth);
break;
}
case TargetOpcode::G_LOAD: {
@@ -353,9 +383,9 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
? DL.getIndexSizeInBits(SrcTy.getAddressSpace())
: SrcTy.getSizeInBits();
assert(SrcBitWidth && "SrcBitWidth can't be zero");
- Known = Known.zextOrTrunc(SrcBitWidth, true);
+ Known = Known.zextOrTrunc(SrcBitWidth);
computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);
- Known = Known.zextOrTrunc(BitWidth, true);
+ Known = Known.zextOrTrunc(BitWidth);
if (BitWidth > SrcBitWidth)
Known.Zero.setBitsFrom(SrcBitWidth);
break;
@@ -363,14 +393,10 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
}
assert(!Known.hasConflict() && "Bits known to be one AND zero?");
- LLVM_DEBUG(dbgs() << "[" << Depth << "] Compute known bits: " << MI << "["
- << Depth << "] Computed for: " << MI << "[" << Depth
- << "] Known: 0x"
- << (Known.Zero | Known.One).toString(16, false) << "\n"
- << "[" << Depth << "] Zero: 0x"
- << Known.Zero.toString(16, false) << "\n"
- << "[" << Depth << "] One: 0x"
- << Known.One.toString(16, false) << "\n");
+ LLVM_DEBUG(dumpResult(MI, Known, Depth));
+
+ // Update the cache.
+ ComputeKnownBitsCache[R] = Known;
}
unsigned GISelKnownBits::computeNumSignBits(Register R,
@@ -389,6 +415,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
return 1; // No demanded elts, better to assume we don't know anything.
LLT DstTy = MRI.getType(R);
+ const unsigned TyBits = DstTy.getScalarSizeInBits();
// Handle the case where this is called on a register that does not have a
// type constraint. This is unlikely to occur except by looking through copies
@@ -397,6 +424,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
if (!DstTy.isValid())
return 1;
+ unsigned FirstAnswer = 1;
switch (Opcode) {
case TargetOpcode::COPY: {
MachineOperand &Src = MI.getOperand(1);
@@ -414,6 +442,16 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits();
return computeNumSignBits(Src, DemandedElts, Depth + 1) + Tmp;
}
+ case TargetOpcode::G_SEXTLOAD: {
+ Register Dst = MI.getOperand(0).getReg();
+ LLT Ty = MRI.getType(Dst);
+ // TODO: add vector support
+ if (Ty.isVector())
+ break;
+ if (MI.hasOneMemOperand())
+ return Ty.getSizeInBits() - (*MI.memoperands_begin())->getSizeInBits();
+ break;
+ }
case TargetOpcode::G_TRUNC: {
Register Src = MI.getOperand(1).getReg();
LLT SrcTy = MRI.getType(Src);
@@ -426,13 +464,34 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
return NumSrcSignBits - (NumSrcBits - DstTyBits);
break;
}
- default:
+ case TargetOpcode::G_INTRINSIC:
+ case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
+ default: {
+ unsigned NumBits =
+ TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);
+ if (NumBits > 1)
+ FirstAnswer = std::max(FirstAnswer, NumBits);
break;
}
+ }
+
+ // Finally, if we can prove that the top bits of the result are 0's or 1's,
+ // use this information.
+ KnownBits Known = getKnownBits(R, DemandedElts, Depth);
+ APInt Mask;
+ if (Known.isNonNegative()) { // sign bit is 0
+ Mask = Known.Zero;
+ } else if (Known.isNegative()) { // sign bit is 1
+ Mask = Known.One;
+ } else {
+ // Nothing known.
+ return FirstAnswer;
+ }
- // TODO: Handle target instructions
- // TODO: Fall back to known bits
- return 1;
+ // Okay, we know that the sign bit in Mask is set. Use CLO to determine
+ // the number of identical bits in the top of the input value.
+ Mask <<= Mask.getBitWidth() - TyBits;
+ return std::max(FirstAnswer, Mask.countLeadingOnes());
}
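
The new fallback at the end of computeNumSignBits turns known top bits into a sign-bit count: if the sign bit is known, every contiguous known-equal bit at the top contributes one sign bit, so counting the leading ones of the appropriate mask gives the answer (the extra shift in the code only matters when the mask is wider than the type). A fixed-width sketch of the same computation, assuming a plain 32-bit value rather than APInt:

#include <cstdint>

static unsigned countLeadingOnes32(uint32_t X) {
  unsigned N = 0;
  for (uint32_t Bit = 1u << 31; Bit && (X & Bit); Bit >>= 1)
    ++N;
  return N;
}

// There is always at least one sign bit; known top zeros (non-negative) or
// known top ones (negative) each add to the count.
unsigned signBitsFromKnown(uint32_t KnownZero, uint32_t KnownOne) {
  unsigned FirstAnswer = 1;
  uint32_t Mask;
  if (KnownZero & (1u << 31))
    Mask = KnownZero;       // sign bit known to be 0
  else if (KnownOne & (1u << 31))
    Mask = KnownOne;        // sign bit known to be 1
  else
    return FirstAnswer;     // sign bit unknown
  unsigned Lead = countLeadingOnes32(Mask);
  return Lead > FirstAnswer ? Lead : FirstAnswer;
}

For example, a 32-bit value whose top 10 bits are known zero yields a mask with 10 leading ones, hence at least 10 sign bits.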
unsigned GISelKnownBits::computeNumSignBits(Register R, unsigned Depth) {
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 17eca2b0301c5..8f6643b2f1935 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -16,12 +16,13 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
-#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -47,7 +48,6 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
-#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
@@ -232,46 +232,35 @@ int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
// Always allocate at least one byte.
Size = std::max<uint64_t>(Size, 1u);
- unsigned Alignment = AI.getAlignment();
- if (!Alignment)
- Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
-
int &FI = FrameIndices[&AI];
- FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
+ FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
return FI;
}
-unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
- unsigned Alignment = 0;
- Type *ValTy = nullptr;
- if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
- Alignment = SI->getAlignment();
- ValTy = SI->getValueOperand()->getType();
- } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
- Alignment = LI->getAlignment();
- ValTy = LI->getType();
- } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+Align IRTranslator::getMemOpAlign(const Instruction &I) {
+ if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
+ return SI->getAlign();
+ if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ return LI->getAlign();
+ }
+ if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
// TODO(PR27168): This instruction has no alignment attribute, but unlike
// the default alignment for load/store, the default here is to assume
// it has NATURAL alignment, not DataLayout-specified alignment.
const DataLayout &DL = AI->getModule()->getDataLayout();
- Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
- ValTy = AI->getCompareOperand()->getType();
- } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
+ return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType()));
+ }
+ if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
// TODO(PR27168): This instruction has no alignment attribute, but unlike
// the default alignment for load/store, the default here is to assume
// it has NATURAL alignment, not DataLayout-specified alignment.
const DataLayout &DL = AI->getModule()->getDataLayout();
- Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
- ValTy = AI->getType();
- } else {
- OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
- R << "unable to translate memop: " << ore::NV("Opcode", &I);
- reportTranslationError(*MF, *TPC, *ORE, R);
- return 1;
+ return Align(DL.getTypeStoreSize(AI->getValOperand()->getType()));
}
-
- return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
+ OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
+ R << "unable to translate memop: " << ore::NV("Opcode", &I);
+ reportTranslationError(*MF, *TPC, *ORE, R);
+ return Align(1);
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
@@ -316,7 +305,7 @@ bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
Flags = MachineInstr::copyFlagsFromInstruction(I);
}
// Negate the last operand of the FSUB
- MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
+ MIRBuilder.buildFNeg(Res, Op1, Flags);
return true;
}
return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
@@ -330,7 +319,7 @@ bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
const Instruction &I = cast<Instruction>(U);
Flags = MachineInstr::copyFlagsFromInstruction(I);
}
- MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
+ MIRBuilder.buildFNeg(Res, Op0, Flags);
return true;
}
@@ -353,8 +342,8 @@ bool IRTranslator::translateCompare(const User &U,
Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
else {
assert(CI && "Instruction should be CmpInst");
- MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
- MachineInstr::copyFlagsFromInstruction(*CI));
+ MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
+ MachineInstr::copyFlagsFromInstruction(*CI));
}
return true;
@@ -603,7 +592,7 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
Cond =
MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
} else {
- const LLT &CmpTy = MRI->getType(CmpOpReg);
+ const LLT CmpTy = MRI->getType(CmpOpReg);
auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
auto Diff = MIB.buildConstant(CmpTy, High - Low);
Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
@@ -631,8 +620,7 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
if (CB.TrueBB == CB.ThisBB->getNextNode()) {
std::swap(CB.TrueBB, CB.FalseBB);
auto True = MIB.buildConstant(i1Ty, 1);
- Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
- .getReg(0);
+ Cond = MIB.buildXor(i1Ty, Cond, True).getReg(0);
}
MIB.buildBrCond(Cond, *CB.TrueBB);
@@ -842,9 +830,16 @@ bool IRTranslator::translateIndirectBr(const User &U,
MIRBuilder.buildBrIndirect(Tgt);
// Link successors.
+ SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
MachineBasicBlock &CurBB = MIRBuilder.getMBB();
- for (const BasicBlock *Succ : successors(&BrInst))
+ for (const BasicBlock *Succ : successors(&BrInst)) {
+ // It's legal for indirectbr instructions to have duplicate blocks in the
+ // destination list. We don't allow this in MIR. Skip anything that's
+ // already a successor.
+ if (!AddedSuccessors.insert(Succ).second)
+ continue;
CurBB.addSuccessor(&getMBB(*Succ));
+ }
return true;
}
@@ -859,11 +854,6 @@ static bool isSwiftError(const Value *V) {
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
const LoadInst &LI = cast<LoadInst>(U);
-
- auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
- : MachineMemOperand::MONone;
- Flags |= MachineMemOperand::MOLoad;
-
if (DL->getTypeStoreSize(LI.getType()) == 0)
return true;
@@ -882,6 +872,9 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
return true;
}
+ auto &TLI = *MF->getSubtarget().getTargetLowering();
+ MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);
+
const MDNode *Ranges =
Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
for (unsigned i = 0; i < Regs.size(); ++i) {
@@ -889,12 +882,12 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
- unsigned BaseAlign = getMemOpAlignment(LI);
+ Align BaseAlign = getMemOpAlign(LI);
AAMDNodes AAMetadata;
LI.getAAMetadata(AAMetadata);
auto MMO = MF->getMachineMemOperand(
- Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
- MinAlign(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
+ Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(),
+ commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
LI.getSyncScopeID(), LI.getOrdering());
MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
}
@@ -904,10 +897,6 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
const StoreInst &SI = cast<StoreInst>(U);
- auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
- : MachineMemOperand::MONone;
- Flags |= MachineMemOperand::MOStore;
-
if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
return true;
@@ -927,17 +916,20 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
return true;
}
+ auto &TLI = *MF->getSubtarget().getTargetLowering();
+ MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
+
for (unsigned i = 0; i < Vals.size(); ++i) {
Register Addr;
MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
- unsigned BaseAlign = getMemOpAlignment(SI);
+ Align BaseAlign = getMemOpAlign(SI);
AAMDNodes AAMetadata;
SI.getAAMetadata(AAMetadata);
auto MMO = MF->getMachineMemOperand(
- Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
- MinAlign(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
+ Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(),
+ commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
SI.getSyncScopeID(), SI.getOrdering());
MIRBuilder.buildStore(Vals[i], Addr, *MMO);
}
@@ -1010,36 +1002,39 @@ bool IRTranslator::translateSelect(const User &U,
ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
- const SelectInst &SI = cast<SelectInst>(U);
uint16_t Flags = 0;
- if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
- Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);
+ if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
+ Flags = MachineInstr::copyFlagsFromInstruction(*SI);
for (unsigned i = 0; i < ResRegs.size(); ++i) {
- MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
- {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
+ MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
}
return true;
}
+bool IRTranslator::translateCopy(const User &U, const Value &V,
+ MachineIRBuilder &MIRBuilder) {
+ Register Src = getOrCreateVReg(V);
+ auto &Regs = *VMap.getVRegs(U);
+ if (Regs.empty()) {
+ Regs.push_back(Src);
+ VMap.getOffsets(U)->push_back(0);
+ } else {
+ // If we already assigned a vreg for this instruction, we can't change that.
+ // Emit a copy to satisfy the users we already emitted.
+ MIRBuilder.buildCopy(Regs[0], Src);
+ }
+ return true;
+}
+
bool IRTranslator::translateBitCast(const User &U,
MachineIRBuilder &MIRBuilder) {
// If we're bitcasting to the source type, we can reuse the source vreg.
if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
- getLLTForType(*U.getType(), *DL)) {
- Register SrcReg = getOrCreateVReg(*U.getOperand(0));
- auto &Regs = *VMap.getVRegs(U);
- // If we already assigned a vreg for this bitcast, we can't change that.
- // Emit a copy to satisfy the users we already emitted.
- if (!Regs.empty())
- MIRBuilder.buildCopy(Regs[0], SrcReg);
- else {
- Regs.push_back(SrcReg);
- VMap.getOffsets(U)->push_back(0);
- }
- return true;
- }
+ getLLTForType(*U.getType(), *DL))
+ return translateCopy(U, *U.getOperand(0), MIRBuilder);
+
return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}
@@ -1053,10 +1048,6 @@ bool IRTranslator::translateCast(unsigned Opcode, const User &U,
bool IRTranslator::translateGetElementPtr(const User &U,
MachineIRBuilder &MIRBuilder) {
- // FIXME: support vector GEPs.
- if (U.getType()->isVectorTy())
- return false;
-
Value &Op0 = *U.getOperand(0);
Register BaseReg = getOrCreateVReg(Op0);
Type *PtrIRTy = Op0.getType();
@@ -1064,6 +1055,24 @@ bool IRTranslator::translateGetElementPtr(const User &U,
Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
+ // Normalize Vector GEP - all scalar operands should be converted to the
+ // splat vector.
+ unsigned VectorWidth = 0;
+ if (auto *VT = dyn_cast<VectorType>(U.getType()))
+ VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
+
+ // We might need to splat the base pointer into a vector if the offsets
+ // are vectors.
+ if (VectorWidth && !PtrTy.isVector()) {
+ BaseReg =
+ MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg)
+ .getReg(0);
+ PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
+ PtrTy = getLLTForType(*PtrIRTy, *DL);
+ OffsetIRTy = DL->getIntPtrType(PtrIRTy);
+ OffsetTy = getLLTForType(*OffsetIRTy, *DL);
+ }
+
int64_t Offset = 0;
for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
GTI != E; ++GTI) {
@@ -1083,7 +1092,6 @@ bool IRTranslator::translateGetElementPtr(const User &U,
}
if (Offset != 0) {
- LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
.getReg(0);
@@ -1091,8 +1099,15 @@ bool IRTranslator::translateGetElementPtr(const User &U,
}
Register IdxReg = getOrCreateVReg(*Idx);
- if (MRI->getType(IdxReg) != OffsetTy)
+ LLT IdxTy = MRI->getType(IdxReg);
+ if (IdxTy != OffsetTy) {
+ if (!IdxTy.isVector() && VectorWidth) {
+ IdxReg = MIRBuilder.buildSplatVector(
+ OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
+ }
+
IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
+ }
// N = N + Idx * ElementSize;
// Avoid doing it for ElementSize of 1.
@@ -1101,7 +1116,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
auto ElementSizeMIB = MIRBuilder.buildConstant(
getLLTForType(*OffsetIRTy, *DL), ElementSize);
GepOffsetReg =
- MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0);
+ MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
} else
GepOffsetReg = IdxReg;
@@ -1111,7 +1126,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
if (Offset != 0) {
auto OffsetMIB =
- MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
+ MIRBuilder.buildConstant(OffsetTy, Offset);
MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
return true;
}
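
The GEP hunks above do two things: constant indices are folded into a running byte offset (emitted as a single G_PTR_ADD at the end of a constant run), and each variable index is scaled by its element size with a G_MUL before being added; when the GEP produces a vector, the base pointer and any scalar indices are splatted first. The address arithmetic itself is ordinary pointer math; a small illustration with a made-up struct layout (the type S below is purely an example, not anything from the patch):

#include <cstddef>
#include <cstdint>

struct S { int64_t A; int32_t Arr[8]; }; // example layout only

// Equivalent of: getelementptr %S, %S* %Base, i64 %I, i32 1, i64 %J
// The constant index (field number 1) folds into a fixed byte offset;
// variable indices are multiplied by their element size and added.
uintptr_t gepAddress(const S *Base, uint64_t I, uint64_t J) {
  uintptr_t Addr = reinterpret_cast<uintptr_t>(Base);
  Addr += I * sizeof(S);        // variable index scaled by element size
  Addr += offsetof(S, Arr);     // constant field offset, folded
  Addr += J * sizeof(int32_t);  // variable array index, scaled again
  return Addr;
}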
@@ -1133,20 +1148,21 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
ICall.addUse(getOrCreateVReg(**AI));
- unsigned DstAlign = 0, SrcAlign = 0;
+ Align DstAlign;
+ Align SrcAlign;
unsigned IsVol =
cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
->getZExtValue();
if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
- DstAlign = std::max<unsigned>(MCI->getDestAlignment(), 1);
- SrcAlign = std::max<unsigned>(MCI->getSourceAlignment(), 1);
+ DstAlign = MCI->getDestAlign().valueOrOne();
+ SrcAlign = MCI->getSourceAlign().valueOrOne();
} else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
- DstAlign = std::max<unsigned>(MMI->getDestAlignment(), 1);
- SrcAlign = std::max<unsigned>(MMI->getSourceAlignment(), 1);
+ DstAlign = MMI->getDestAlign().valueOrOne();
+ SrcAlign = MMI->getSourceAlign().valueOrOne();
} else {
auto *MSI = cast<MemSetInst>(&CI);
- DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1);
+ DstAlign = MSI->getDestAlign().valueOrOne();
}
// We need to propagate the tail call flag from the IR inst as an argument.
@@ -1171,8 +1187,8 @@ void IRTranslator::getStackGuard(Register DstReg,
MachineIRBuilder &MIRBuilder) {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
- auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
- MIB.addDef(DstReg);
+ auto MIB =
+ MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
auto &TLI = *MF->getSubtarget().getTargetLowering();
Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
@@ -1184,18 +1200,16 @@ void IRTranslator::getStackGuard(Register DstReg,
MachineMemOperand::MODereferenceable;
MachineMemOperand *MemRef =
MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
- DL->getPointerABIAlignment(0).value());
+ DL->getPointerABIAlignment(0));
MIB.setMemRefs({MemRef});
}
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
- MIRBuilder.buildInstr(Op)
- .addDef(ResRegs[0])
- .addDef(ResRegs[1])
- .addUse(getOrCreateVReg(*CI.getOperand(0)))
- .addUse(getOrCreateVReg(*CI.getOperand(1)));
+ MIRBuilder.buildInstr(
+ Op, {ResRegs[0], ResRegs[1]},
+ {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
return true;
}
@@ -1206,8 +1220,12 @@ unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
break;
case Intrinsic::bswap:
return TargetOpcode::G_BSWAP;
- case Intrinsic::bitreverse:
+ case Intrinsic::bitreverse:
return TargetOpcode::G_BITREVERSE;
+ case Intrinsic::fshl:
+ return TargetOpcode::G_FSHL;
+ case Intrinsic::fshr:
+ return TargetOpcode::G_FSHR;
case Intrinsic::ceil:
return TargetOpcode::G_FCEIL;
case Intrinsic::cos:
@@ -1258,6 +1276,8 @@ unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
return TargetOpcode::G_INTRINSIC_TRUNC;
case Intrinsic::readcyclecounter:
return TargetOpcode::G_READCYCLECOUNTER;
+ case Intrinsic::ptrmask:
+ return TargetOpcode::G_PTRMASK;
}
return Intrinsic::not_intrinsic;
}
@@ -1282,6 +1302,51 @@ bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
return true;
}
+// TODO: Include ConstrainedOps.def when all strict instructions are defined.
+static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
+ switch (ID) {
+ case Intrinsic::experimental_constrained_fadd:
+ return TargetOpcode::G_STRICT_FADD;
+ case Intrinsic::experimental_constrained_fsub:
+ return TargetOpcode::G_STRICT_FSUB;
+ case Intrinsic::experimental_constrained_fmul:
+ return TargetOpcode::G_STRICT_FMUL;
+ case Intrinsic::experimental_constrained_fdiv:
+ return TargetOpcode::G_STRICT_FDIV;
+ case Intrinsic::experimental_constrained_frem:
+ return TargetOpcode::G_STRICT_FREM;
+ case Intrinsic::experimental_constrained_fma:
+ return TargetOpcode::G_STRICT_FMA;
+ case Intrinsic::experimental_constrained_sqrt:
+ return TargetOpcode::G_STRICT_FSQRT;
+ default:
+ return 0;
+ }
+}
+
+bool IRTranslator::translateConstrainedFPIntrinsic(
+ const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
+ fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
+
+ unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
+ if (!Opcode)
+ return false;
+
+ unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
+ if (EB == fp::ExceptionBehavior::ebIgnore)
+ Flags |= MachineInstr::NoFPExcept;
+
+ SmallVector<llvm::SrcOp, 4> VRegs;
+ VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
+ if (!FPI.isUnaryOp())
+ VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
+ if (FPI.isTernaryOp())
+ VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
+
+ MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
+ return true;
+}
+
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
MachineIRBuilder &MIRBuilder) {
@@ -1369,10 +1434,10 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
// FIXME: Get alignment
- MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
- .addUse(getOrCreateVReg(*Ptr))
- .addMemOperand(MF->getMachineMemOperand(
- MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
+ MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
+ .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
+ MachineMemOperand::MOStore,
+ ListSize, Align(1)));
return true;
}
case Intrinsic::dbg_value: {
@@ -1385,7 +1450,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- MIRBuilder.buildDirectDbgValue(0, DI.getVariable(), DI.getExpression());
+ MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
} else if (const auto *CI = dyn_cast<Constant>(V)) {
MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
} else {
@@ -1411,6 +1476,14 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
case Intrinsic::smul_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
+ case Intrinsic::uadd_sat:
+ return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
+ case Intrinsic::sadd_sat:
+ return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
+ case Intrinsic::usub_sat:
+ return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
+ case Intrinsic::ssub_sat:
+ return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
case Intrinsic::fmuladd: {
const TargetMachine &TM = MF->getTarget();
const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
@@ -1423,14 +1496,14 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
TLI.getValueType(*DL, CI.getType()))) {
// TODO: Revisit this to see if we should move this part of the
// lowering to the combiner.
- MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
- MachineInstr::copyFlagsFromInstruction(CI));
+ MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
+ MachineInstr::copyFlagsFromInstruction(CI));
} else {
LLT Ty = getLLTForType(*CI.getType(), *DL);
- auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
- MachineInstr::copyFlagsFromInstruction(CI));
- MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
- MachineInstr::copyFlagsFromInstruction(CI));
+ auto FMul = MIRBuilder.buildFMul(
+ Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
+ MIRBuilder.buildFAdd(Dst, FMul, Op2,
+ MachineInstr::copyFlagsFromInstruction(CI));
}
return true;
}
@@ -1468,7 +1541,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
*MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile,
- PtrTy.getSizeInBits() / 8, 8));
+ PtrTy.getSizeInBits() / 8, Align(8)));
return true;
}
case Intrinsic::stacksave: {
@@ -1508,9 +1581,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
: TargetOpcode::G_CTTZ_ZERO_UNDEF
: Cst->isZero() ? TargetOpcode::G_CTLZ
: TargetOpcode::G_CTLZ_ZERO_UNDEF;
- MIRBuilder.buildInstr(Opcode)
- .addDef(getOrCreateVReg(CI))
- .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
+ {getOrCreateVReg(*CI.getArgOperand(0))});
return true;
}
case Intrinsic::invariant_start: {
@@ -1526,54 +1598,63 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
case Intrinsic::sideeffect:
// Discard annotate attributes, assumptions, and artificial side-effects.
return true;
+ case Intrinsic::read_volatile_register:
case Intrinsic::read_register: {
Value *Arg = CI.getArgOperand(0);
- MIRBuilder.buildInstr(TargetOpcode::G_READ_REGISTER)
- .addDef(getOrCreateVReg(CI))
- .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
+ MIRBuilder
+ .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
+ .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
+ return true;
+ }
+ case Intrinsic::write_register: {
+ Value *Arg = CI.getArgOperand(0);
+ MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
+ .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
+ .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
return true;
}
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC:
+#include "llvm/IR/ConstrainedOps.def"
+ return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
+ MIRBuilder);
+
}
return false;
}
-bool IRTranslator::translateInlineAsm(const CallInst &CI,
+bool IRTranslator::translateInlineAsm(const CallBase &CB,
MachineIRBuilder &MIRBuilder) {
- const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
- if (!IA.getConstraintString().empty())
- return false;
- unsigned ExtraInfo = 0;
- if (IA.hasSideEffects())
- ExtraInfo |= InlineAsm::Extra_HasSideEffects;
- if (IA.getDialect() == InlineAsm::AD_Intel)
- ExtraInfo |= InlineAsm::Extra_AsmDialect;
+ const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
- MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
- .addExternalSymbol(IA.getAsmString().c_str())
- .addImm(ExtraInfo);
+ if (!ALI) {
+ LLVM_DEBUG(
+ dbgs() << "Inline asm lowering is not supported for this target yet\n");
+ return false;
+ }
- return true;
+ return ALI->lowerInlineAsm(
+ MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}
-bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
+bool IRTranslator::translateCallBase(const CallBase &CB,
MachineIRBuilder &MIRBuilder) {
- const Instruction &I = *CS.getInstruction();
- ArrayRef<Register> Res = getOrCreateVRegs(I);
+ ArrayRef<Register> Res = getOrCreateVRegs(CB);
SmallVector<ArrayRef<Register>, 8> Args;
Register SwiftInVReg = 0;
Register SwiftErrorVReg = 0;
- for (auto &Arg : CS.args()) {
+ for (auto &Arg : CB.args()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
assert(SwiftInVReg == 0 && "Expected only one swift error argument");
LLT Ty = getLLTForType(*Arg->getType(), *DL);
SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
- &I, &MIRBuilder.getMBB(), Arg));
+ &CB, &MIRBuilder.getMBB(), Arg));
Args.emplace_back(makeArrayRef(SwiftInVReg));
SwiftErrorVReg =
- SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
+ SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
continue;
}
Args.push_back(getOrCreateVRegs(*Arg));
@@ -1583,8 +1664,8 @@ bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
// optimize into tail calls. Instead, we defer that to selection where a final
// scan is done to check if any instructions are calls.
bool Success =
- CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg,
- [&]() { return getOrCreateVReg(*CS.getCalledValue()); });
+ CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
+ [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
// Check if we just inserted a tail call.
if (Success) {
@@ -1622,7 +1703,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
}
if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
- return translateCallSite(&CI, MIRBuilder);
+ return translateCallBase(CI, MIRBuilder);
assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
@@ -1670,14 +1751,12 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
TargetLowering::IntrinsicInfo Info;
// TODO: Add a GlobalISel version of getTgtMemIntrinsic.
if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
- MaybeAlign Align = Info.align;
- if (!Align)
- Align = MaybeAlign(
- DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext())));
+ Align Alignment = Info.align.getValueOr(
+ DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
uint64_t Size = Info.memVT.getStoreSize();
- MIB.addMemOperand(MF->getMachineMemOperand(
- MachinePointerInfo(Info.ptrVal), Info.flags, Size, Align->value()));
+ MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
+ Info.flags, Size, Alignment));
}
return true;
@@ -1691,9 +1770,8 @@ bool IRTranslator::translateInvoke(const User &U,
const BasicBlock *ReturnBB = I.getSuccessor(0);
const BasicBlock *EHPadBB = I.getSuccessor(1);
- const Value *Callee = I.getCalledValue();
- const Function *Fn = dyn_cast<Function>(Callee);
- if (isa<InlineAsm>(Callee))
+ const Function *Fn = I.getCalledFunction();
+ if (I.isInlineAsm())
return false;
// FIXME: support invoking patchpoint and statepoint intrinsics.
@@ -1717,7 +1795,7 @@ bool IRTranslator::translateInvoke(const User &U,
MCSymbol *BeginSymbol = Context.createTempSymbol();
MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
- if (!translateCallSite(&I, MIRBuilder))
+ if (!translateCallBase(I, MIRBuilder))
return false;
MCSymbol *EndSymbol = Context.createTempSymbol();
@@ -1817,12 +1895,7 @@ bool IRTranslator::translateAlloca(const User &U,
return false;
// Now we're in the harder dynamic case.
- Type *Ty = AI.getAllocatedType();
- unsigned Align =
- std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
-
Register NumElts = getOrCreateVReg(*AI.getArraySize());
-
Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
if (MRI->getType(NumElts) != IntPtrTy) {
@@ -1831,29 +1904,30 @@ bool IRTranslator::translateAlloca(const User &U,
NumElts = ExtElts;
}
+ Type *Ty = AI.getAllocatedType();
+
Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
Register TySize =
getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
- unsigned StackAlign =
- MF->getSubtarget().getFrameLowering()->getStackAlignment();
- if (Align <= StackAlign)
- Align = 0;
-
// Round the size of the allocation up to the stack alignment size
// by add SA-1 to the size. This doesn't overflow because we're computing
// an address inside an alloca.
- auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign - 1);
+ Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
+ auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
MachineInstr::NoUWrap);
auto AlignCst =
- MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign - 1));
+ MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
- MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Align);
+ Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
+ if (Alignment <= StackAlign)
+ Alignment = Align(1);
+ MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
- MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
+ MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
assert(MF->getFrameInfo().hasVarSizedObjects());
return true;
}
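
Rounding the dynamic allocation size up to the stack alignment, as the comment in the hunk says, is just "add align-1, then mask"; the overflow caveat from the comment applies because the result addresses memory inside an alloca. A self-contained sketch of that arithmetic, assuming a power-of-two alignment:

#include <cstdint>

// Round Size up to a power-of-two alignment: add (Align - 1), then clear the
// low bits. E.g. Size = 37, Align = 16 -> 48.
uint64_t alignUp(uint64_t Size, uint64_t Align) {
  return (Size + Align - 1) & ~(Align - 1);
}

uint64_t dynAllocaSize(uint64_t NumElts, uint64_t EltSize, uint64_t StackAlign) {
  return alignUp(NumElts * EltSize, StackAlign);
}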
@@ -1863,10 +1937,9 @@ bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
// we're completely discarding the i64/double distinction here (amongst
// others). Fortunately the ABIs I know of where that matters don't use va_arg
// anyway but that's not guaranteed.
- MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
- .addDef(getOrCreateVReg(U))
- .addUse(getOrCreateVReg(*U.getOperand(0)))
- .addImm(DL->getABITypeAlignment(U.getType()));
+ MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
+ {getOrCreateVReg(*U.getOperand(0)),
+ DL->getABITypeAlign(U.getType()).value()});
return true;
}
@@ -1874,17 +1947,8 @@ bool IRTranslator::translateInsertElement(const User &U,
MachineIRBuilder &MIRBuilder) {
// If it is a <1 x Ty> vector, use the scalar as it is
// not a legal vector type in LLT.
- if (U.getType()->getVectorNumElements() == 1) {
- Register Elt = getOrCreateVReg(*U.getOperand(1));
- auto &Regs = *VMap.getVRegs(U);
- if (Regs.empty()) {
- Regs.push_back(Elt);
- VMap.getOffsets(U)->push_back(0);
- } else {
- MIRBuilder.buildCopy(Regs[0], Elt);
- }
- return true;
- }
+ if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
+ return translateCopy(U, *U.getOperand(1), MIRBuilder);
Register Res = getOrCreateVReg(U);
Register Val = getOrCreateVReg(*U.getOperand(0));
@@ -1898,17 +1962,9 @@ bool IRTranslator::translateExtractElement(const User &U,
MachineIRBuilder &MIRBuilder) {
// If it is a <1 x Ty> vector, use the scalar as it is
// not a legal vector type in LLT.
- if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
- Register Elt = getOrCreateVReg(*U.getOperand(0));
- auto &Regs = *VMap.getVRegs(U);
- if (Regs.empty()) {
- Regs.push_back(Elt);
- VMap.getOffsets(U)->push_back(0);
- } else {
- MIRBuilder.buildCopy(Regs[0], Elt);
- }
- return true;
- }
+ if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
+ return translateCopy(U, *U.getOperand(0), MIRBuilder);
+
Register Res = getOrCreateVReg(U);
Register Val = getOrCreateVReg(*U.getOperand(0));
const auto &TLI = *MF->getSubtarget().getTargetLowering();
@@ -1924,8 +1980,8 @@ bool IRTranslator::translateExtractElement(const User &U,
if (!Idx)
Idx = getOrCreateVReg(*U.getOperand(1));
if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
- const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
- Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
+ const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
+ Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
}
MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
return true;
@@ -1933,13 +1989,16 @@ bool IRTranslator::translateExtractElement(const User &U,
bool IRTranslator::translateShuffleVector(const User &U,
MachineIRBuilder &MIRBuilder) {
- SmallVector<int, 8> Mask;
- ShuffleVectorInst::getShuffleMask(cast<Constant>(U.getOperand(2)), Mask);
+ ArrayRef<int> Mask;
+ if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
+ Mask = SVI->getShuffleMask();
+ else
+ Mask = cast<ConstantExpr>(U).getShuffleMask();
ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
- MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
- .addDef(getOrCreateVReg(U))
- .addUse(getOrCreateVReg(*U.getOperand(0)))
- .addUse(getOrCreateVReg(*U.getOperand(1)))
+ MIRBuilder
+ .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
+ {getOrCreateVReg(*U.getOperand(0)),
+ getOrCreateVReg(*U.getOperand(1))})
.addShuffleMask(MaskAlloc);
return true;
}
@@ -1961,12 +2020,8 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
MachineIRBuilder &MIRBuilder) {
const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
- if (I.isWeak())
- return false;
-
- auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
- : MachineMemOperand::MONone;
- Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ auto &TLI = *MF->getSubtarget().getTargetLowering();
+ auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
Type *ResType = I.getType();
Type *ValType = ResType->Type::getStructElementType(0);
@@ -1983,21 +2038,18 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
MIRBuilder.buildAtomicCmpXchgWithSuccess(
OldValRes, SuccessRes, Addr, Cmp, NewVal,
- *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
- Flags, DL->getTypeStoreSize(ValType),
- getMemOpAlignment(I), AAMetadata, nullptr,
- I.getSyncScopeID(), I.getSuccessOrdering(),
- I.getFailureOrdering()));
+ *MF->getMachineMemOperand(
+ MachinePointerInfo(I.getPointerOperand()), Flags,
+ DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr,
+ I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering()));
return true;
}
bool IRTranslator::translateAtomicRMW(const User &U,
MachineIRBuilder &MIRBuilder) {
const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
-
- auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
- : MachineMemOperand::MONone;
- Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+ auto &TLI = *MF->getSubtarget().getTargetLowering();
+ auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
Type *ResType = I.getType();
@@ -2057,8 +2109,8 @@ bool IRTranslator::translateAtomicRMW(const User &U,
Opcode, Res, Addr, Val,
*MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
Flags, DL->getTypeStoreSize(ResType),
- getMemOpAlignment(I), AAMetadata,
- nullptr, I.getSyncScopeID(), I.getOrdering()));
+ getMemOpAlign(I), AAMetadata, nullptr,
+ I.getSyncScopeID(), I.getOrdering()));
return true;
}
@@ -2070,6 +2122,21 @@ bool IRTranslator::translateFence(const User &U,
return true;
}
+bool IRTranslator::translateFreeze(const User &U,
+ MachineIRBuilder &MIRBuilder) {
+ const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
+ const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
+
+ assert(DstRegs.size() == SrcRegs.size() &&
+ "Freeze with different source and destination type?");
+
+ for (unsigned I = 0; I < DstRegs.size(); ++I) {
+ MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
+ }
+
+ return true;
+}
+
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
DILocationVerifier Verifier;
@@ -2122,6 +2189,10 @@ bool IRTranslator::translate(const Instruction &Inst) {
else
EntryBuilder->setDebugLoc(DebugLoc());
+ auto &TLI = *MF->getSubtarget().getTargetLowering();
+ if (TLI.fallBackToDAGISel(Inst))
+ return false;
+
switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
case Instruction::OPCODE: \
@@ -2139,22 +2210,16 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
EntryBuilder->buildFConstant(Reg, *CF);
else if (isa<UndefValue>(C))
EntryBuilder->buildUndef(Reg);
- else if (isa<ConstantPointerNull>(C)) {
- // As we are trying to build a constant val of 0 into a pointer,
- // insert a cast to make them correct with respect to types.
- unsigned NullSize = DL->getTypeSizeInBits(C.getType());
- auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
- auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
- Register ZeroReg = getOrCreateVReg(*ZeroVal);
- EntryBuilder->buildCast(Reg, ZeroReg);
- } else if (auto GV = dyn_cast<GlobalValue>(&C))
+ else if (isa<ConstantPointerNull>(C))
+ EntryBuilder->buildConstant(Reg, 0);
+ else if (auto GV = dyn_cast<GlobalValue>(&C))
EntryBuilder->buildGlobalValue(Reg, GV);
else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
if (!CAZ->getType()->isVectorTy())
return false;
// Return the scalar if it is a <1 x Ty> vector.
if (CAZ->getNumElements() == 1)
- return translate(*CAZ->getElementValue(0u), Reg);
+ return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get());
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
Constant &Elt = *CAZ->getElementValue(i);
@@ -2164,7 +2229,8 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
} else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
// Return the scalar if it is a <1 x Ty> vector.
if (CV->getNumElements() == 1)
- return translate(*CV->getElementAsConstant(0), Reg);
+ return translateCopy(C, *CV->getElementAsConstant(0),
+ *EntryBuilder.get());
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumElements(); ++i) {
Constant &Elt = *CV->getElementAsConstant(i);
@@ -2182,7 +2248,7 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
}
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
- return translate(*CV->getOperand(0), Reg);
+ return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get());
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
@@ -2319,10 +2385,18 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
// Make our arguments/constants entry block fallthrough to the IR entry block.
EntryBB->addSuccessor(&getMBB(F.front()));
+ if (CLI->fallBackToDAGISel(F)) {
+ OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
+ F.getSubprogram(), &F.getEntryBlock());
+ R << "unable to lower function: " << ore::NV("Prototype", F.getType());
+ reportTranslationError(*MF, *TPC, *ORE, R);
+ return false;
+ }
+
// Lower the actual args into this basic block.
SmallVector<ArrayRef<Register>, 8> VRegArgs;
for (const Argument &Arg: F.args()) {
- if (DL->getTypeStoreSize(Arg.getType()) == 0)
+ if (DL->getTypeStoreSize(Arg.getType()).isZero())
continue; // Don't handle zero sized types.
ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
VRegArgs.push_back(VRegs);
@@ -2352,6 +2426,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
+ RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
for (const BasicBlock *BB : RPOT) {
MachineBasicBlock &MBB = getMBB(*BB);
// Set the insertion point of all the following translations to
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
new file mode 100644
index 0000000000000..2ce1d414e7550
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -0,0 +1,667 @@
+//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+
+#define DEBUG_TYPE "inline-asm-lowering"
+
+using namespace llvm;
+
+void InlineAsmLowering::anchor() {}
+
+namespace {
+
+/// GISelAsmOperandInfo - This contains information for each constraint that we
+/// are lowering.
+class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
+public:
+ /// Regs - If this is a register or register class operand, this
+ /// contains the set of assigned registers corresponding to the operand.
+ SmallVector<Register, 1> Regs;
+
+ explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
+ : TargetLowering::AsmOperandInfo(Info) {}
+};
+
+using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;
+
+class ExtraFlags {
+ unsigned Flags = 0;
+
+public:
+ explicit ExtraFlags(const CallBase &CB) {
+ const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
+ if (IA->hasSideEffects())
+ Flags |= InlineAsm::Extra_HasSideEffects;
+ if (IA->isAlignStack())
+ Flags |= InlineAsm::Extra_IsAlignStack;
+ if (CB.isConvergent())
+ Flags |= InlineAsm::Extra_IsConvergent;
+ Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
+ }
+
+ void update(const TargetLowering::AsmOperandInfo &OpInfo) {
+ // Ideally, we would only check against memory constraints. However, the
+ // meaning of an Other constraint can be target-specific and we can't easily
+ // reason about it. Therefore, be conservative and set MayLoad/MayStore
+ // for Other constraints as well.
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
+ OpInfo.ConstraintType == TargetLowering::C_Other) {
+ if (OpInfo.Type == InlineAsm::isInput)
+ Flags |= InlineAsm::Extra_MayLoad;
+ else if (OpInfo.Type == InlineAsm::isOutput)
+ Flags |= InlineAsm::Extra_MayStore;
+ else if (OpInfo.Type == InlineAsm::isClobber)
+ Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
+ }
+ }
+
+ unsigned get() const { return Flags; }
+};
+
+} // namespace
+
+/// Assign virtual/physical registers for the specified register operand.
+static void getRegistersForValue(MachineFunction &MF,
+ MachineIRBuilder &MIRBuilder,
+ GISelAsmOperandInfo &OpInfo,
+ GISelAsmOperandInfo &RefOpInfo) {
+
+ const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+
+ // No work to do for memory operations.
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory)
+ return;
+
+ // If this is a constraint for a single physreg, or a constraint for a
+ // register class, find it.
+ Register AssignedReg;
+ const TargetRegisterClass *RC;
+ std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
+ &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
+ // RC is unset only on failure. Return immediately.
+ if (!RC)
+ return;
+
+ // No need to allocate a matching input constraint since the constraint it's
+ // matching to has already been allocated.
+ if (OpInfo.isMatchingInputConstraint())
+ return;
+
+ // Initialize NumRegs.
+ unsigned NumRegs = 1;
+ if (OpInfo.ConstraintVT != MVT::Other)
+ NumRegs =
+ TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);
+
+ // If this is a constraint for a specific physical register, but the type of
+ // the operand requires more than one register to be passed, we allocate the
+ // required amount of physical registers, starting from the selected physical
+ // register.
+ // For this, first retrieve a register iterator for the given register class
+ TargetRegisterClass::iterator I = RC->begin();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+
+ // Advance the iterator to the assigned register (if set)
+ if (AssignedReg) {
+ for (; *I != AssignedReg; ++I)
+ assert(I != RC->end() && "AssignedReg should be a member of provided RC");
+ }
+
+ // Finally, assign the registers. If the AssignedReg isn't set, create virtual
+ // registers with the provided register class
+ for (; NumRegs; --NumRegs, ++I) {
+ assert(I != RC->end() && "Ran out of registers to allocate!");
+ Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
+ OpInfo.Regs.push_back(R);
+ }
+}
+
+/// Return an integer indicating how general CT is.
+static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
+ switch (CT) {
+ case TargetLowering::C_Immediate:
+ case TargetLowering::C_Other:
+ case TargetLowering::C_Unknown:
+ return 0;
+ case TargetLowering::C_Register:
+ return 1;
+ case TargetLowering::C_RegisterClass:
+ return 2;
+ case TargetLowering::C_Memory:
+ return 3;
+ }
+ llvm_unreachable("Invalid constraint type");
+}
+
+static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
+ const TargetLowering *TLI) {
+ assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
+ unsigned BestIdx = 0;
+ TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
+ int BestGenerality = -1;
+
+ // Loop over the options, keeping track of the most general one.
+ for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
+ TargetLowering::ConstraintType CType =
+ TLI->getConstraintType(OpInfo.Codes[i]);
+
+ // Indirect 'other' or 'immediate' constraints are not allowed.
+ if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
+ CType == TargetLowering::C_Register ||
+ CType == TargetLowering::C_RegisterClass))
+ continue;
+
+ // If this is an 'other' or 'immediate' constraint, see if the operand is
+ // valid for it. For example, on X86 we might have an 'rI' constraint. If
+ // the operand is an integer in the range [0..31] we want to use I (saving a
+ // load of a register), otherwise we must use 'r'.
+ if (CType == TargetLowering::C_Other ||
+ CType == TargetLowering::C_Immediate) {
+ assert(OpInfo.Codes[i].size() == 1 &&
+ "Unhandled multi-letter 'other' constraint");
+ // FIXME: prefer immediate constraints if the target allows it
+ }
+
+ // Things with matching constraints can only be registers, per gcc
+ // documentation. This mainly affects "g" constraints.
+ if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
+ continue;
+
+ // This constraint letter is more general than the previous one, use it.
+ int Generality = getConstraintGenerality(CType);
+ if (Generality > BestGenerality) {
+ BestType = CType;
+ BestIdx = i;
+ BestGenerality = Generality;
+ }
+ }
+
+ OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
+ OpInfo.ConstraintType = BestType;
+}
+
+static void computeConstraintToUse(const TargetLowering *TLI,
+ TargetLowering::AsmOperandInfo &OpInfo) {
+ assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
+
+ // Single-letter constraints ('r') are very common.
+ if (OpInfo.Codes.size() == 1) {
+ OpInfo.ConstraintCode = OpInfo.Codes[0];
+ OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
+ } else {
+ chooseConstraint(OpInfo, TLI);
+ }
+
+ // 'X' matches anything.
+ if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
+ // Labels and constants are handled elsewhere ('X' is the only thing
+ // that matches labels). For Functions, the type here is the type of
+ // the result, which is not what we want to look at; leave them alone.
+ Value *Val = OpInfo.CallOperandVal;
+ if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
+ return;
+
+ // Otherwise, try to resolve it to something we know about by looking at
+ // the actual operand type.
+ if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
+ OpInfo.ConstraintCode = Repl;
+ OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
+ }
+ }
+}
+
+static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
+ unsigned Flag = I.getOperand(OpIdx).getImm();
+ return InlineAsm::getNumOperandRegisters(Flag);
+}
+
+static bool buildAnyextOrCopy(Register Dst, Register Src,
+ MachineIRBuilder &MIRBuilder) {
+ const TargetRegisterInfo *TRI =
+ MIRBuilder.getMF().getSubtarget().getRegisterInfo();
+ MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+
+ auto SrcTy = MRI->getType(Src);
+ if (!SrcTy.isValid()) {
+ LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
+ return false;
+ }
+ unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
+ unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);
+
+ if (DstSize < SrcSize) {
+ LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
+ return false;
+ }
+
+ // Attempt to anyext small scalar sources.
+ if (DstSize > SrcSize) {
+ if (!SrcTy.isScalar()) {
+ LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of"
+ "destination register class\n");
+ return false;
+ }
+ Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
+ }
+
+ MIRBuilder.buildCopy(Dst, Src);
+ return true;
+}
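
A minimal sketch of the size policy buildAnyextOrCopy enforces, with plain integers standing in for register sizes (illustrative only; the helper itself operates on LLTs and register classes): a smaller destination is rejected, equal sizes copy directly, and a wider scalar destination is any-extended before the copy.

#include <cassert>

enum class CopyKind { Invalid, PlainCopy, AnyExtThenCopy };

// Sizes are in bits; SrcIsScalar mirrors the SrcTy.isScalar() check above.
static CopyKind classifyCopy(unsigned DstSize, unsigned SrcSize,
                             bool SrcIsScalar) {
  if (DstSize < SrcSize)
    return CopyKind::Invalid;   // input can't fit in the destination
  if (DstSize == SrcSize)
    return CopyKind::PlainCopy; // straight COPY
  return SrcIsScalar ? CopyKind::AnyExtThenCopy // G_ANYEXT to DstSize, then COPY
                     : CopyKind::Invalid;       // non-scalars are not extended
}

int main() {
  assert(classifyCopy(32, 16, /*SrcIsScalar=*/true) == CopyKind::AnyExtThenCopy);
  assert(classifyCopy(32, 32, /*SrcIsScalar=*/false) == CopyKind::PlainCopy);
  assert(classifyCopy(16, 32, /*SrcIsScalar=*/true) == CopyKind::Invalid);
  return 0;
}
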
+
+bool InlineAsmLowering::lowerInlineAsm(
+ MachineIRBuilder &MIRBuilder, const CallBase &Call,
+ std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
+ const {
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
+
+ /// ConstraintOperands - Information about all of the constraints.
+ GISelAsmOperandInfoVector ConstraintOperands;
+
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = MF.getFunction();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+ MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+
+ TargetLowering::AsmOperandInfoVector TargetConstraints =
+ TLI->ParseConstraints(DL, TRI, Call);
+
+ ExtraFlags ExtraInfo(Call);
+ unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
+ unsigned ResNo = 0; // ResNo - The result number of the next output.
+ for (auto &T : TargetConstraints) {
+ ConstraintOperands.push_back(GISelAsmOperandInfo(T));
+ GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
+
+ // Compute the value type for each operand.
+ if (OpInfo.Type == InlineAsm::isInput ||
+ (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
+
+ OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo++));
+
+ if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
+ LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
+ return false;
+ }
+
+ Type *OpTy = OpInfo.CallOperandVal->getType();
+
+ // If this is an indirect operand, the operand is a pointer to the
+ // accessed type.
+ if (OpInfo.isIndirect) {
+ PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
+ if (!PtrTy)
+ report_fatal_error("Indirect operand for inline asm not a pointer!");
+ OpTy = PtrTy->getElementType();
+ }
+
+ // FIXME: Support aggregate input operands
+ if (!OpTy->isSingleValueType()) {
+ LLVM_DEBUG(
+ dbgs() << "Aggregate input operands are not supported yet\n");
+ return false;
+ }
+
+ OpInfo.ConstraintVT = TLI->getValueType(DL, OpTy, true).getSimpleVT();
+
+ } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
+ assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
+ if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
+ OpInfo.ConstraintVT =
+ TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
+ } else {
+ assert(ResNo == 0 && "Asm only has one result!");
+ OpInfo.ConstraintVT = TLI->getSimpleValueType(DL, Call.getType());
+ }
+ ++ResNo;
+ } else {
+ OpInfo.ConstraintVT = MVT::Other;
+ }
+
+ // Compute the constraint code and ConstraintType to use.
+ computeConstraintToUse(TLI, OpInfo);
+
+    // The selected constraint type might expose new side effects.
+ ExtraInfo.update(OpInfo);
+ }
+
+  // At this point, all operand types are decided.
+  // Create the MachineInstr, but don't insert it yet, since lowering the
+  // input operands may still emit instructions ahead of it.
+ auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
+ .addExternalSymbol(IA->getAsmString().c_str())
+ .addImm(ExtraInfo.get());
+
+  // Starting from this operand, a flag followed by its register(s) is added to
+  // Inst for each constraint. StartIdx is used later to resolve matching input
+  // constraints.
+ unsigned StartIdx = Inst->getNumOperands();
+
+ // Collects the output operands for later processing
+ GISelAsmOperandInfoVector OutputOperands;
+
+ for (auto &OpInfo : ConstraintOperands) {
+ GISelAsmOperandInfo &RefOpInfo =
+ OpInfo.isMatchingInputConstraint()
+ ? ConstraintOperands[OpInfo.getMatchedOperand()]
+ : OpInfo;
+
+ // Assign registers for register operands
+ getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);
+
+ switch (OpInfo.Type) {
+ case InlineAsm::isOutput:
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
+ unsigned ConstraintID =
+ TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
+ assert(ConstraintID != InlineAsm::Constraint_Unknown &&
+ "Failed to convert memory constraint code to constraint id.");
+
+ // Add information to the INLINEASM instruction to know about this
+ // output.
+ unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
+ OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
+ Inst.addImm(OpFlags);
+ ArrayRef<Register> SourceRegs =
+ GetOrCreateVRegs(*OpInfo.CallOperandVal);
+ assert(
+ SourceRegs.size() == 1 &&
+ "Expected the memory output to fit into a single virtual register");
+ Inst.addReg(SourceRegs[0]);
+ } else {
+        // Otherwise, this outputs to a register (directly for C_Register /
+        // C_RegisterClass). Find a register that we can use.
+ assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
+ OpInfo.ConstraintType == TargetLowering::C_RegisterClass);
+
+ if (OpInfo.Regs.empty()) {
+ LLVM_DEBUG(dbgs()
+ << "Couldn't allocate output register for constraint\n");
+ return false;
+ }
+
+ // Add information to the INLINEASM instruction to know that this
+ // register is set.
+ unsigned Flag = InlineAsm::getFlagWord(
+ OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
+ : InlineAsm::Kind_RegDef,
+ OpInfo.Regs.size());
+ if (OpInfo.Regs.front().isVirtual()) {
+ // Put the register class of the virtual registers in the flag word.
+ // That way, later passes can recompute register class constraints for
+ // inline assembly as well as normal instructions. Don't do this for
+ // tied operands that can use the regclass information from the def.
+ const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
+ Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
+ }
+
+ Inst.addImm(Flag);
+
+ for (Register Reg : OpInfo.Regs) {
+ Inst.addReg(Reg,
+ RegState::Define | getImplRegState(Reg.isPhysical()) |
+ (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
+ }
+
+ // Remember this output operand for later processing
+ OutputOperands.push_back(OpInfo);
+ }
+
+ break;
+ case InlineAsm::isInput: {
+ if (OpInfo.isMatchingInputConstraint()) {
+ unsigned DefIdx = OpInfo.getMatchedOperand();
+ // Find operand with register def that corresponds to DefIdx.
+ unsigned InstFlagIdx = StartIdx;
+ for (unsigned i = 0; i < DefIdx; ++i)
+ InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
+ assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");
+
+ unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm();
+ if (InlineAsm::isMemKind(MatchedOperandFlag)) {
+ LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
+ "supported. This should be target specific.\n");
+ return false;
+ }
+ if (!InlineAsm::isRegDefKind(MatchedOperandFlag) &&
+ !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) {
+ LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
+ return false;
+ }
+
+ // We want to tie input to register in next operand.
+ unsigned DefRegIdx = InstFlagIdx + 1;
+ Register Def = Inst->getOperand(DefRegIdx).getReg();
+
+ // Copy input to new vreg with same reg class as Def
+ const TargetRegisterClass *RC = MRI->getRegClass(Def);
+ ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
+ assert(SrcRegs.size() == 1 && "Single register is expected here");
+ Register Tmp = MRI->createVirtualRegister(RC);
+ if (!buildAnyextOrCopy(Tmp, SrcRegs[0], MIRBuilder))
+ return false;
+
+ // Add Flag and input register operand (Tmp) to Inst. Tie Tmp to Def.
+ unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
+ unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
+ Inst.addImm(Flag);
+ Inst.addReg(Tmp);
+ Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
+ break;
+ }
+
+ if (OpInfo.ConstraintType == TargetLowering::C_Other &&
+ OpInfo.isIndirect) {
+ LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
+ "not supported yet\n");
+ return false;
+ }
+
+ if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
+ OpInfo.ConstraintType == TargetLowering::C_Other) {
+
+ std::vector<MachineOperand> Ops;
+ if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
+ OpInfo.ConstraintCode, Ops,
+ MIRBuilder)) {
+ LLVM_DEBUG(dbgs() << "Don't support constraint: "
+ << OpInfo.ConstraintCode << " yet\n");
+ return false;
+ }
+
+ assert(Ops.size() > 0 &&
+ "Expected constraint to be lowered to at least one operand");
+
+ // Add information to the INLINEASM node to know about this input.
+ unsigned OpFlags =
+ InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
+ Inst.addImm(OpFlags);
+ Inst.add(Ops);
+ break;
+ }
+
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
+
+ if (!OpInfo.isIndirect) {
+ LLVM_DEBUG(dbgs()
+ << "Cannot indirectify memory input operands yet\n");
+ return false;
+ }
+
+ assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
+
+ unsigned ConstraintID =
+ TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
+ unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
+ OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
+ Inst.addImm(OpFlags);
+ ArrayRef<Register> SourceRegs =
+ GetOrCreateVRegs(*OpInfo.CallOperandVal);
+ assert(
+ SourceRegs.size() == 1 &&
+ "Expected the memory input to fit into a single virtual register");
+ Inst.addReg(SourceRegs[0]);
+ break;
+ }
+
+ assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
+ OpInfo.ConstraintType == TargetLowering::C_Register) &&
+ "Unknown constraint type!");
+
+ if (OpInfo.isIndirect) {
+ LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
+ "for constraint '"
+ << OpInfo.ConstraintCode << "'\n");
+ return false;
+ }
+
+ // Copy the input into the appropriate registers.
+ if (OpInfo.Regs.empty()) {
+ LLVM_DEBUG(
+ dbgs()
+ << "Couldn't allocate input register for register constraint\n");
+ return false;
+ }
+
+ unsigned NumRegs = OpInfo.Regs.size();
+ ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
+ assert(NumRegs == SourceRegs.size() &&
+ "Expected the number of input registers to match the number of "
+ "source registers");
+
+ if (NumRegs > 1) {
+ LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
+ "not supported yet\n");
+ return false;
+ }
+
+ unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
+ Inst.addImm(Flag);
+ if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
+ return false;
+ Inst.addReg(OpInfo.Regs[0]);
+ break;
+ }
+
+ case InlineAsm::isClobber: {
+
+ unsigned NumRegs = OpInfo.Regs.size();
+ if (NumRegs > 0) {
+ unsigned Flag =
+ InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
+ Inst.addImm(Flag);
+
+ for (Register Reg : OpInfo.Regs) {
+ Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
+ getImplRegState(Reg.isPhysical()));
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
+ Inst.addMetadata(SrcLoc);
+
+ // All inputs are handled, insert the instruction now
+ MIRBuilder.insertInstr(Inst);
+
+ // Finally, copy the output operands into the output registers
+ ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
+ if (ResRegs.size() != OutputOperands.size()) {
+ LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
+ "number of destination registers\n");
+ return false;
+ }
+ for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
+ GISelAsmOperandInfo &OpInfo = OutputOperands[i];
+
+ if (OpInfo.Regs.empty())
+ continue;
+
+ switch (OpInfo.ConstraintType) {
+ case TargetLowering::C_Register:
+ case TargetLowering::C_RegisterClass: {
+ if (OpInfo.Regs.size() > 1) {
+ LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
+ "registers are not supported yet\n");
+ return false;
+ }
+
+ Register SrcReg = OpInfo.Regs[0];
+ unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
+ if (MRI->getType(ResRegs[i]).getSizeInBits() < SrcSize) {
+ // First copy the non-typed virtual register into a generic virtual
+ // register
+ Register Tmp1Reg =
+ MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
+ MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
+        // Then truncate the copy down to the type of the result register.
+ MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
+ } else {
+ MIRBuilder.buildCopy(ResRegs[i], SrcReg);
+ }
+ break;
+ }
+ case TargetLowering::C_Immediate:
+ case TargetLowering::C_Other:
+ LLVM_DEBUG(
+ dbgs() << "Cannot lower target specific output constraints yet\n");
+ return false;
+ case TargetLowering::C_Memory:
+ break; // Already handled.
+ case TargetLowering::C_Unknown:
+ LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
+ return false;
+ }
+ }
+
+ return true;
+}
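
The matching-input path above finds the flag operand of the tied definition by walking over the earlier operand groups, each of which occupies one flag immediate plus its registers. A standalone sketch of that walk; the starting index and group sizes below are made-up values for illustration.

#include <cassert>
#include <vector>

struct OperandGroup { unsigned NumRegs; }; // one flag immediate + NumRegs registers

// Mirrors the loop over getNumOpRegs(): skip DefIdx earlier groups to reach
// the flag operand of the matched definition.
static unsigned flagIndexOf(unsigned StartIdx,
                            const std::vector<OperandGroup> &Groups,
                            unsigned DefIdx) {
  unsigned Idx = StartIdx;
  for (unsigned I = 0; I < DefIdx; ++I)
    Idx += Groups[I].NumRegs + 1;
  return Idx;
}

int main() {
  // Hypothetical layout: two single-register defs precede the matched group,
  // and the first flag sits at operand index 2.
  assert(flagIndexOf(/*StartIdx=*/2, {{1}, {1}, {1}}, /*DefIdx=*/2) == 6);
  return 0;
}
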
+
+bool InlineAsmLowering::lowerAsmOperandForConstraint(
+ Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
+ MachineIRBuilder &MIRBuilder) const {
+ if (Constraint.size() > 1)
+ return false;
+
+ char ConstraintLetter = Constraint[0];
+ switch (ConstraintLetter) {
+ default:
+ return false;
+ case 'i': // Simple Integer or Relocatable Constant
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
+ assert(CI->getBitWidth() <= 64 &&
+ "expected immediate to fit into 64-bits");
+ // Boolean constants should be zero-extended, others are sign-extended
+ bool IsBool = CI->getBitWidth() == 1;
+ int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
+ Ops.push_back(MachineOperand::CreateImm(ExtVal));
+ return true;
+ }
+ return false;
+ }
+}
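
A standalone sketch of the extension rule used for the 'i' constraint above (plain C++, no LLVM types; RawBits is assumed to already be masked to BitWidth): i1 values are zero-extended, everything else is sign-extended before being emitted as a 64-bit immediate.

#include <cassert>
#include <cstdint>

static int64_t extendImm(uint64_t RawBits, unsigned BitWidth) {
  if (BitWidth == 1)
    return static_cast<int64_t>(RawBits & 1);                 // zext booleans
  const uint64_t SignBit = uint64_t(1) << (BitWidth - 1);
  return static_cast<int64_t>((RawBits ^ SignBit) - SignBit); // sext the rest
}

int main() {
  assert(extendImm(1, 1) == 1);      // i1 true -> imm 1
  assert(extendImm(0xFF, 8) == -1);  // i8 -1   -> imm -1
  assert(extendImm(0x7F, 8) == 127); // i8 127  -> imm 127
  return 0;
}
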
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 7c4fd2d140d36..f32278d070527 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -29,6 +29,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
#define DEBUG_TYPE "instruction-select"
@@ -175,7 +176,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
auto DstRC = MRI.getRegClass(DstReg);
if (SrcRC == DstRC) {
MRI.replaceRegWith(DstReg, SrcReg);
- MI.eraseFromParentAndMarkDBGValuesForRemoval();
+ MI.eraseFromParent();
}
}
}
@@ -222,9 +223,6 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
return false;
}
#endif
- auto &TLI = *MF.getSubtarget().getTargetLowering();
- TLI.finalizeLowering(MF);
-
// Determine if there are any calls in this machine function. Ported from
// SelectionDAG.
MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -240,6 +238,9 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
}
}
+ // FIXME: FinalizeISel pass calls finalizeLowering, so it's called twice.
+ auto &TLI = *MF.getSubtarget().getTargetLowering();
+ TLI.finalizeLowering(MF);
LLVM_DEBUG({
dbgs() << "Rules covered by selecting function: " << MF.getName() << ":";
@@ -248,11 +249,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
dbgs() << "\n\n";
});
CoverageInfo.emit(CoveragePrefix,
- MF.getSubtarget()
- .getTargetLowering()
- ->getTargetMachine()
- .getTarget()
- .getBackendName());
+ TLI.getTargetMachine().getTarget().getBackendName());
// If we successfully selected the function nothing is going to use the vreg
// types after us (otherwise MIRPrinter would need them). Make sure the types
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index b9c90e69ddb2e..2fedc034d315f 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -42,7 +42,7 @@ bool InstructionSelector::constrainOperandRegToRegClass(
MachineRegisterInfo &MRI = MF.getRegInfo();
return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC,
- I.getOperand(OpIdx), OpIdx);
+ I.getOperand(OpIdx));
}
bool InstructionSelector::isOperandImmEqual(
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
index 601d50e9806fd..a83742f2138fc 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
@@ -80,22 +80,46 @@ LegalityPredicate LegalityPredicates::isPointer(unsigned TypeIdx,
};
}
-LegalityPredicate LegalityPredicates::narrowerThan(unsigned TypeIdx,
- unsigned Size) {
+LegalityPredicate LegalityPredicates::elementTypeIs(unsigned TypeIdx,
+ LLT EltTy) {
+ return [=](const LegalityQuery &Query) {
+ const LLT QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isVector() && QueryTy.getElementType() == EltTy;
+ };
+}
+
+LegalityPredicate LegalityPredicates::scalarNarrowerThan(unsigned TypeIdx,
+ unsigned Size) {
return [=](const LegalityQuery &Query) {
const LLT QueryTy = Query.Types[TypeIdx];
return QueryTy.isScalar() && QueryTy.getSizeInBits() < Size;
};
}
-LegalityPredicate LegalityPredicates::widerThan(unsigned TypeIdx,
- unsigned Size) {
+LegalityPredicate LegalityPredicates::scalarWiderThan(unsigned TypeIdx,
+ unsigned Size) {
return [=](const LegalityQuery &Query) {
const LLT QueryTy = Query.Types[TypeIdx];
return QueryTy.isScalar() && QueryTy.getSizeInBits() > Size;
};
}
+LegalityPredicate LegalityPredicates::smallerThan(unsigned TypeIdx0,
+ unsigned TypeIdx1) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx0].getSizeInBits() <
+ Query.Types[TypeIdx1].getSizeInBits();
+ };
+}
+
+LegalityPredicate LegalityPredicates::largerThan(unsigned TypeIdx0,
+ unsigned TypeIdx1) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx0].getSizeInBits() >
+ Query.Types[TypeIdx1].getSizeInBits();
+ };
+}
+
LegalityPredicate LegalityPredicates::scalarOrEltNarrowerThan(unsigned TypeIdx,
unsigned Size) {
return [=](const LegalityQuery &Query) {
@@ -126,6 +150,12 @@ LegalityPredicate LegalityPredicates::sizeNotPow2(unsigned TypeIdx) {
};
}
+LegalityPredicate LegalityPredicates::sizeIs(unsigned TypeIdx, unsigned Size) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx].getSizeInBits() == Size;
+ };
+}
+
LegalityPredicate LegalityPredicates::sameSize(unsigned TypeIdx0,
unsigned TypeIdx1) {
return [=](const LegalityQuery &Query) {
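
Before the Legalizer.cpp changes below, a standalone sketch of the closure-returning pattern the new predicates (elementTypeIs, scalarNarrowerThan, smallerThan, largerThan, sizeIs) follow: each factory captures its parameters and returns a callable that is evaluated later against a query. Plain C++ stand-ins replace LLT and LegalityQuery here.

#include <cassert>
#include <functional>
#include <vector>

struct Query { std::vector<unsigned> SizesInBits; }; // stand-in for LegalityQuery
using Predicate = std::function<bool(const Query &)>;

static Predicate largerThan(unsigned TypeIdx0, unsigned TypeIdx1) {
  return [=](const Query &Q) {
    return Q.SizesInBits[TypeIdx0] > Q.SizesInBits[TypeIdx1];
  };
}

static Predicate sizeIs(unsigned TypeIdx, unsigned Size) {
  return [=](const Query &Q) { return Q.SizesInBits[TypeIdx] == Size; };
}

int main() {
  Query Q{{64, 32}}; // e.g. a 64-bit result type and a 32-bit source type
  assert(largerThan(0, 1)(Q));
  assert(sizeIs(1, 32)(Q));
  return 0;
}
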
diff --git a/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
index e789e4a333dcc..1d7be54de3b04 100644
--- a/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
#include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
+#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -28,6 +29,7 @@
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
#include "llvm/Target/TargetMachine.h"
#include <iterator>
@@ -41,6 +43,29 @@ static cl::opt<bool>
cl::desc("Should enable CSE in Legalizer"),
cl::Optional, cl::init(false));
+enum class DebugLocVerifyLevel {
+ None,
+ Legalizations,
+ LegalizationsAndArtifactCombiners,
+};
+#ifndef NDEBUG
+static cl::opt<DebugLocVerifyLevel> VerifyDebugLocs(
+ "verify-legalizer-debug-locs",
+ cl::desc("Verify that debug locations are handled"),
+ cl::values(
+ clEnumValN(DebugLocVerifyLevel::None, "none", "No verification"),
+ clEnumValN(DebugLocVerifyLevel::Legalizations, "legalizations",
+ "Verify legalizations"),
+ clEnumValN(DebugLocVerifyLevel::LegalizationsAndArtifactCombiners,
+ "legalizations+artifactcombiners",
+ "Verify legalizations and artifact combines")),
+ cl::init(DebugLocVerifyLevel::Legalizations));
+#else
+// Always disable it for release builds by preventing the observer from being
+// installed.
+static const DebugLocVerifyLevel VerifyDebugLocs = DebugLocVerifyLevel::None;
+#endif
+
char Legalizer::ID = 0;
INITIALIZE_PASS_BEGIN(Legalizer, DEBUG_TYPE,
"Legalize the Machine IR a function's Machine IR", false,
@@ -108,7 +133,6 @@ public:
}
void createdInstr(MachineInstr &MI) override {
- LLVM_DEBUG(dbgs() << ".. .. New MI: " << MI);
LLVM_DEBUG(NewMIs.push_back(&MI));
createdOrChangedInstr(MI);
}
@@ -143,7 +167,9 @@ public:
Legalizer::MFResult
Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
ArrayRef<GISelChangeObserver *> AuxObservers,
+ LostDebugLocObserver &LocObserver,
MachineIRBuilder &MIRBuilder) {
+ MIRBuilder.setMF(MF);
MachineRegisterInfo &MRI = MF.getRegInfo();
// Populate worklists.
@@ -180,7 +206,7 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
// Now install the observer as the delegate to MF.
// This will keep all the observers notified about new insertions/deletions.
- RAIIDelegateInstaller DelInstall(MF, &WrapperObserver);
+ RAIIMFObsDelInstaller Installer(MF, WrapperObserver);
LegalizerHelper Helper(MF, LI, WrapperObserver, MIRBuilder);
LegalizationArtifactCombiner ArtCombiner(MIRBuilder, MRI, LI);
auto RemoveDeadInstFromLists = [&WrapperObserver](MachineInstr *DeadMI) {
@@ -199,6 +225,7 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
if (isTriviallyDead(MI, MRI)) {
LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
MI.eraseFromParentAndMarkDBGValuesForRemoval();
+ LocObserver.checkpoint(false);
continue;
}
@@ -224,6 +251,7 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
return {Changed, &MI};
}
WorkListObserver.printNewInstrs();
+ LocObserver.checkpoint();
Changed |= Res == LegalizerHelper::Legalized;
}
// Try to combine the instructions in RetryList again if there
@@ -238,6 +266,7 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
return {Changed, RetryList.front()};
}
}
+ LocObserver.checkpoint();
while (!ArtifactList.empty()) {
MachineInstr &MI = *ArtifactList.pop_back_val();
assert(isPreISelGenericOpcode(MI.getOpcode()) &&
@@ -246,6 +275,7 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
LLVM_DEBUG(dbgs() << MI << "Is dead\n");
RemoveDeadInstFromLists(&MI);
MI.eraseFromParentAndMarkDBGValuesForRemoval();
+ LocObserver.checkpoint(false);
continue;
}
SmallVector<MachineInstr *, 4> DeadInstructions;
@@ -258,6 +288,9 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
RemoveDeadInstFromLists(DeadMI);
DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
}
+ LocObserver.checkpoint(
+ VerifyDebugLocs ==
+ DebugLocVerifyLevel::LegalizationsAndArtifactCombiners);
Changed = true;
continue;
}
@@ -305,9 +338,14 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
// We want CSEInfo in addition to WorkListObserver to observe all changes.
AuxObservers.push_back(CSEInfo);
}
+ assert(!CSEInfo || !errorToBool(CSEInfo->verify()));
+ LostDebugLocObserver LocObserver(DEBUG_TYPE);
+ if (VerifyDebugLocs > DebugLocVerifyLevel::None)
+ AuxObservers.push_back(&LocObserver);
const LegalizerInfo &LI = *MF.getSubtarget().getLegalizerInfo();
- MFResult Result = legalizeMachineFunction(MF, LI, AuxObservers, *MIRBuilder);
+ MFResult Result =
+ legalizeMachineFunction(MF, LI, AuxObservers, LocObserver, *MIRBuilder);
if (Result.FailedOn) {
reportGISelFailure(MF, TPC, MORE, "gisel-legalize",
@@ -324,5 +362,33 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
reportGISelFailure(MF, TPC, MORE, R);
return false;
}
+
+ if (LocObserver.getNumLostDebugLocs()) {
+ MachineOptimizationRemarkMissed R("gisel-legalize", "LostDebugLoc",
+ MF.getFunction().getSubprogram(),
+ /*MBB=*/&*MF.begin());
+ R << "lost "
+ << ore::NV("NumLostDebugLocs", LocObserver.getNumLostDebugLocs())
+ << " debug locations during pass";
+ reportGISelWarning(MF, TPC, MORE, R);
+ // Example remark:
+ // --- !Missed
+ // Pass: gisel-legalize
+    // Name: LostDebugLoc
+ // DebugLoc: { File: '.../legalize-urem.mir', Line: 1, Column: 0 }
+ // Function: test_urem_s32
+ // Args:
+ // - String: 'lost '
+ // - NumLostDebugLocs: '1'
+ // - String: ' debug locations during pass'
+ // ...
+ }
+
+ // If for some reason CSE was not enabled, make sure that we invalidate the
+ // CSEInfo object (as we currently declare that the analysis is preserved).
+  // The next time get() is called on the wrapper, it will force the analysis
+  // to be recomputed.
+ if (!EnableCSE)
+ Wrapper.setComputed(false);
return Result.Changed;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 667e1a04dc342..da519f99ad7e8 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -63,30 +63,48 @@ getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) {
return std::make_pair(NumParts, NumLeftover);
}
+static Type *getFloatTypeForLLT(LLVMContext &Ctx, LLT Ty) {
+
+ if (!Ty.isScalar())
+ return nullptr;
+
+ switch (Ty.getSizeInBits()) {
+ case 16:
+ return Type::getHalfTy(Ctx);
+ case 32:
+ return Type::getFloatTy(Ctx);
+ case 64:
+ return Type::getDoubleTy(Ctx);
+ case 128:
+ return Type::getFP128Ty(Ctx);
+ default:
+ return nullptr;
+ }
+}
+
LegalizerHelper::LegalizerHelper(MachineFunction &MF,
GISelChangeObserver &Observer,
MachineIRBuilder &Builder)
- : MIRBuilder(Builder), MRI(MF.getRegInfo()),
- LI(*MF.getSubtarget().getLegalizerInfo()), Observer(Observer) {
- MIRBuilder.setMF(MF);
+ : MIRBuilder(Builder), Observer(Observer), MRI(MF.getRegInfo()),
+ LI(*MF.getSubtarget().getLegalizerInfo()) {
MIRBuilder.setChangeObserver(Observer);
}
LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
GISelChangeObserver &Observer,
MachineIRBuilder &B)
- : MIRBuilder(B), MRI(MF.getRegInfo()), LI(LI), Observer(Observer) {
- MIRBuilder.setMF(MF);
+ : MIRBuilder(B), Observer(Observer), MRI(MF.getRegInfo()), LI(LI) {
MIRBuilder.setChangeObserver(Observer);
}
LegalizerHelper::LegalizeResult
LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
- LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Legalizing: " << MI);
+
+ MIRBuilder.setInstrAndDebugLoc(MI);
if (MI.getOpcode() == TargetOpcode::G_INTRINSIC ||
MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS)
- return LI.legalizeIntrinsic(MI, MRI, MIRBuilder) ? Legalized
- : UnableToLegalize;
+ return LI.legalizeIntrinsic(*this, MI) ? Legalized : UnableToLegalize;
auto Step = LI.getAction(MI, MRI);
switch (Step.Action) {
case Legal:
@@ -101,6 +119,9 @@ LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
case WidenScalar:
LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
return widenScalar(MI, Step.TypeIdx, Step.NewType);
+ case Bitcast:
+ LLVM_DEBUG(dbgs() << ".. Bitcast type\n");
+ return bitcast(MI, Step.TypeIdx, Step.NewType);
case Lower:
LLVM_DEBUG(dbgs() << ".. Lower\n");
return lower(MI, Step.TypeIdx, Step.NewType);
@@ -112,8 +133,7 @@ LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
return moreElementsVector(MI, Step.TypeIdx, Step.NewType);
case Custom:
LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
- return LI.legalizeCustom(MI, MRI, MIRBuilder, Observer) ? Legalized
- : UnableToLegalize;
+ return LI.legalizeCustom(*this, MI) ? Legalized : UnableToLegalize;
default:
LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
return UnableToLegalize;
@@ -172,26 +192,6 @@ bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
return true;
}
-static LLT getGCDType(LLT OrigTy, LLT TargetTy) {
- if (OrigTy.isVector() && TargetTy.isVector()) {
- assert(OrigTy.getElementType() == TargetTy.getElementType());
- int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
- TargetTy.getNumElements());
- return LLT::scalarOrVector(GCD, OrigTy.getElementType());
- }
-
- if (OrigTy.isVector() && !TargetTy.isVector()) {
- assert(OrigTy.getElementType() == TargetTy);
- return TargetTy;
- }
-
- assert(!OrigTy.isVector() && !TargetTy.isVector());
-
- int GCD = greatestCommonDivisor(OrigTy.getSizeInBits(),
- TargetTy.getSizeInBits());
- return LLT::scalar(GCD);
-}
-
void LegalizerHelper::insertParts(Register DstReg,
LLT ResultTy, LLT PartTy,
ArrayRef<Register> PartRegs,
@@ -237,92 +237,222 @@ void LegalizerHelper::insertParts(Register DstReg,
}
}
+/// Return the result registers of G_UNMERGE_VALUES \p MI in \p Regs
+static void getUnmergeResults(SmallVectorImpl<Register> &Regs,
+ const MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
+
+ const int NumResults = MI.getNumOperands() - 1;
+ Regs.resize(NumResults);
+ for (int I = 0; I != NumResults; ++I)
+ Regs[I] = MI.getOperand(I).getReg();
+}
+
+LLT LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
+ LLT NarrowTy, Register SrcReg) {
+ LLT SrcTy = MRI.getType(SrcReg);
+
+ LLT GCDTy = getGCDType(DstTy, getGCDType(SrcTy, NarrowTy));
+ if (SrcTy == GCDTy) {
+ // If the source already evenly divides the result type, we don't need to do
+ // anything.
+ Parts.push_back(SrcReg);
+ } else {
+ // Need to split into common type sized pieces.
+ auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg);
+ getUnmergeResults(Parts, *Unmerge);
+ }
+
+ return GCDTy;
+}
+
+LLT LegalizerHelper::buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
+ SmallVectorImpl<Register> &VRegs,
+ unsigned PadStrategy) {
+ LLT LCMTy = getLCMType(DstTy, NarrowTy);
+
+ int NumParts = LCMTy.getSizeInBits() / NarrowTy.getSizeInBits();
+ int NumSubParts = NarrowTy.getSizeInBits() / GCDTy.getSizeInBits();
+ int NumOrigSrc = VRegs.size();
+
+ Register PadReg;
+
+ // Get a value we can use to pad the source value if the sources won't evenly
+ // cover the result type.
+ if (NumOrigSrc < NumParts * NumSubParts) {
+ if (PadStrategy == TargetOpcode::G_ZEXT)
+ PadReg = MIRBuilder.buildConstant(GCDTy, 0).getReg(0);
+ else if (PadStrategy == TargetOpcode::G_ANYEXT)
+ PadReg = MIRBuilder.buildUndef(GCDTy).getReg(0);
+ else {
+ assert(PadStrategy == TargetOpcode::G_SEXT);
+
+ // Shift the sign bit of the low register through the high register.
+ auto ShiftAmt =
+ MIRBuilder.buildConstant(LLT::scalar(64), GCDTy.getSizeInBits() - 1);
+ PadReg = MIRBuilder.buildAShr(GCDTy, VRegs.back(), ShiftAmt).getReg(0);
+ }
+ }
+
+ // Registers for the final merge to be produced.
+ SmallVector<Register, 4> Remerge(NumParts);
+
+ // Registers needed for intermediate merges, which will be merged into a
+ // source for Remerge.
+ SmallVector<Register, 4> SubMerge(NumSubParts);
+
+ // Once we've fully read off the end of the original source bits, we can reuse
+ // the same high bits for remaining padding elements.
+ Register AllPadReg;
+
+ // Build merges to the LCM type to cover the original result type.
+ for (int I = 0; I != NumParts; ++I) {
+ bool AllMergePartsArePadding = true;
+
+ // Build the requested merges to the requested type.
+ for (int J = 0; J != NumSubParts; ++J) {
+ int Idx = I * NumSubParts + J;
+ if (Idx >= NumOrigSrc) {
+ SubMerge[J] = PadReg;
+ continue;
+ }
+
+ SubMerge[J] = VRegs[Idx];
+
+ // There are meaningful bits here we can't reuse later.
+ AllMergePartsArePadding = false;
+ }
+
+ // If we've filled up a complete piece with padding bits, we can directly
+ // emit the natural sized constant if applicable, rather than a merge of
+ // smaller constants.
+ if (AllMergePartsArePadding && !AllPadReg) {
+ if (PadStrategy == TargetOpcode::G_ANYEXT)
+ AllPadReg = MIRBuilder.buildUndef(NarrowTy).getReg(0);
+ else if (PadStrategy == TargetOpcode::G_ZEXT)
+ AllPadReg = MIRBuilder.buildConstant(NarrowTy, 0).getReg(0);
+
+ // If this is a sign extension, we can't materialize a trivial constant
+ // with the right type and have to produce a merge.
+ }
+
+ if (AllPadReg) {
+ // Avoid creating additional instructions if we're just adding additional
+ // copies of padding bits.
+ Remerge[I] = AllPadReg;
+ continue;
+ }
+
+ if (NumSubParts == 1)
+ Remerge[I] = SubMerge[0];
+ else
+ Remerge[I] = MIRBuilder.buildMerge(NarrowTy, SubMerge).getReg(0);
+
+ // In the sign extend padding case, re-use the first all-signbit merge.
+ if (AllMergePartsArePadding && !AllPadReg)
+ AllPadReg = Remerge[I];
+ }
+
+ VRegs = std::move(Remerge);
+ return LCMTy;
+}
+
+void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
+ ArrayRef<Register> RemergeRegs) {
+ LLT DstTy = MRI.getType(DstReg);
+
+ // Create the merge to the widened source, and extract the relevant bits into
+ // the result.
+
+ if (DstTy == LCMTy) {
+ MIRBuilder.buildMerge(DstReg, RemergeRegs);
+ return;
+ }
+
+ auto Remerge = MIRBuilder.buildMerge(LCMTy, RemergeRegs);
+ if (DstTy.isScalar() && LCMTy.isScalar()) {
+ MIRBuilder.buildTrunc(DstReg, Remerge);
+ return;
+ }
+
+ if (LCMTy.isVector()) {
+ MIRBuilder.buildExtract(DstReg, Remerge, 0);
+ return;
+ }
+
+ llvm_unreachable("unhandled case");
+}
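
A worked example of the extractGCDType / buildLCMMergePieces / buildWidenedRemergeToDst pipeline above, with sizes chosen purely for illustration (they are not taken from this patch): widening an s96 value using s64 pieces.

// s96 source, s64 narrow type:
//   GCDTy = gcd(96, 64) = s32  -> the source unmerges into three s32 pieces
//   LCMTy = lcm(96, 64) = s192 -> NumParts = 192/64 = 3, NumSubParts = 64/32 = 2
// Six s32 pieces are needed in total; three come from the source and three are
// padding chosen by PadStrategy (zero, undef, or a replicated sign bit). The
// remerge yields three s64 values, which are merged to s192 and then truncated
// back to the original s96 by buildWidenedRemergeToDst.
static_assert(96 / 32 == 3 && 192 / 64 == 3 && 64 / 32 == 2,
              "example arithmetic");
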
+
static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
+#define RTLIBCASE(LibcallPrefix) \
+ do { \
+ switch (Size) { \
+ case 32: \
+ return RTLIB::LibcallPrefix##32; \
+ case 64: \
+ return RTLIB::LibcallPrefix##64; \
+ case 128: \
+ return RTLIB::LibcallPrefix##128; \
+ default: \
+ llvm_unreachable("unexpected size"); \
+ } \
+ } while (0)
+
+ assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
+
switch (Opcode) {
case TargetOpcode::G_SDIV:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- switch (Size) {
- case 32:
- return RTLIB::SDIV_I32;
- case 64:
- return RTLIB::SDIV_I64;
- case 128:
- return RTLIB::SDIV_I128;
- default:
- llvm_unreachable("unexpected size");
- }
+ RTLIBCASE(SDIV_I);
case TargetOpcode::G_UDIV:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- switch (Size) {
- case 32:
- return RTLIB::UDIV_I32;
- case 64:
- return RTLIB::UDIV_I64;
- case 128:
- return RTLIB::UDIV_I128;
- default:
- llvm_unreachable("unexpected size");
- }
+ RTLIBCASE(UDIV_I);
case TargetOpcode::G_SREM:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32;
+ RTLIBCASE(SREM_I);
case TargetOpcode::G_UREM:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::UREM_I64 : RTLIB::UREM_I32;
+ RTLIBCASE(UREM_I);
case TargetOpcode::G_CTLZ_ZERO_UNDEF:
- assert(Size == 32 && "Unsupported size");
- return RTLIB::CTLZ_I32;
+ RTLIBCASE(CTLZ_I);
case TargetOpcode::G_FADD:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32;
+ RTLIBCASE(ADD_F);
case TargetOpcode::G_FSUB:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32;
+ RTLIBCASE(SUB_F);
case TargetOpcode::G_FMUL:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32;
+ RTLIBCASE(MUL_F);
case TargetOpcode::G_FDIV:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32;
+ RTLIBCASE(DIV_F);
case TargetOpcode::G_FEXP:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::EXP_F64 : RTLIB::EXP_F32;
+ RTLIBCASE(EXP_F);
case TargetOpcode::G_FEXP2:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::EXP2_F64 : RTLIB::EXP2_F32;
+ RTLIBCASE(EXP2_F);
case TargetOpcode::G_FREM:
- return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32;
+ RTLIBCASE(REM_F);
case TargetOpcode::G_FPOW:
- return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32;
+ RTLIBCASE(POW_F);
case TargetOpcode::G_FMA:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
+ RTLIBCASE(FMA_F);
case TargetOpcode::G_FSIN:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::SIN_F128
- : Size == 64 ? RTLIB::SIN_F64 : RTLIB::SIN_F32;
+ RTLIBCASE(SIN_F);
case TargetOpcode::G_FCOS:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::COS_F128
- : Size == 64 ? RTLIB::COS_F64 : RTLIB::COS_F32;
+ RTLIBCASE(COS_F);
case TargetOpcode::G_FLOG10:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::LOG10_F128
- : Size == 64 ? RTLIB::LOG10_F64 : RTLIB::LOG10_F32;
+ RTLIBCASE(LOG10_F);
case TargetOpcode::G_FLOG:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::LOG_F128
- : Size == 64 ? RTLIB::LOG_F64 : RTLIB::LOG_F32;
+ RTLIBCASE(LOG_F);
case TargetOpcode::G_FLOG2:
- assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
- return Size == 128 ? RTLIB::LOG2_F128
- : Size == 64 ? RTLIB::LOG2_F64 : RTLIB::LOG2_F32;
+ RTLIBCASE(LOG2_F);
case TargetOpcode::G_FCEIL:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::CEIL_F64 : RTLIB::CEIL_F32;
+ RTLIBCASE(CEIL_F);
case TargetOpcode::G_FFLOOR:
- assert((Size == 32 || Size == 64) && "Unsupported size");
- return Size == 64 ? RTLIB::FLOOR_F64 : RTLIB::FLOOR_F32;
+ RTLIBCASE(FLOOR_F);
+ case TargetOpcode::G_FMINNUM:
+ RTLIBCASE(FMIN_F);
+ case TargetOpcode::G_FMAXNUM:
+ RTLIBCASE(FMAX_F);
+ case TargetOpcode::G_FSQRT:
+ RTLIBCASE(SQRT_F);
+ case TargetOpcode::G_FRINT:
+ RTLIBCASE(RINT_F);
+ case TargetOpcode::G_FNEARBYINT:
+ RTLIBCASE(NEARBYINT_F);
}
llvm_unreachable("Unknown libcall function");
}
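
For reference, the RTLIBCASE macro above expands each case into a size switch; RTLIBCASE(SDIV_I) under case TargetOpcode::G_SDIV is equivalent to the pre-patch code:

// Expansion of RTLIBCASE(SDIV_I); the token paste appends the size suffix.
do {
  switch (Size) {
  case 32:
    return RTLIB::SDIV_I32;
  case 64:
    return RTLIB::SDIV_I64;
  case 128:
    return RTLIB::SDIV_I128;
  default:
    llvm_unreachable("unexpected size");
  }
} while (0);
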
@@ -330,7 +460,8 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
/// True if an instruction is in tail position in its caller. Intended for
/// legalizing libcalls as tail calls when possible.
static bool isLibCallInTailPosition(MachineInstr &MI) {
- const Function &F = MI.getParent()->getParent()->getFunction();
+ MachineBasicBlock &MBB = *MI.getParent();
+ const Function &F = MBB.getParent()->getFunction();
// Conservatively require the attributes of the call to match those of
// the return. Ignore NoAlias and NonNull because they don't affect the
@@ -349,23 +480,22 @@ static bool isLibCallInTailPosition(MachineInstr &MI) {
// Only tail call if the following instruction is a standard return.
auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
- MachineInstr *Next = MI.getNextNode();
- if (!Next || TII.isTailCall(*Next) || !Next->isReturn())
+ auto Next = next_nodbg(MI.getIterator(), MBB.instr_end());
+ if (Next == MBB.instr_end() || TII.isTailCall(*Next) || !Next->isReturn())
return false;
return true;
}
LegalizerHelper::LegalizeResult
-llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+llvm::createLibcall(MachineIRBuilder &MIRBuilder, const char *Name,
const CallLowering::ArgInfo &Result,
- ArrayRef<CallLowering::ArgInfo> Args) {
+ ArrayRef<CallLowering::ArgInfo> Args,
+ const CallingConv::ID CC) {
auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
- auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
- const char *Name = TLI.getLibcallName(Libcall);
CallLowering::CallLoweringInfo Info;
- Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+ Info.CallConv = CC;
Info.Callee = MachineOperand::CreateES(Name);
Info.OrigRet = Result;
std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
@@ -375,6 +505,16 @@ llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
return LegalizerHelper::Legalized;
}
+LegalizerHelper::LegalizeResult
+llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+ const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args) {
+ auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
+ const char *Name = TLI.getLibcallName(Libcall);
+ const CallingConv::ID CC = TLI.getLibcallCallingConv(Libcall);
+ return createLibcall(MIRBuilder, Name, Result, Args, CC);
+}
+
// Useful for libcalls where all operands have the same type.
static LegalizerHelper::LegalizeResult
simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
@@ -428,7 +568,7 @@ llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
}
const char *Name = TLI.getLibcallName(RTLibcall);
- MIRBuilder.setInstr(MI);
+ MIRBuilder.setInstrAndDebugLoc(MI);
CallLowering::CallLoweringInfo Info;
Info.CallConv = TLI.getLibcallCallingConv(RTLibcall);
@@ -443,14 +583,16 @@ llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
if (Info.LoweredTailCall) {
assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?");
- // We must have a return following the call to get past
+ // We must have a return following the call (or debug insts) to get past
// isLibCallInTailPosition.
- assert(MI.getNextNode() && MI.getNextNode()->isReturn() &&
- "Expected instr following MI to be a return?");
-
- // We lowered a tail call, so the call is now the return from the block.
- // Delete the old return.
- MI.getNextNode()->eraseFromParent();
+ do {
+ MachineInstr *Next = MI.getNextNode();
+ assert(Next && (Next->isReturn() || Next->isDebugInstr()) &&
+ "Expected instr following MI to be return or debug inst?");
+ // We lowered a tail call, so the call is now the return from the block.
+ // Delete the old return.
+ Next->eraseFromParent();
+ } while (MI.getNextNode());
}
return LegalizerHelper::Legalized;
@@ -492,8 +634,6 @@ LegalizerHelper::libcall(MachineInstr &MI) {
unsigned Size = LLTy.getSizeInBits();
auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
- MIRBuilder.setInstr(MI);
-
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
@@ -523,37 +663,29 @@ LegalizerHelper::libcall(MachineInstr &MI) {
case TargetOpcode::G_FEXP:
case TargetOpcode::G_FEXP2:
case TargetOpcode::G_FCEIL:
- case TargetOpcode::G_FFLOOR: {
- if (Size > 64) {
- LLVM_DEBUG(dbgs() << "Size " << Size << " too large to legalize.\n");
+ case TargetOpcode::G_FFLOOR:
+ case TargetOpcode::G_FMINNUM:
+ case TargetOpcode::G_FMAXNUM:
+ case TargetOpcode::G_FSQRT:
+ case TargetOpcode::G_FRINT:
+ case TargetOpcode::G_FNEARBYINT: {
+ Type *HLTy = getFloatTypeForLLT(Ctx, LLTy);
+ if (!HLTy || (Size != 32 && Size != 64 && Size != 128)) {
+ LLVM_DEBUG(dbgs() << "No libcall available for size " << Size << ".\n");
return UnableToLegalize;
}
- Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
if (Status != Legalized)
return Status;
break;
}
- case TargetOpcode::G_FPEXT: {
- // FIXME: Support other floating point types (half, fp128 etc)
- unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
- unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- if (ToSize != 64 || FromSize != 32)
- return UnableToLegalize;
- LegalizeResult Status = conversionLibcall(
- MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx));
- if (Status != Legalized)
- return Status;
- break;
- }
+ case TargetOpcode::G_FPEXT:
case TargetOpcode::G_FPTRUNC: {
- // FIXME: Support other floating point types (half, fp128 etc)
- unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
- unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- if (ToSize != 32 || FromSize != 64)
+ Type *FromTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(1).getReg()));
+ Type *ToTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(0).getReg()));
+ if (!FromTy || !ToTy)
return UnableToLegalize;
- LegalizeResult Status = conversionLibcall(
- MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx));
+    LegalizeResult Status = conversionLibcall(MI, MIRBuilder, ToTy, FromTy);
if (Status != Legalized)
return Status;
break;
@@ -597,8 +729,6 @@ LegalizerHelper::libcall(MachineInstr &MI) {
LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned TypeIdx,
LLT NarrowTy) {
- MIRBuilder.setInstr(MI);
-
uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
uint64_t NarrowSize = NarrowTy.getSizeInBits();
@@ -606,19 +736,34 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
default:
return UnableToLegalize;
case TargetOpcode::G_IMPLICIT_DEF: {
- // FIXME: add support for when SizeOp0 isn't an exact multiple of
- // NarrowSize.
- if (SizeOp0 % NarrowSize != 0)
- return UnableToLegalize;
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+
+ // If SizeOp0 is not an exact multiple of NarrowSize, emit
+ // G_ANYEXT(G_IMPLICIT_DEF). Cast result to vector if needed.
+ // FIXME: Although this would also be legal for the general case, it causes
+ // a lot of regressions in the emitted code (superfluous COPYs, artifact
+ // combines not being hit). This seems to be a problem related to the
+ // artifact combiner.
+ if (SizeOp0 % NarrowSize != 0) {
+ LLT ImplicitTy = NarrowTy;
+ if (DstTy.isVector())
+ ImplicitTy = LLT::vector(DstTy.getNumElements(), ImplicitTy);
+
+ Register ImplicitReg = MIRBuilder.buildUndef(ImplicitTy).getReg(0);
+ MIRBuilder.buildAnyExt(DstReg, ImplicitReg);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
int NumParts = SizeOp0 / NarrowSize;
SmallVector<Register, 2> DstRegs;
for (int i = 0; i < NumParts; ++i)
- DstRegs.push_back(
- MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
+ DstRegs.push_back(MIRBuilder.buildUndef(NarrowTy).getReg(0));
- Register DstReg = MI.getOperand(0).getReg();
- if(MRI.getType(DstReg).isVector())
+ if (DstTy.isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
else
MIRBuilder.buildMerge(DstReg, DstRegs);
@@ -657,49 +802,10 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MI.eraseFromParent();
return Legalized;
}
- case TargetOpcode::G_SEXT: {
- if (TypeIdx != 0)
- return UnableToLegalize;
-
- Register SrcReg = MI.getOperand(1).getReg();
- LLT SrcTy = MRI.getType(SrcReg);
-
- // FIXME: support the general case where the requested NarrowTy may not be
- // the same as the source type. E.g. s128 = sext(s32)
- if ((SrcTy.getSizeInBits() != SizeOp0 / 2) ||
- SrcTy.getSizeInBits() != NarrowTy.getSizeInBits()) {
- LLVM_DEBUG(dbgs() << "Can't narrow sext to type " << NarrowTy << "\n");
- return UnableToLegalize;
- }
-
- // Shift the sign bit of the low register through the high register.
- auto ShiftAmt =
- MIRBuilder.buildConstant(LLT::scalar(64), NarrowTy.getSizeInBits() - 1);
- auto Shift = MIRBuilder.buildAShr(NarrowTy, SrcReg, ShiftAmt);
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {SrcReg, Shift.getReg(0)});
- MI.eraseFromParent();
- return Legalized;
- }
- case TargetOpcode::G_ZEXT: {
- if (TypeIdx != 0)
- return UnableToLegalize;
-
- LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
- uint64_t SizeOp1 = SrcTy.getSizeInBits();
- if (SizeOp0 % SizeOp1 != 0)
- return UnableToLegalize;
-
- // Generate a merge where the bottom bits are taken from the source, and
- // zero everything else.
- Register ZeroReg = MIRBuilder.buildConstant(SrcTy, 0).getReg(0);
- unsigned NumParts = SizeOp0 / SizeOp1;
- SmallVector<Register, 4> Srcs = {MI.getOperand(1).getReg()};
- for (unsigned Part = 1; Part < NumParts; ++Part)
- Srcs.push_back(ZeroReg);
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), Srcs);
- MI.eraseFromParent();
- return Legalized;
- }
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_ANYEXT:
+ return narrowScalarExt(MI, TypeIdx, NarrowTy);
case TargetOpcode::G_TRUNC: {
if (TypeIdx != 1)
return UnableToLegalize;
@@ -710,12 +816,15 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
return UnableToLegalize;
}
- auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1).getReg());
- MIRBuilder.buildCopy(MI.getOperand(0).getReg(), Unmerge.getReg(0));
+ auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1));
+ MIRBuilder.buildCopy(MI.getOperand(0), Unmerge.getReg(0));
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_FREEZE:
+ return reduceOperationWidth(MI, TypeIdx, NarrowTy);
+
case TargetOpcode::G_ADD: {
// FIXME: add support for when SizeOp0 isn't an exact multiple of
// NarrowSize.
@@ -779,7 +888,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
DstRegs.push_back(DstReg);
BorrowIn = BorrowOut;
}
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
+ MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -800,7 +909,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
auto &MMO = **MI.memoperands_begin();
- MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO);
+ MIRBuilder.buildLoad(TmpReg, MI.getOperand(1), MMO);
MIRBuilder.buildAnyExt(DstReg, TmpReg);
MI.eraseFromParent();
return Legalized;
@@ -819,12 +928,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (MMO.getSizeInBits() == NarrowSize) {
MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
} else {
- unsigned ExtLoad = ZExt ? TargetOpcode::G_ZEXTLOAD
- : TargetOpcode::G_SEXTLOAD;
- MIRBuilder.buildInstr(ExtLoad)
- .addDef(TmpReg)
- .addUse(PtrReg)
- .addMemOperand(&MMO);
+ MIRBuilder.buildLoadInstr(MI.getOpcode(), TmpReg, PtrReg, MMO);
}
if (ZExt)
@@ -853,7 +957,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
auto &MMO = **MI.memoperands_begin();
MIRBuilder.buildTrunc(TmpReg, SrcReg);
- MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO);
+ MIRBuilder.buildStore(TmpReg, MI.getOperand(1), MMO);
MI.eraseFromParent();
return Legalized;
}
@@ -885,8 +989,19 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
case TargetOpcode::G_CTTZ:
case TargetOpcode::G_CTTZ_ZERO_UNDEF:
case TargetOpcode::G_CTPOP:
- if (TypeIdx != 0)
- return UnableToLegalize; // TODO
+ if (TypeIdx == 1)
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_CTLZ:
+ case TargetOpcode::G_CTLZ_ZERO_UNDEF:
+ return narrowScalarCTLZ(MI, TypeIdx, NarrowTy);
+ case TargetOpcode::G_CTTZ:
+ case TargetOpcode::G_CTTZ_ZERO_UNDEF:
+ return narrowScalarCTTZ(MI, TypeIdx, NarrowTy);
+ case TargetOpcode::G_CTPOP:
+ return narrowScalarCTPOP(MI, TypeIdx, NarrowTy);
+ default:
+ return UnableToLegalize;
+ }
Observer.changingInstr(MI);
narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT);
@@ -910,10 +1025,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
return Legalized;
case TargetOpcode::G_PHI: {
unsigned NumParts = SizeOp0 / NarrowSize;
- SmallVector<Register, 2> DstRegs;
- SmallVector<SmallVector<Register, 2>, 2> SrcRegs;
- DstRegs.resize(NumParts);
- SrcRegs.resize(MI.getNumOperands() / 2);
+ SmallVector<Register, 2> DstRegs(NumParts);
+ SmallVector<SmallVector<Register, 2>, 2> SrcRegs(MI.getNumOperands() / 2);
Observer.changingInstr(MI);
for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB();
@@ -931,7 +1044,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1));
}
MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
+ MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
Observer.changedInstr(MI);
MI.eraseFromParent();
return Legalized;
@@ -955,11 +1068,11 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
Observer.changingInstr(MI);
Register LHSL = MRI.createGenericVirtualRegister(NarrowTy);
Register LHSH = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2).getReg());
+ MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2));
Register RHSL = MRI.createGenericVirtualRegister(NarrowTy);
Register RHSH = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3).getReg());
+ MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3));
CmpInst::Predicate Pred =
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
@@ -970,14 +1083,14 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH);
MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH);
MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0);
- MIRBuilder.buildICmp(Pred, MI.getOperand(0).getReg(), Or, Zero);
+ MIRBuilder.buildICmp(Pred, MI.getOperand(0), Or, Zero);
} else {
MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, ResTy, LHSH, RHSH);
MachineInstrBuilder CmpHEQ =
MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy, LHSH, RHSH);
MachineInstrBuilder CmpLU = MIRBuilder.buildICmp(
ICmpInst::getUnsignedPredicate(Pred), ResTy, LHSL, RHSL);
- MIRBuilder.buildSelect(MI.getOperand(0).getReg(), CmpHEQ, CmpLU, CmpH);
+ MIRBuilder.buildSelect(MI.getOperand(0), CmpHEQ, CmpLU, CmpH);
}
Observer.changedInstr(MI);
MI.eraseFromParent();
@@ -987,8 +1100,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (TypeIdx != 0)
return UnableToLegalize;
- if (!MI.getOperand(2).isImm())
- return UnableToLegalize;
int64_t SizeInBits = MI.getOperand(2).getImm();
// So long as the new type has more bits than the bits we're extending we
@@ -998,13 +1109,13 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// We don't lose any non-extension bits by truncating the src and
// sign-extending the dst.
MachineOperand &MO1 = MI.getOperand(1);
- auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1.getReg());
- MO1.setReg(TruncMIB->getOperand(0).getReg());
+ auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1);
+ MO1.setReg(TruncMIB.getReg(0));
MachineOperand &MO2 = MI.getOperand(0);
Register DstExt = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildInstr(TargetOpcode::G_SEXT, {MO2.getReg()}, {DstExt});
+ MIRBuilder.buildSExt(MO2, DstExt);
MO2.setReg(DstExt);
Observer.changedInstr(MI);
return Legalized;
@@ -1031,12 +1142,11 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
}
// Explode the big arguments into smaller chunks.
- MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1).getReg());
+ MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1));
Register AshrCstReg =
MIRBuilder.buildConstant(NarrowTy, NarrowTy.getScalarSizeInBits() - 1)
- ->getOperand(0)
- .getReg();
+ .getReg(0);
Register FullExtensionReg = 0;
Register PartialExtensionReg = 0;
@@ -1051,11 +1161,9 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
DstRegs.push_back(FullExtensionReg);
continue;
}
- DstRegs.push_back(MIRBuilder
- .buildInstr(TargetOpcode::G_ASHR, {NarrowTy},
- {PartialExtensionReg, AshrCstReg})
- ->getOperand(0)
- .getReg());
+ DstRegs.push_back(
+ MIRBuilder.buildAShr(NarrowTy, PartialExtensionReg, AshrCstReg)
+ .getReg(0));
FullExtensionReg = DstRegs.back();
} else {
DstRegs.push_back(
@@ -1063,8 +1171,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
.buildInstr(
TargetOpcode::G_SEXT_INREG, {NarrowTy},
{SrcRegs[i], SizeInBits % NarrowTy.getScalarSizeInBits()})
- ->getOperand(0)
- .getReg());
+ .getReg(0));
PartialExtensionReg = DstRegs.back();
}
}
@@ -1091,28 +1198,57 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
DstRegs.push_back(DstPart.getReg(0));
}
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
+ MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
Observer.changedInstr(MI);
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_PTRMASK: {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+ Observer.changingInstr(MI);
+ narrowScalarSrc(MI, NarrowTy, 2);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
}
}
+Register LegalizerHelper::coerceToScalar(Register Val) {
+ LLT Ty = MRI.getType(Val);
+ if (Ty.isScalar())
+ return Val;
+
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ LLT NewTy = LLT::scalar(Ty.getSizeInBits());
+ if (Ty.isPointer()) {
+ if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
+ return Register();
+ return MIRBuilder.buildPtrToInt(NewTy, Val).getReg(0);
+ }
+
+ Register NewVal = Val;
+
+ assert(Ty.isVector());
+ LLT EltTy = Ty.getElementType();
+ if (EltTy.isPointer())
+ NewVal = MIRBuilder.buildPtrToInt(NewTy, NewVal).getReg(0);
+ return MIRBuilder.buildBitcast(NewTy, NewVal).getReg(0);
+}
+
void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
unsigned OpIdx, unsigned ExtOpcode) {
MachineOperand &MO = MI.getOperand(OpIdx);
- auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO.getReg()});
- MO.setReg(ExtB->getOperand(0).getReg());
+ auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO});
+ MO.setReg(ExtB.getReg(0));
}
void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy,
unsigned OpIdx) {
MachineOperand &MO = MI.getOperand(OpIdx);
- auto ExtB = MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {NarrowTy},
- {MO.getReg()});
- MO.setReg(ExtB->getOperand(0).getReg());
+ auto ExtB = MIRBuilder.buildTrunc(NarrowTy, MO);
+ MO.setReg(ExtB.getReg(0));
}
void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
@@ -1120,7 +1256,7 @@ void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
MachineOperand &MO = MI.getOperand(OpIdx);
Register DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt});
+ MIRBuilder.buildInstr(TruncOpcode, {MO}, {DstExt});
MO.setReg(DstExt);
}
@@ -1129,7 +1265,7 @@ void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy,
MachineOperand &MO = MI.getOperand(OpIdx);
Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc});
+ MIRBuilder.buildInstr(ExtOpcode, {MO}, {DstTrunc});
MO.setReg(DstTrunc);
}
@@ -1138,7 +1274,7 @@ void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy,
MachineOperand &MO = MI.getOperand(OpIdx);
Register DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
- MIRBuilder.buildExtract(MO.getReg(), DstExt, 0);
+ MIRBuilder.buildExtract(MO, DstExt, 0);
MO.setReg(DstExt);
}
@@ -1172,6 +1308,19 @@ void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy,
MO.setReg(MoreReg);
}
+void LegalizerHelper::bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx) {
+ MachineOperand &Op = MI.getOperand(OpIdx);
+ Op.setReg(MIRBuilder.buildBitcast(CastTy, Op).getReg(0));
+}
+
+void LegalizerHelper::bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx) {
+ MachineOperand &MO = MI.getOperand(OpIdx);
+ Register CastDst = MRI.createGenericVirtualRegister(CastTy);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+ MIRBuilder.buildBitcast(MO, CastDst);
+ MO.setReg(CastDst);
+}
+
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
LLT WideTy) {
@@ -1300,10 +1449,10 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
if (TypeIdx != 0)
return UnableToLegalize;
- unsigned NumDst = MI.getNumOperands() - 1;
+ int NumDst = MI.getNumOperands() - 1;
Register SrcReg = MI.getOperand(NumDst).getReg();
LLT SrcTy = MRI.getType(SrcReg);
- if (!SrcTy.isScalar())
+ if (SrcTy.isVector())
return UnableToLegalize;
Register Dst0Reg = MI.getOperand(0).getReg();
@@ -1311,26 +1460,90 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
if (!DstTy.isScalar())
return UnableToLegalize;
- unsigned NewSrcSize = NumDst * WideTy.getSizeInBits();
- LLT NewSrcTy = LLT::scalar(NewSrcSize);
- unsigned SizeDiff = WideTy.getSizeInBits() - DstTy.getSizeInBits();
+ if (WideTy.getSizeInBits() >= SrcTy.getSizeInBits()) {
+ if (SrcTy.isPointer()) {
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) {
+ LLVM_DEBUG(
+ dbgs() << "Not casting non-integral address space integer\n");
+ return UnableToLegalize;
+ }
+
+ SrcTy = LLT::scalar(SrcTy.getSizeInBits());
+ SrcReg = MIRBuilder.buildPtrToInt(SrcTy, SrcReg).getReg(0);
+ }
+
+ // Widen SrcTy to WideTy. This does not affect the result, but since the
+ // user requested this size, it is probably better handled than SrcTy and
+ // should reduce the total number of legalization artifacts.
+ if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
+ SrcTy = WideTy;
+ SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0);
+ }
- auto WideSrc = MIRBuilder.buildZExt(NewSrcTy, SrcReg);
+ // There's no unmerge type to target. Directly extract the bits from the
+ // source type.
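+ // e.g. unmerging a s16 into two s8 pieces with WideTy = s32 (illustrative;
+ // register names are arbitrary):
+ //   %w:_(s32)  = G_ANYEXT %src:_(s16)
+ //   %d0:_(s8)  = G_TRUNC %w
+ //   %c8:_(s32) = G_CONSTANT i32 8
+ //   %t:_(s32)  = G_LSHR %w, %c8
+ //   %d1:_(s8)  = G_TRUNC %t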
+ unsigned DstSize = DstTy.getSizeInBits();
- for (unsigned I = 1; I != NumDst; ++I) {
- auto ShiftAmt = MIRBuilder.buildConstant(NewSrcTy, SizeDiff * I);
- auto Shl = MIRBuilder.buildShl(NewSrcTy, WideSrc, ShiftAmt);
- WideSrc = MIRBuilder.buildOr(NewSrcTy, WideSrc, Shl);
+ MIRBuilder.buildTrunc(Dst0Reg, SrcReg);
+ for (int I = 1; I != NumDst; ++I) {
+ auto ShiftAmt = MIRBuilder.buildConstant(SrcTy, DstSize * I);
+ auto Shr = MIRBuilder.buildLShr(SrcTy, SrcReg, ShiftAmt);
+ MIRBuilder.buildTrunc(MI.getOperand(I), Shr);
+ }
+
+ MI.eraseFromParent();
+ return Legalized;
}
- Observer.changingInstr(MI);
+ // Extend the source to a wider type.
+ LLT LCMTy = getLCMType(SrcTy, WideTy);
- MI.getOperand(NumDst).setReg(WideSrc->getOperand(0).getReg());
- for (unsigned I = 0; I != NumDst; ++I)
- widenScalarDst(MI, WideTy, I);
+ Register WideSrc = SrcReg;
+ if (LCMTy.getSizeInBits() != SrcTy.getSizeInBits()) {
+ // TODO: If this is an integral address space, cast to integer and anyext.
+ if (SrcTy.isPointer()) {
+ LLVM_DEBUG(dbgs() << "Widening pointer source types not implemented\n");
+ return UnableToLegalize;
+ }
- Observer.changedInstr(MI);
+ WideSrc = MIRBuilder.buildAnyExt(LCMTy, WideSrc).getReg(0);
+ }
+
+ auto Unmerge = MIRBuilder.buildUnmerge(WideTy, WideSrc);
+ // Create a sequence of unmerges to the original results. Since we may have
+ // widened the source, we will need to pad the results with dead defs to cover
+ // the source register.
+ // e.g. widen s16 to s32:
+ // %1:_(s16), %2:_(s16), %3:_(s16) = G_UNMERGE_VALUES %0:_(s48)
+ //
+ // =>
+ // %4:_(s64) = G_ANYEXT %0:_(s48)
+ // %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4 ; Requested unmerge
+ // %1:_(s16), %2:_(s16) = G_UNMERGE_VALUES %5 ; unpack to original regs
+ // %3:_(s16), dead %7 = G_UNMERGE_VALUES %6 ; original reg + extra dead def
+
+ const int NumUnmerge = Unmerge->getNumOperands() - 1;
+ const int PartsPerUnmerge = WideTy.getSizeInBits() / DstTy.getSizeInBits();
+
+ for (int I = 0; I != NumUnmerge; ++I) {
+ auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES);
+
+ for (int J = 0; J != PartsPerUnmerge; ++J) {
+ int Idx = I * PartsPerUnmerge + J;
+ if (Idx < NumDst)
+ MIB.addDef(MI.getOperand(Idx).getReg());
+ else {
+ // Create dead def for excess components.
+ MIB.addDef(MRI.createGenericVirtualRegister(DstTy));
+ }
+ }
+
+ MIB.addUse(Unmerge.getReg(I));
+ }
+
+ MI.eraseFromParent();
return Legalized;
}
@@ -1426,9 +1639,45 @@ LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
-LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
- MIRBuilder.setInstr(MI);
+LegalizerHelper::widenScalarAddSubSat(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy) {
+ bool IsSigned = MI.getOpcode() == TargetOpcode::G_SADDSAT ||
+ MI.getOpcode() == TargetOpcode::G_SSUBSAT;
+ // We can convert this to:
+ // 1. Any extend iN to iM
+ // 2. SHL by M-N
+ // 3. [US][ADD|SUB]SAT
+ // 4. L/ASHR by M-N
+ //
+ // It may be more efficient to lower this to a min and a max operation in
+ // the higher precision arithmetic if the promoted operation isn't legal,
+ // but this decision is up to the target's lowering request.
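+ // e.g. widening a s8 G_UADDSAT to s32 (illustrative; register names are
+ // arbitrary):
+ //   %k:_(s32)  = G_CONSTANT i32 24
+ //   %la:_(s32) = G_ANYEXT %a:_(s8)
+ //   %lb:_(s32) = G_ANYEXT %b:_(s8)
+ //   %sa:_(s32) = G_SHL %la, %k
+ //   %sb:_(s32) = G_SHL %lb, %k
+ //   %w:_(s32)  = G_UADDSAT %sa, %sb
+ //   %sr:_(s32) = G_LSHR %w, %k
+ //   %d:_(s8)   = G_TRUNC %sr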
+ Register DstReg = MI.getOperand(0).getReg();
+
+ unsigned NewBits = WideTy.getScalarSizeInBits();
+ unsigned SHLAmount = NewBits - MRI.getType(DstReg).getScalarSizeInBits();
+
+ auto LHS = MIRBuilder.buildAnyExt(WideTy, MI.getOperand(1));
+ auto RHS = MIRBuilder.buildAnyExt(WideTy, MI.getOperand(2));
+ auto ShiftK = MIRBuilder.buildConstant(WideTy, SHLAmount);
+ auto ShiftL = MIRBuilder.buildShl(WideTy, LHS, ShiftK);
+ auto ShiftR = MIRBuilder.buildShl(WideTy, RHS, ShiftK);
+
+ auto WideInst = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy},
+ {ShiftL, ShiftR}, MI.getFlags());
+
+ // Use a shift that will preserve the number of sign bits when the trunc is
+ // folded away.
+ auto Result = IsSigned ? MIRBuilder.buildAShr(WideTy, WideInst, ShiftK)
+ : MIRBuilder.buildLShr(WideTy, WideInst, ShiftK);
+ MIRBuilder.buildTrunc(DstReg, Result);
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
@@ -1444,28 +1693,30 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
case TargetOpcode::G_USUBO: {
if (TypeIdx == 1)
return UnableToLegalize; // TODO
- auto LHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
- {MI.getOperand(2).getReg()});
- auto RHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy},
- {MI.getOperand(3).getReg()});
+ auto LHSZext = MIRBuilder.buildZExt(WideTy, MI.getOperand(2));
+ auto RHSZext = MIRBuilder.buildZExt(WideTy, MI.getOperand(3));
unsigned Opcode = MI.getOpcode() == TargetOpcode::G_UADDO
? TargetOpcode::G_ADD
: TargetOpcode::G_SUB;
// Do the arithmetic in the larger type.
auto NewOp = MIRBuilder.buildInstr(Opcode, {WideTy}, {LHSZext, RHSZext});
LLT OrigTy = MRI.getType(MI.getOperand(0).getReg());
- APInt Mask = APInt::getAllOnesValue(OrigTy.getSizeInBits());
- auto AndOp = MIRBuilder.buildInstr(
- TargetOpcode::G_AND, {WideTy},
- {NewOp, MIRBuilder.buildConstant(WideTy, Mask.getZExtValue())});
+ APInt Mask =
+ APInt::getLowBitsSet(WideTy.getSizeInBits(), OrigTy.getSizeInBits());
+ auto AndOp = MIRBuilder.buildAnd(
+ WideTy, NewOp, MIRBuilder.buildConstant(WideTy, Mask));
// There is no overflow if the AndOp is the same as NewOp.
- MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1).getReg(), NewOp,
- AndOp);
+ MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1), NewOp, AndOp);
// Now trunc the NewOp to the original result.
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), NewOp);
+ MIRBuilder.buildTrunc(MI.getOperand(0), NewOp);
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_SADDSAT:
+ case TargetOpcode::G_SSUBSAT:
+ case TargetOpcode::G_UADDSAT:
+ case TargetOpcode::G_USUBSAT:
+ return widenScalarAddSubSat(MI, TypeIdx, WideTy);
case TargetOpcode::G_CTTZ:
case TargetOpcode::G_CTTZ_ZERO_UNDEF:
case TargetOpcode::G_CTLZ:
@@ -1500,9 +1751,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) {
// The correct result is NewOp - (Difference in widety and current ty).
unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits();
- MIBNewOp = MIRBuilder.buildInstr(
- TargetOpcode::G_SUB, {WideTy},
- {MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff)});
+ MIBNewOp = MIRBuilder.buildSub(
+ WideTy, MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff));
}
MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp);
@@ -1525,10 +1775,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
LLT Ty = MRI.getType(DstReg);
unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
MIRBuilder.buildConstant(ShiftAmtReg, DiffBits);
- MIRBuilder.buildInstr(TargetOpcode::G_LSHR)
- .addDef(ShrReg)
- .addUse(DstExt)
- .addUse(ShiftAmtReg);
+ MIRBuilder.buildLShr(ShrReg, DstExt, ShiftAmtReg);
MIRBuilder.buildTrunc(DstReg, ShrReg);
Observer.changedInstr(MI);
@@ -1552,6 +1799,13 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI);
return Legalized;
}
+ case TargetOpcode::G_FREEZE:
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ Observer.changedInstr(MI);
+ return Legalized;
+
case TargetOpcode::G_ADD:
case TargetOpcode::G_AND:
case TargetOpcode::G_MUL:
@@ -1844,9 +2098,10 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
// TODO: Probably should be zext
widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_SEXT);
Observer.changedInstr(MI);
+ return Legalized;
}
- return Legalized;
+ return UnableToLegalize;
}
case TargetOpcode::G_FADD:
case TargetOpcode::G_FMUL:
@@ -1932,29 +2187,162 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
widenScalarDst(MI, WideTy, 0, TargetOpcode::G_TRUNC);
Observer.changedInstr(MI);
return Legalized;
+ case TargetOpcode::G_PTRMASK: {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ }
+}
+
+static void getUnmergePieces(SmallVectorImpl<Register> &Pieces,
+ MachineIRBuilder &B, Register Src, LLT Ty) {
+ auto Unmerge = B.buildUnmerge(Ty, Src);
+ for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
+ Pieces.push_back(Unmerge.getReg(I));
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerBitcast(MachineInstr &MI) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+
+ if (SrcTy.isVector()) {
+ LLT SrcEltTy = SrcTy.getElementType();
+ SmallVector<Register, 8> SrcRegs;
+
+ if (DstTy.isVector()) {
+ int NumDstElt = DstTy.getNumElements();
+ int NumSrcElt = SrcTy.getNumElements();
+
+ LLT DstEltTy = DstTy.getElementType();
+ LLT DstCastTy = DstEltTy; // Intermediate bitcast result type
+ LLT SrcPartTy = SrcEltTy; // Original unmerge result type.
+
+ // If there's an element size mismatch, insert intermediate casts to match
+ // the result element type.
+ if (NumSrcElt < NumDstElt) { // Source element type is larger.
+ // %1:_(<4 x s8>) = G_BITCAST %0:_(<2 x s16>)
+ //
+ // =>
+ //
+ // %2:_(s16), %3:_(s16) = G_UNMERGE_VALUES %0
+ // %4:_(<2 x s8>) = G_BITCAST %2
+ // %5:_(<2 x s8>) = G_BITCAST %3
+ // %1:_(<4 x s8>) = G_CONCAT_VECTORS %4, %5
+ DstCastTy = LLT::vector(NumDstElt / NumSrcElt, DstEltTy);
+ SrcPartTy = SrcEltTy;
+ } else if (NumSrcElt > NumDstElt) { // Source element type is smaller.
+ //
+ // %1:_(<2 x s16>) = G_BITCAST %0:_(<4 x s8>)
+ //
+ // =>
+ //
+ // %2:_(<2 x s8>), %3:_(<2 x s8>) = G_UNMERGE_VALUES %0
+ // %4:_(s16) = G_BITCAST %2
+ // %5:_(s16) = G_BITCAST %3
+ // %1:_(<2 x s16>) = G_BUILD_VECTOR %4, %5
+ SrcPartTy = LLT::vector(NumSrcElt / NumDstElt, SrcEltTy);
+ DstCastTy = DstEltTy;
+ }
+
+ getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcPartTy);
+ for (Register &SrcReg : SrcRegs)
+ SrcReg = MIRBuilder.buildBitcast(DstCastTy, SrcReg).getReg(0);
+ } else
+ getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcEltTy);
+
+ MIRBuilder.buildMerge(Dst, SrcRegs);
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ if (DstTy.isVector()) {
+ SmallVector<Register, 8> SrcRegs;
+ getUnmergePieces(SrcRegs, MIRBuilder, Src, DstTy.getElementType());
+ MIRBuilder.buildMerge(Dst, SrcRegs);
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::bitcast(MachineInstr &MI, unsigned TypeIdx, LLT CastTy) {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_LOAD: {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ Observer.changingInstr(MI);
+ bitcastDst(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_STORE: {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ Observer.changingInstr(MI);
+ bitcastSrc(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_SELECT: {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ if (MRI.getType(MI.getOperand(1).getReg()).isVector()) {
+ LLVM_DEBUG(
+ dbgs() << "bitcast action not implemented for vector select\n");
+ return UnableToLegalize;
+ }
+
+ Observer.changingInstr(MI);
+ bitcastSrc(MI, CastTy, 2);
+ bitcastSrc(MI, CastTy, 3);
+ bitcastDst(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_AND:
+ case TargetOpcode::G_OR:
+ case TargetOpcode::G_XOR: {
+ Observer.changingInstr(MI);
+ bitcastSrc(MI, CastTy, 1);
+ bitcastSrc(MI, CastTy, 2);
+ bitcastDst(MI, CastTy, 0);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ default:
+ return UnableToLegalize;
}
}
LegalizerHelper::LegalizeResult
LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
using namespace TargetOpcode;
- MIRBuilder.setInstr(MI);
switch(MI.getOpcode()) {
default:
return UnableToLegalize;
+ case TargetOpcode::G_BITCAST:
+ return lowerBitcast(MI);
case TargetOpcode::G_SREM:
case TargetOpcode::G_UREM: {
- Register QuotReg = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
- .addDef(QuotReg)
- .addUse(MI.getOperand(1).getReg())
- .addUse(MI.getOperand(2).getReg());
-
- Register ProdReg = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
- MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
- ProdReg);
+ auto Quot =
+ MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV, {Ty},
+ {MI.getOperand(1), MI.getOperand(2)});
+
+ auto Prod = MIRBuilder.buildMul(Ty, Quot, MI.getOperand(2));
+ MIRBuilder.buildSub(MI.getOperand(0), MI.getOperand(1), Prod);
MI.eraseFromParent();
return Legalized;
}
@@ -1970,36 +2358,30 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(2).getReg();
Register RHS = MI.getOperand(3).getReg();
- MIRBuilder.buildMul(Res, LHS, RHS);
-
unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO
? TargetOpcode::G_SMULH
: TargetOpcode::G_UMULH;
- Register HiPart = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildInstr(Opcode)
- .addDef(HiPart)
- .addUse(LHS)
- .addUse(RHS);
+ Observer.changingInstr(MI);
+ const auto &TII = MIRBuilder.getTII();
+ MI.setDesc(TII.get(TargetOpcode::G_MUL));
+ MI.RemoveOperand(1);
+ Observer.changedInstr(MI);
- Register Zero = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildConstant(Zero, 0);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+
+ auto HiPart = MIRBuilder.buildInstr(Opcode, {Ty}, {LHS, RHS});
+ auto Zero = MIRBuilder.buildConstant(Ty, 0);
// For *signed* multiply, overflow is detected by checking:
// (hi != (lo >> bitwidth-1))
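// i.e. the high half must equal the sign extension of the low half (all
// zeros or all ones) for a non-overflowing product.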
if (Opcode == TargetOpcode::G_SMULH) {
- Register Shifted = MRI.createGenericVirtualRegister(Ty);
- Register ShiftAmt = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
- MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
- .addDef(Shifted)
- .addUse(Res)
- .addUse(ShiftAmt);
+ auto ShiftAmt = MIRBuilder.buildConstant(Ty, Ty.getSizeInBits() - 1);
+ auto Shifted = MIRBuilder.buildAShr(Ty, Res, ShiftAmt);
MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
} else {
MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
}
- MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FNEG: {
@@ -2008,31 +2390,16 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
if (Ty.isVector())
return UnableToLegalize;
Register Res = MI.getOperand(0).getReg();
- Type *ZeroTy;
LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
- switch (Ty.getSizeInBits()) {
- case 16:
- ZeroTy = Type::getHalfTy(Ctx);
- break;
- case 32:
- ZeroTy = Type::getFloatTy(Ctx);
- break;
- case 64:
- ZeroTy = Type::getDoubleTy(Ctx);
- break;
- case 128:
- ZeroTy = Type::getFP128Ty(Ctx);
- break;
- default:
- llvm_unreachable("unexpected floating-point type");
- }
+ Type *ZeroTy = getFloatTypeForLLT(Ctx, Ty);
+ if (!ZeroTy)
+ return UnableToLegalize;
ConstantFP &ZeroForNegation =
*cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
Register SubByReg = MI.getOperand(1).getReg();
- Register ZeroReg = Zero->getOperand(0).getReg();
- MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg},
- MI.getFlags());
+ Register ZeroReg = Zero.getReg(0);
+ MIRBuilder.buildFSub(Res, ZeroReg, SubByReg, MI.getFlags());
MI.eraseFromParent();
return Legalized;
}
@@ -2046,13 +2413,15 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
Register Neg = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
- MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags());
+ MIRBuilder.buildFNeg(Neg, RHS);
+ MIRBuilder.buildFAdd(Res, LHS, Neg, MI.getFlags());
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FMAD:
return lowerFMad(MI);
+ case TargetOpcode::G_FFLOOR:
+ return lowerFFloor(MI);
case TargetOpcode::G_INTRINSIC_ROUND:
return lowerIntrinsicRound(MI);
case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
@@ -2089,7 +2458,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// result values together, before truncating back down to the non-pow-2
// type.
// E.g. v1 = i24 load =>
- // v2 = i32 load (2 byte)
+ // v2 = i32 zextload (2 byte)
// v3 = i32 load (1 byte)
// v4 = i32 shl v3, 16
// v5 = i32 or v4, v2
@@ -2110,11 +2479,11 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
LLT AnyExtTy = LLT::scalar(AnyExtSize);
Register LargeLdReg = MRI.createGenericVirtualRegister(AnyExtTy);
Register SmallLdReg = MRI.createGenericVirtualRegister(AnyExtTy);
- auto LargeLoad =
- MIRBuilder.buildLoad(LargeLdReg, PtrReg, *LargeMMO);
+ auto LargeLoad = MIRBuilder.buildLoadInstr(
+ TargetOpcode::G_ZEXTLOAD, LargeLdReg, PtrReg, *LargeMMO);
- auto OffsetCst =
- MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
+ auto OffsetCst = MIRBuilder.buildConstant(
+ LLT::scalar(PtrTy.getSizeInBits()), LargeSplitSize / 8);
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr =
MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
@@ -2186,8 +2555,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// Generate the PtrAdd and truncating stores.
LLT PtrTy = MRI.getType(PtrReg);
- auto OffsetCst =
- MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
+ auto OffsetCst = MIRBuilder.buildConstant(
+ LLT::scalar(PtrTy.getSizeInBits()), LargeSplitSize / 8);
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr =
MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
@@ -2226,12 +2595,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(2).getReg();
Register RHS = MI.getOperand(3).getReg();
Register CarryIn = MI.getOperand(4).getReg();
+ LLT Ty = MRI.getType(Res);
- Register TmpRes = MRI.createGenericVirtualRegister(Ty);
- Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
-
- MIRBuilder.buildAdd(TmpRes, LHS, RHS);
- MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
+ auto TmpRes = MIRBuilder.buildAdd(Ty, LHS, RHS);
+ auto ZExtCarryIn = MIRBuilder.buildZExt(Ty, CarryIn);
MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn);
MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS);
@@ -2256,17 +2623,15 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register LHS = MI.getOperand(2).getReg();
Register RHS = MI.getOperand(3).getReg();
Register BorrowIn = MI.getOperand(4).getReg();
+ const LLT CondTy = MRI.getType(BorrowOut);
+ const LLT Ty = MRI.getType(Res);
- Register TmpRes = MRI.createGenericVirtualRegister(Ty);
- Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
- Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
- Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
-
- MIRBuilder.buildSub(TmpRes, LHS, RHS);
- MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn);
+ auto TmpRes = MIRBuilder.buildSub(Ty, LHS, RHS);
+ auto ZExtBorrowIn = MIRBuilder.buildZExt(Ty, BorrowIn);
MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn);
- MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LHS_EQ_RHS, LHS, RHS);
- MIRBuilder.buildICmp(CmpInst::ICMP_ULT, LHS_ULT_RHS, LHS, RHS);
+
+ auto LHS_EQ_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, LHS, RHS);
+ auto LHS_ULT_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CondTy, LHS, RHS);
MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS);
MI.eraseFromParent();
@@ -2278,6 +2643,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return lowerSITOFP(MI, TypeIdx, Ty);
case G_FPTOUI:
return lowerFPTOUI(MI, TypeIdx, Ty);
+ case G_FPTOSI:
+ return lowerFPTOSI(MI);
+ case G_FPTRUNC:
+ return lowerFPTRUNC(MI, TypeIdx, Ty);
case G_SMIN:
case G_SMAX:
case G_UMIN:
@@ -2288,6 +2657,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
case G_FMINNUM:
case G_FMAXNUM:
return lowerFMinNumMaxNum(MI);
+ case G_MERGE_VALUES:
+ return lowerMergeValues(MI);
case G_UNMERGE_VALUES:
return lowerUnmergeValues(MI);
case TargetOpcode::G_SEXT_INREG: {
@@ -2300,8 +2671,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register TmpRes = MRI.createGenericVirtualRegister(DstTy);
auto MIBSz = MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - SizeInBits);
- MIRBuilder.buildInstr(TargetOpcode::G_SHL, {TmpRes}, {SrcReg, MIBSz->getOperand(0).getReg()});
- MIRBuilder.buildInstr(TargetOpcode::G_ASHR, {DstReg}, {TmpRes, MIBSz->getOperand(0).getReg()});
+ MIRBuilder.buildShl(TmpRes, SrcReg, MIBSz->getOperand(0));
+ MIRBuilder.buildAShr(DstReg, TmpRes, MIBSz->getOperand(0));
MI.eraseFromParent();
return Legalized;
}
@@ -2318,7 +2689,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
case G_BITREVERSE:
return lowerBitreverse(MI);
case G_READ_REGISTER:
- return lowerReadRegister(MI);
+ case G_WRITE_REGISTER:
+ return lowerReadWriteRegister(MI);
}
}
@@ -2350,99 +2722,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
return Legalized;
}
-LegalizerHelper::LegalizeResult
-LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy) {
- const unsigned Opc = MI.getOpcode();
- const unsigned NumOps = MI.getNumOperands() - 1;
- const unsigned NarrowSize = NarrowTy.getSizeInBits();
- const Register DstReg = MI.getOperand(0).getReg();
- const unsigned Flags = MI.getFlags();
- const LLT DstTy = MRI.getType(DstReg);
- const unsigned Size = DstTy.getSizeInBits();
- const int NumParts = Size / NarrowSize;
- const LLT EltTy = DstTy.getElementType();
- const unsigned EltSize = EltTy.getSizeInBits();
- const unsigned BitsForNumParts = NarrowSize * NumParts;
-
- // Check if we have any leftovers. If we do, then only handle the case where
- // the leftover is one element.
- if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size)
- return UnableToLegalize;
-
- if (BitsForNumParts != Size) {
- Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
- MIRBuilder.buildUndef(AccumDstReg);
-
- // Handle the pieces which evenly divide into the requested type with
- // extract/op/insert sequence.
- for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) {
- SmallVector<SrcOp, 4> SrcOps;
- for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
- Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset);
- SrcOps.push_back(PartOpReg);
- }
-
- Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
-
- Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
- MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset);
- AccumDstReg = PartInsertReg;
- }
-
- // Handle the remaining element sized leftover piece.
- SmallVector<SrcOp, 4> SrcOps;
- for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
- Register PartOpReg = MRI.createGenericVirtualRegister(EltTy);
- MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(),
- BitsForNumParts);
- SrcOps.push_back(PartOpReg);
- }
-
- Register PartDstReg = MRI.createGenericVirtualRegister(EltTy);
- MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
- MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts);
- MI.eraseFromParent();
-
- return Legalized;
- }
-
- SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
-
- extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
-
- if (NumOps >= 2)
- extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src1Regs);
-
- if (NumOps >= 3)
- extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs);
-
- for (int i = 0; i < NumParts; ++i) {
- Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
-
- if (NumOps == 1)
- MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags);
- else if (NumOps == 2) {
- MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i], Src1Regs[i]}, Flags);
- } else if (NumOps == 3) {
- MIRBuilder.buildInstr(Opc, {DstReg},
- {Src0Regs[i], Src1Regs[i], Src2Regs[i]}, Flags);
- }
-
- DstRegs.push_back(DstReg);
- }
-
- if (NarrowTy.isVector())
- MIRBuilder.buildConcatVectors(DstReg, DstRegs);
- else
- MIRBuilder.buildBuildVector(DstReg, DstRegs);
-
- MI.eraseFromParent();
- return Legalized;
-}
-
// Handle splitting vector operations which need to have the same number of
// elements in each type index, but each type index may have a different element
// type.
@@ -2482,7 +2761,6 @@ LegalizerHelper::fewerElementsVectorMultiEltType(
SmallVector<Register, 4> PartRegs, LeftoverRegs;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
- LLT LeftoverTy;
Register SrcReg = MI.getOperand(I).getReg();
LLT SrcTyI = MRI.getType(SrcReg);
LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType());
@@ -2571,9 +2849,8 @@ LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
for (unsigned I = 0; I < NumParts; ++I) {
Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
- MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode())
- .addDef(DstReg)
- .addUse(SrcRegs[I]);
+ MachineInstr *NewInst =
+ MIRBuilder.buildInstr(MI.getOpcode(), {DstReg}, {SrcRegs[I]});
NewInst->setFlags(MI.getFlags());
DstRegs.push_back(DstReg);
@@ -2913,6 +3190,12 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
Register AddrReg = MI.getOperand(1).getReg();
LLT ValTy = MRI.getType(ValReg);
+ // FIXME: Do we need a distinct NarrowMemory legalize action?
+ if (ValTy.getSizeInBits() != 8 * MMO->getSize()) {
+ LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
+ return UnableToLegalize;
+ }
+
int NumParts = -1;
int NumLeftover = -1;
LLT LeftoverTy;
@@ -2981,14 +3264,147 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::reduceOperationWidth(MachineInstr &MI, unsigned int TypeIdx,
+ LLT NarrowTy) {
+ assert(TypeIdx == 0 && "only one type index expected");
+
+ const unsigned Opc = MI.getOpcode();
+ const int NumOps = MI.getNumOperands() - 1;
+ const Register DstReg = MI.getOperand(0).getReg();
+ const unsigned Flags = MI.getFlags();
+ const unsigned NarrowSize = NarrowTy.getSizeInBits();
+ const LLT NarrowScalarTy = LLT::scalar(NarrowSize);
+
+ assert(NumOps <= 3 && "expected instruction with 1 result and 1-3 sources");
+
+ // First of all check whether we are narrowing (changing the element type)
+ // or reducing the vector elements
+ const LLT DstTy = MRI.getType(DstReg);
+ const bool IsNarrow = NarrowTy.getScalarType() != DstTy.getScalarType();
+
+ SmallVector<Register, 8> ExtractedRegs[3];
+ SmallVector<Register, 8> Parts;
+
+ unsigned NarrowElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
+
+ // Break down all the sources into NarrowTy pieces we can operate on. This may
+ // involve creating merges to a wider type, padded with undef.
+ for (int I = 0; I != NumOps; ++I) {
+ Register SrcReg = MI.getOperand(I + 1).getReg();
+ LLT SrcTy = MRI.getType(SrcReg);
+
+ // The type to narrow SrcReg to. For narrowing, this is a smaller scalar.
+ // For fewerElements, this is a smaller vector with the same element type.
+ LLT OpNarrowTy;
+ if (IsNarrow) {
+ OpNarrowTy = NarrowScalarTy;
+
+ // In the case of narrowing, we need to cast vectors to scalars for this to
+ // work properly.
+ // FIXME: Can we do without the bitcast here if we're narrowing?
+ if (SrcTy.isVector()) {
+ SrcTy = LLT::scalar(SrcTy.getSizeInBits());
+ SrcReg = MIRBuilder.buildBitcast(SrcTy, SrcReg).getReg(0);
+ }
+ } else {
+ OpNarrowTy = LLT::scalarOrVector(NarrowElts, SrcTy.getScalarType());
+ }
+
+ LLT GCDTy = extractGCDType(ExtractedRegs[I], SrcTy, OpNarrowTy, SrcReg);
+
+ // Build a sequence of NarrowTy pieces in ExtractedRegs for this operand.
+ buildLCMMergePieces(SrcTy, OpNarrowTy, GCDTy, ExtractedRegs[I],
+ TargetOpcode::G_ANYEXT);
+ }
+
+ SmallVector<Register, 8> ResultRegs;
+
+ // Input operands for each sub-instruction.
+ SmallVector<SrcOp, 4> InputRegs(NumOps, Register());
+
+ int NumParts = ExtractedRegs[0].size();
+ const unsigned DstSize = DstTy.getSizeInBits();
+ const LLT DstScalarTy = LLT::scalar(DstSize);
+
+ // Narrowing needs to use scalar types
+ LLT DstLCMTy, NarrowDstTy;
+ if (IsNarrow) {
+ DstLCMTy = getLCMType(DstScalarTy, NarrowScalarTy);
+ NarrowDstTy = NarrowScalarTy;
+ } else {
+ DstLCMTy = getLCMType(DstTy, NarrowTy);
+ NarrowDstTy = NarrowTy;
+ }
+
+ // We widened the source registers to satisfy merge/unmerge size
+ // constraints. We'll have some extra fully undef parts.
+ const int NumRealParts = (DstSize + NarrowSize - 1) / NarrowSize;
+
+ for (int I = 0; I != NumRealParts; ++I) {
+ // Emit this instruction on each of the split pieces.
+ for (int J = 0; J != NumOps; ++J)
+ InputRegs[J] = ExtractedRegs[J][I];
+
+ auto Inst = MIRBuilder.buildInstr(Opc, {NarrowDstTy}, InputRegs, Flags);
+ ResultRegs.push_back(Inst.getReg(0));
+ }
+
+ // Fill out the widened result with undef instead of creating instructions
+ // with undef inputs.
+ int NumUndefParts = NumParts - NumRealParts;
+ if (NumUndefParts != 0)
+ ResultRegs.append(NumUndefParts,
+ MIRBuilder.buildUndef(NarrowDstTy).getReg(0));
+
+ // Extract the possibly padded result. Use a scratch register if we need to do
+ // a final bitcast, otherwise use the original result register.
+ Register MergeDstReg;
+ if (IsNarrow && DstTy.isVector())
+ MergeDstReg = MRI.createGenericVirtualRegister(DstScalarTy);
+ else
+ MergeDstReg = DstReg;
+
+ buildWidenedRemergeToDst(MergeDstReg, DstLCMTy, ResultRegs);
+
+ // Recast to vector if we narrowed a vector
+ if (IsNarrow && DstTy.isVector())
+ MIRBuilder.buildBitcast(DstReg, MergeDstReg);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ int64_t Imm = MI.getOperand(2).getImm();
+
+ LLT DstTy = MRI.getType(DstReg);
+
+ SmallVector<Register, 8> Parts;
+ LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg);
+ LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts);
+
+ for (Register &R : Parts)
+ R = MIRBuilder.buildSExtInReg(NarrowTy, R, Imm).getReg(0);
+
+ buildWidenedRemergeToDst(DstReg, LCMTy, Parts);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
using namespace TargetOpcode;
- MIRBuilder.setInstr(MI);
switch (MI.getOpcode()) {
case G_IMPLICIT_DEF:
return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy);
+ case G_TRUNC:
case G_AND:
case G_OR:
case G_XOR:
@@ -3038,7 +3454,14 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
case G_FMAXNUM_IEEE:
case G_FMINIMUM:
case G_FMAXIMUM:
- return fewerElementsVectorBasic(MI, TypeIdx, NarrowTy);
+ case G_FSHL:
+ case G_FSHR:
+ case G_FREEZE:
+ case G_SADDSAT:
+ case G_SSUBSAT:
+ case G_UADDSAT:
+ case G_USUBSAT:
+ return reduceOperationWidth(MI, TypeIdx, NarrowTy);
case G_SHL:
case G_LSHR:
case G_ASHR:
@@ -3076,6 +3499,8 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
case G_LOAD:
case G_STORE:
return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
+ case G_SEXT_INREG:
+ return fewerElementsVectorSextInReg(MI, TypeIdx, NarrowTy);
default:
return UnableToLegalize;
}
@@ -3087,10 +3512,10 @@ LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
Register InL = MRI.createGenericVirtualRegister(HalfTy);
Register InH = MRI.createGenericVirtualRegister(HalfTy);
- MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
+ MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
if (Amt.isNullValue()) {
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH});
+ MIRBuilder.buildMerge(MI.getOperand(0), {InL, InH});
MI.eraseFromParent();
return Legalized;
}
@@ -3163,7 +3588,7 @@ LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
}
}
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()});
+ MIRBuilder.buildMerge(MI.getOperand(0), {Lo, Hi});
MI.eraseFromParent();
return Legalized;
@@ -3211,7 +3636,7 @@ LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
Register InL = MRI.createGenericVirtualRegister(HalfTy);
Register InH = MRI.createGenericVirtualRegister(HalfTy);
- MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
+ MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
@@ -3302,7 +3727,6 @@ LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
LegalizerHelper::LegalizeResult
LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
LLT MoreTy) {
- MIRBuilder.setInstr(MI);
unsigned Opc = MI.getOpcode();
switch (Opc) {
case TargetOpcode::G_IMPLICIT_DEF:
@@ -3349,6 +3773,7 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
Observer.changedInstr(MI);
return Legalized;
case TargetOpcode::G_INSERT:
+ case TargetOpcode::G_FREEZE:
if (TypeIdx != 0)
return UnableToLegalize;
Observer.changingInstr(MI);
@@ -3479,10 +3904,10 @@ LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
- SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
+ SmallVector<Register, 2> Src1Parts, Src2Parts;
+ SmallVector<Register, 2> DstTmpRegs(DstTmpParts);
extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
- DstTmpRegs.resize(DstTmpParts);
multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
// Take only high half of registers if this is high mul.
@@ -3550,10 +3975,12 @@ LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
}
Register DstReg = MI.getOperand(0).getReg();
- if(MRI.getType(DstReg).isVector())
+ if (MRI.getType(DstReg).isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
- else
+ else if (DstRegs.size() > 1)
MIRBuilder.buildMerge(DstReg, DstRegs);
+ else
+ MIRBuilder.buildCopy(DstReg, DstRegs[0]);
MI.eraseFromParent();
return Legalized;
}
@@ -3657,14 +4084,14 @@ LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy},
{Src0Regs[I], Src1Regs[I]});
- DstRegs.push_back(Inst->getOperand(0).getReg());
+ DstRegs.push_back(Inst.getReg(0));
}
for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
auto Inst = MIRBuilder.buildInstr(
MI.getOpcode(),
{LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]});
- DstLeftoverRegs.push_back(Inst->getOperand(0).getReg());
+ DstLeftoverRegs.push_back(Inst.getReg(0));
}
insertParts(DstReg, DstTy, NarrowTy, DstRegs,
@@ -3675,6 +4102,28 @@ LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarExt(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+
+ LLT DstTy = MRI.getType(DstReg);
+ if (DstTy.isVector())
+ return UnableToLegalize;
+
+ SmallVector<Register, 8> Parts;
+ LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg);
+ LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts, MI.getOpcode());
+ buildWidenedRemergeToDst(DstReg, LCMTy, Parts);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
if (TypeIdx != 0)
@@ -3704,13 +4153,13 @@ LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) {
auto Select = MIRBuilder.buildSelect(NarrowTy,
CondReg, Src1Regs[I], Src2Regs[I]);
- DstRegs.push_back(Select->getOperand(0).getReg());
+ DstRegs.push_back(Select.getReg(0));
}
for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
auto Select = MIRBuilder.buildSelect(
LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]);
- DstLeftoverRegs.push_back(Select->getOperand(0).getReg());
+ DstLeftoverRegs.push_back(Select.getReg(0));
}
insertParts(DstReg, DstTy, NarrowTy, DstRegs,
@@ -3721,6 +4170,103 @@ LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+ unsigned NarrowSize = NarrowTy.getSizeInBits();
+
+ if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
+ const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF;
+
+ MachineIRBuilder &B = MIRBuilder;
+ auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg);
+ // ctlz(Hi:Lo) -> Hi == 0 ? (NarrowSize + ctlz(Lo)) : ctlz(Hi)
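+ // e.g. narrowing a s64 ctlz to s32 halves: if the high half is zero, the
+ // leading zeros are 32 plus ctlz of the low half; otherwise they are just
+ // ctlz of the high half.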
+ auto C_0 = B.buildConstant(NarrowTy, 0);
+ auto HiIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
+ UnmergeSrc.getReg(1), C_0);
+ auto LoCTLZ = IsUndef ?
+ B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0)) :
+ B.buildCTLZ(DstTy, UnmergeSrc.getReg(0));
+ auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize);
+ auto HiIsZeroCTLZ = B.buildAdd(DstTy, LoCTLZ, C_NarrowSize);
+ auto HiCTLZ = B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1));
+ B.buildSelect(DstReg, HiIsZero, HiIsZeroCTLZ, HiCTLZ);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+ unsigned NarrowSize = NarrowTy.getSizeInBits();
+
+ if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
+ const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTTZ_ZERO_UNDEF;
+
+ MachineIRBuilder &B = MIRBuilder;
+ auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg);
+ // cttz(Hi:Lo) -> Lo == 0 ? (cttz(Hi) + NarrowSize) : cttz(Lo)
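+ // e.g. narrowing a s64 cttz to s32 halves: if the low half is zero, the
+ // trailing zeros are cttz of the high half plus 32; otherwise they are just
+ // cttz of the low half.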
+ auto C_0 = B.buildConstant(NarrowTy, 0);
+ auto LoIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
+ UnmergeSrc.getReg(0), C_0);
+ auto HiCTTZ = IsUndef ?
+ B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1)) :
+ B.buildCTTZ(DstTy, UnmergeSrc.getReg(1));
+ auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize);
+ auto LoIsZeroCTTZ = B.buildAdd(DstTy, HiCTTZ, C_NarrowSize);
+ auto LoCTTZ = B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0));
+ B.buildSelect(DstReg, LoIsZero, LoIsZeroCTTZ, LoCTTZ);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy) {
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
+ unsigned NarrowSize = NarrowTy.getSizeInBits();
+
+ if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
+ auto UnmergeSrc = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1));
+
+ auto LoCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(0));
+ auto HiCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(1));
+ MIRBuilder.buildAdd(DstReg, HiCTPOP, LoCTPOP);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
unsigned Opc = MI.getOpcode();
auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
@@ -3739,18 +4285,20 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
case TargetOpcode::G_CTLZ: {
+ Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
- unsigned Len = Ty.getSizeInBits();
- if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) {
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+ unsigned Len = SrcTy.getSizeInBits();
+
+ if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {DstTy, SrcTy}})) {
// If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
- auto MIBCtlzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF,
- {Ty}, {SrcReg});
- auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
- auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
- auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
- SrcReg, MIBZero);
- MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
- MIBCtlzZU);
+ auto CtlzZU = MIRBuilder.buildCTLZ_ZERO_UNDEF(DstTy, SrcReg);
+ auto ZeroSrc = MIRBuilder.buildConstant(SrcTy, 0);
+ auto ICmp = MIRBuilder.buildICmp(
+ CmpInst::ICMP_EQ, SrcTy.changeElementSize(1), SrcReg, ZeroSrc);
+ auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
+ MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CtlzZU);
MI.eraseFromParent();
return Legalized;
}
@@ -3768,16 +4316,14 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
Register Op = SrcReg;
unsigned NewLen = PowerOf2Ceil(Len);
for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
- auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
- auto MIBOp = MIRBuilder.buildInstr(
- TargetOpcode::G_OR, {Ty},
- {Op, MIRBuilder.buildInstr(TargetOpcode::G_LSHR, {Ty},
- {Op, MIBShiftAmt})});
- Op = MIBOp->getOperand(0).getReg();
+ auto MIBShiftAmt = MIRBuilder.buildConstant(SrcTy, 1ULL << i);
+ auto MIBOp = MIRBuilder.buildOr(
+ SrcTy, Op, MIRBuilder.buildLShr(SrcTy, Op, MIBShiftAmt));
+ Op = MIBOp.getReg(0);
}
- auto MIBPop = MIRBuilder.buildInstr(TargetOpcode::G_CTPOP, {Ty}, {Op});
- MIRBuilder.buildInstr(TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
- {MIRBuilder.buildConstant(Ty, Len), MIBPop});
+ auto MIBPop = MIRBuilder.buildCTPOP(DstTy, Op);
+ MIRBuilder.buildSub(MI.getOperand(0), MIRBuilder.buildConstant(DstTy, Len),
+ MIBPop);
MI.eraseFromParent();
return Legalized;
}
@@ -3789,19 +4335,21 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
case TargetOpcode::G_CTTZ: {
+ Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
- unsigned Len = Ty.getSizeInBits();
- if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+
+ unsigned Len = SrcTy.getSizeInBits();
+ if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {DstTy, SrcTy}})) {
// If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
// zero.
- auto MIBCttzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF,
- {Ty}, {SrcReg});
- auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
- auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
- auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
- SrcReg, MIBZero);
- MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
- MIBCttzZU);
+ auto CttzZU = MIRBuilder.buildCTTZ_ZERO_UNDEF(DstTy, SrcReg);
+ auto Zero = MIRBuilder.buildConstant(SrcTy, 0);
+ auto ICmp = MIRBuilder.buildICmp(
+ CmpInst::ICMP_EQ, DstTy.changeElementSize(1), SrcReg, Zero);
+ auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
+ MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CttzZU);
MI.eraseFromParent();
return Legalized;
}
@@ -3810,24 +4358,70 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// { return 32 - nlz(~x & (x-1)); }
// Ref: "Hacker's Delight" by Henry Warren
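// e.g. x = 0b1000: x - 1 = 0b0111 and ~x & (x - 1) = 0b0111, so for a 32-bit
// value the result is 32 - nlz(0b0111) = 32 - 29 = 3 trailing zeros.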
auto MIBCstNeg1 = MIRBuilder.buildConstant(Ty, -1);
- auto MIBNot =
- MIRBuilder.buildInstr(TargetOpcode::G_XOR, {Ty}, {SrcReg, MIBCstNeg1});
- auto MIBTmp = MIRBuilder.buildInstr(
- TargetOpcode::G_AND, {Ty},
- {MIBNot, MIRBuilder.buildInstr(TargetOpcode::G_ADD, {Ty},
- {SrcReg, MIBCstNeg1})});
+ auto MIBNot = MIRBuilder.buildXor(Ty, SrcReg, MIBCstNeg1);
+ auto MIBTmp = MIRBuilder.buildAnd(
+ Ty, MIBNot, MIRBuilder.buildAdd(Ty, SrcReg, MIBCstNeg1));
if (!isSupported({TargetOpcode::G_CTPOP, {Ty, Ty}}) &&
isSupported({TargetOpcode::G_CTLZ, {Ty, Ty}})) {
auto MIBCstLen = MIRBuilder.buildConstant(Ty, Len);
- MIRBuilder.buildInstr(
- TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
- {MIBCstLen,
- MIRBuilder.buildInstr(TargetOpcode::G_CTLZ, {Ty}, {MIBTmp})});
+ MIRBuilder.buildSub(MI.getOperand(0), MIBCstLen,
+ MIRBuilder.buildCTLZ(Ty, MIBTmp));
MI.eraseFromParent();
return Legalized;
}
MI.setDesc(TII.get(TargetOpcode::G_CTPOP));
- MI.getOperand(1).setReg(MIBTmp->getOperand(0).getReg());
+ MI.getOperand(1).setReg(MIBTmp.getReg(0));
+ return Legalized;
+ }
+ case TargetOpcode::G_CTPOP: {
+ unsigned Size = Ty.getSizeInBits();
+ MachineIRBuilder &B = MIRBuilder;
+
+ // Count set bits in blocks of 2 bits. The default approach would be
+ // B2Count = { val & 0x55555555 } + { (val >> 1) & 0x55555555 }
+ // We use the following formula instead:
+ // B2Count = val - { (val >> 1) & 0x55555555 }
+ // since it gives the same result in blocks of 2 with one instruction less.
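+ // e.g. for the 2-bit block 0b11: 0b11 - 0b01 = 0b10 (two set bits), and for
+ // 0b10: 0b10 - 0b01 = 0b01 (one set bit).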
+ auto C_1 = B.buildConstant(Ty, 1);
+ auto B2Set1LoTo1Hi = B.buildLShr(Ty, MI.getOperand(1).getReg(), C_1);
+ APInt B2Mask1HiTo0 = APInt::getSplat(Size, APInt(8, 0x55));
+ auto C_B2Mask1HiTo0 = B.buildConstant(Ty, B2Mask1HiTo0);
+ auto B2Count1Hi = B.buildAnd(Ty, B2Set1LoTo1Hi, C_B2Mask1HiTo0);
+ auto B2Count = B.buildSub(Ty, MI.getOperand(1).getReg(), B2Count1Hi);
+
+ // To get the count in blocks of 4, add the values from adjacent blocks of 2.
+ // B4Count = { B2Count & 0x33333333 } + { (B2Count >> 2) & 0x33333333 }
+ auto C_2 = B.buildConstant(Ty, 2);
+ auto B4Set2LoTo2Hi = B.buildLShr(Ty, B2Count, C_2);
+ APInt B4Mask2HiTo0 = APInt::getSplat(Size, APInt(8, 0x33));
+ auto C_B4Mask2HiTo0 = B.buildConstant(Ty, B4Mask2HiTo0);
+ auto B4HiB2Count = B.buildAnd(Ty, B4Set2LoTo2Hi, C_B4Mask2HiTo0);
+ auto B4LoB2Count = B.buildAnd(Ty, B2Count, C_B4Mask2HiTo0);
+ auto B4Count = B.buildAdd(Ty, B4HiB2Count, B4LoB2Count);
+
+ // For the count in blocks of 8 bits we don't have to mask the high 4 bits
+ // before the addition, since each sum sits in the range {0,...,8} and 4 bits
+ // are enough to hold it. After the addition the high 4 bits of each byte
+ // contain leftover partial sums; clear them to get the 8-bit block counts.
+ // B8Count = { B4Count + (B4Count >> 4) } & 0x0F0F0F0F
+ auto C_4 = B.buildConstant(Ty, 4);
+ auto B8HiB4Count = B.buildLShr(Ty, B4Count, C_4);
+ auto B8CountDirty4Hi = B.buildAdd(Ty, B8HiB4Count, B4Count);
+ APInt B8Mask4HiTo0 = APInt::getSplat(Size, APInt(8, 0x0F));
+ auto C_B8Mask4HiTo0 = B.buildConstant(Ty, B8Mask4HiTo0);
+ auto B8Count = B.buildAnd(Ty, B8CountDirty4Hi, C_B8Mask4HiTo0);
+
+ assert(Size <= 128 && "Scalar size is too large for CTPOP lower algorithm");
+ // 8 bits can hold the CTPOP result of a 128-bit (or smaller) integer.
+ // Multiplying by this bitmask sets the 8 MSBs of ResTmp to the sum of all
+ // the 8-bit block counts.
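+ // e.g. for a 32-bit value, multiplying B8Count by 0x01010101 leaves the sum
+ // of the four per-byte counts (the total population count) in bits 31..24,
+ // which the shift by Size - 8 below moves into the low bits.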
+ auto MulMask = B.buildConstant(Ty, APInt::getSplat(Size, APInt(8, 0x01)));
+ auto ResTmp = B.buildMul(Ty, B8Count, MulMask);
+
+ // Shift count result from 8 high bits to low bits.
+ auto C_SizeM8 = B.buildConstant(Ty, Size - 8);
+ B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8);
+
+ MI.eraseFromParent();
return Legalized;
}
}
@@ -3888,6 +4482,7 @@ LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0);
MIRBuilder.buildAdd(Dst, V, R);
+ MI.eraseFromParent();
return Legalized;
}
@@ -3960,6 +4555,7 @@ LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S,
MIRBuilder.buildConstant(S64, 0));
MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
+ MI.eraseFromParent();
return Legalized;
}
@@ -4010,6 +4606,195 @@ LegalizerHelper::lowerFPTOUI(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOSI(MachineInstr &MI) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+ const LLT S64 = LLT::scalar(64);
+ const LLT S32 = LLT::scalar(32);
+
+ // FIXME: Only f32 to i64 conversions are supported.
+ if (SrcTy.getScalarType() != S32 || DstTy.getScalarType() != S64)
+ return UnableToLegalize;
+
+ // Expand f32 -> i64 conversion
+ // This algorithm comes from compiler-rt's implementation of fixsfdi:
+ // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
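+ // In outline: extract the exponent and mantissa fields, restore the implicit
+ // leading mantissa bit, shift the significand left or right depending on
+ // whether the unbiased exponent exceeds the mantissa width (23), apply the
+ // sign with an xor/subtract, and produce zero when the unbiased exponent is
+ // negative.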
+
+ unsigned SrcEltBits = SrcTy.getScalarSizeInBits();
+
+ auto ExponentMask = MIRBuilder.buildConstant(SrcTy, 0x7F800000);
+ auto ExponentLoBit = MIRBuilder.buildConstant(SrcTy, 23);
+
+ auto AndExpMask = MIRBuilder.buildAnd(SrcTy, Src, ExponentMask);
+ auto ExponentBits = MIRBuilder.buildLShr(SrcTy, AndExpMask, ExponentLoBit);
+
+ auto SignMask = MIRBuilder.buildConstant(SrcTy,
+ APInt::getSignMask(SrcEltBits));
+ auto AndSignMask = MIRBuilder.buildAnd(SrcTy, Src, SignMask);
+ auto SignLowBit = MIRBuilder.buildConstant(SrcTy, SrcEltBits - 1);
+ auto Sign = MIRBuilder.buildAShr(SrcTy, AndSignMask, SignLowBit);
+ Sign = MIRBuilder.buildSExt(DstTy, Sign);
+
+ auto MantissaMask = MIRBuilder.buildConstant(SrcTy, 0x007FFFFF);
+ auto AndMantissaMask = MIRBuilder.buildAnd(SrcTy, Src, MantissaMask);
+ auto K = MIRBuilder.buildConstant(SrcTy, 0x00800000);
+
+ auto R = MIRBuilder.buildOr(SrcTy, AndMantissaMask, K);
+ R = MIRBuilder.buildZExt(DstTy, R);
+
+ auto Bias = MIRBuilder.buildConstant(SrcTy, 127);
+ auto Exponent = MIRBuilder.buildSub(SrcTy, ExponentBits, Bias);
+ auto SubExponent = MIRBuilder.buildSub(SrcTy, Exponent, ExponentLoBit);
+ auto ExponentSub = MIRBuilder.buildSub(SrcTy, ExponentLoBit, Exponent);
+
+ auto Shl = MIRBuilder.buildShl(DstTy, R, SubExponent);
+ auto Srl = MIRBuilder.buildLShr(DstTy, R, ExponentSub);
+
+ const LLT S1 = LLT::scalar(1);
+ auto CmpGt = MIRBuilder.buildICmp(CmpInst::ICMP_SGT,
+ S1, Exponent, ExponentLoBit);
+
+ R = MIRBuilder.buildSelect(DstTy, CmpGt, Shl, Srl);
+
+ auto XorSign = MIRBuilder.buildXor(DstTy, R, Sign);
+ auto Ret = MIRBuilder.buildSub(DstTy, XorSign, Sign);
+
+ auto ZeroSrcTy = MIRBuilder.buildConstant(SrcTy, 0);
+
+ auto ExponentLt0 = MIRBuilder.buildICmp(CmpInst::ICMP_SLT,
+ S1, Exponent, ZeroSrcTy);
+
+ auto ZeroDstTy = MIRBuilder.buildConstant(DstTy, 0);
+ MIRBuilder.buildSelect(Dst, ExponentLt0, ZeroDstTy, Ret);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+// f64 -> f16 conversion using round-to-nearest-even rounding mode.
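+// The double is processed as two 32-bit halves (U = low word, UH = high
+// word): the 11-bit exponent (bias 1023) is rebiased to the 5-bit f16
+// exponent (bias 15), the mantissa is narrowed to 10 bits with the discarded
+// bits folded in as sticky bits for round-to-nearest-even, and denormal,
+// infinity and NaN inputs are handled as separate cases.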
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFPTRUNC_F64_TO_F16(MachineInstr &MI) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+
+ if (MRI.getType(Src).isVector()) // TODO: Handle vectors directly.
+ return UnableToLegalize;
+
+ const unsigned ExpMask = 0x7ff;
+ const unsigned ExpBiasf64 = 1023;
+ const unsigned ExpBiasf16 = 15;
+ const LLT S32 = LLT::scalar(32);
+ const LLT S1 = LLT::scalar(1);
+
+ auto Unmerge = MIRBuilder.buildUnmerge(S32, Src);
+ Register U = Unmerge.getReg(0);
+ Register UH = Unmerge.getReg(1);
+
+ auto E = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 20));
+ E = MIRBuilder.buildAnd(S32, E, MIRBuilder.buildConstant(S32, ExpMask));
+
+ // Subtract the fp64 exponent bias (1023) to get the real exponent and
+ // add the f16 bias (15) to get the biased exponent for the f16 format.
+ E = MIRBuilder.buildAdd(
+ S32, E, MIRBuilder.buildConstant(S32, -ExpBiasf64 + ExpBiasf16));
+
+ auto M = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 8));
+ M = MIRBuilder.buildAnd(S32, M, MIRBuilder.buildConstant(S32, 0xffe));
+
+ auto MaskedSig = MIRBuilder.buildAnd(S32, UH,
+ MIRBuilder.buildConstant(S32, 0x1ff));
+ MaskedSig = MIRBuilder.buildOr(S32, MaskedSig, U);
+
+ auto Zero = MIRBuilder.buildConstant(S32, 0);
+ auto SigCmpNE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, MaskedSig, Zero);
+ auto Lo40Set = MIRBuilder.buildZExt(S32, SigCmpNE0);
+ M = MIRBuilder.buildOr(S32, M, Lo40Set);
+
+ // (M != 0 ? 0x0200 : 0) | 0x7c00;
+ auto Bits0x200 = MIRBuilder.buildConstant(S32, 0x0200);
+ auto CmpM_NE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, M, Zero);
+ auto SelectCC = MIRBuilder.buildSelect(S32, CmpM_NE0, Bits0x200, Zero);
+
+ auto Bits0x7c00 = MIRBuilder.buildConstant(S32, 0x7c00);
+ auto I = MIRBuilder.buildOr(S32, SelectCC, Bits0x7c00);
+
+ // N = M | (E << 12);
+ auto EShl12 = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 12));
+ auto N = MIRBuilder.buildOr(S32, M, EShl12);
+
+ // B = clamp(1-E, 0, 13);
+ auto One = MIRBuilder.buildConstant(S32, 1);
+ auto OneSubExp = MIRBuilder.buildSub(S32, One, E);
+ auto B = MIRBuilder.buildSMax(S32, OneSubExp, Zero);
+ B = MIRBuilder.buildSMin(S32, B, MIRBuilder.buildConstant(S32, 13));
+
+ auto SigSetHigh = MIRBuilder.buildOr(S32, M,
+ MIRBuilder.buildConstant(S32, 0x1000));
+
+ auto D = MIRBuilder.buildLShr(S32, SigSetHigh, B);
+ auto D0 = MIRBuilder.buildShl(S32, D, B);
+
+ auto D0_NE_SigSetHigh = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1,
+ D0, SigSetHigh);
+ auto D1 = MIRBuilder.buildZExt(S32, D0_NE_SigSetHigh);
+ D = MIRBuilder.buildOr(S32, D, D1);
+
+ auto CmpELtOne = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, S1, E, One);
+ auto V = MIRBuilder.buildSelect(S32, CmpELtOne, D, N);
+
+ auto VLow3 = MIRBuilder.buildAnd(S32, V, MIRBuilder.buildConstant(S32, 7));
+ V = MIRBuilder.buildLShr(S32, V, MIRBuilder.buildConstant(S32, 2));
+
+ auto VLow3Eq3 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, VLow3,
+ MIRBuilder.buildConstant(S32, 3));
+ auto V0 = MIRBuilder.buildZExt(S32, VLow3Eq3);
+
+ auto VLow3Gt5 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1, VLow3,
+ MIRBuilder.buildConstant(S32, 5));
+ auto V1 = MIRBuilder.buildZExt(S32, VLow3Gt5);
+
+ V1 = MIRBuilder.buildOr(S32, V0, V1);
+ V = MIRBuilder.buildAdd(S32, V, V1);
+
+ auto CmpEGt30 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1,
+ E, MIRBuilder.buildConstant(S32, 30));
+ V = MIRBuilder.buildSelect(S32, CmpEGt30,
+ MIRBuilder.buildConstant(S32, 0x7c00), V);
+
+ auto CmpEGt1039 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1,
+ E, MIRBuilder.buildConstant(S32, 1039));
+ V = MIRBuilder.buildSelect(S32, CmpEGt1039, I, V);
+
+ // Extract the sign bit.
+ auto Sign = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 16));
+ Sign = MIRBuilder.buildAnd(S32, Sign, MIRBuilder.buildConstant(S32, 0x8000));
+
+ // Insert the sign bit
+ V = MIRBuilder.buildOr(S32, Sign, V);
+
+ MIRBuilder.buildTrunc(Dst, V);
+ MI.eraseFromParent();
+ return Legalized;
+}
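
Editor's note: a hedged sketch of just the final rounding step above. After the shifts, bit 2 of V is the result's low bit and bits 1:0 are the discarded bits, so VLow3 == 3 and VLow3 > 5 are exactly the cases where round-to-nearest-even increments.

#include <cstdint>

// Illustrative only; mirrors the VLow3/V0/V1 logic emitted above.
static uint32_t roundNearestEvenStep(uint32_t V) {
  uint32_t Low3 = V & 7;   // bit 2: result LSB, bits 1:0: discarded bits
  V >>= 2;
  V += (Low3 == 3) | (Low3 > 5);
  return V;
}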
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFPTRUNC(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+ const LLT S64 = LLT::scalar(64);
+ const LLT S16 = LLT::scalar(16);
+
+ if (DstTy.getScalarType() == S16 && SrcTy.getScalarType() == S64)
+ return lowerFPTRUNC_F64_TO_F16(MI);
+
+ return UnableToLegalize;
+}
+
static CmpInst::Predicate minMaxToCompare(unsigned Opc) {
switch (Opc) {
case TargetOpcode::G_SMIN:
@@ -4063,7 +4848,7 @@ LegalizerHelper::lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
MachineInstr *Or;
if (Src0Ty == Src1Ty) {
- auto And1 = MIRBuilder.buildAnd(Src1Ty, Src0, SignBitMask);
+ auto And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask);
Or = MIRBuilder.buildOr(Dst, And0, And1);
} else if (Src0Size > Src1Size) {
auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size);
@@ -4136,6 +4921,39 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) {
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
Register DstReg = MI.getOperand(0).getReg();
+ Register X = MI.getOperand(1).getReg();
+ const unsigned Flags = MI.getFlags();
+ const LLT Ty = MRI.getType(DstReg);
+ const LLT CondTy = Ty.changeElementSize(1);
+
+ // round(x) =>
+ // t = trunc(x);
+ // d = fabs(x - t);
+ // o = copysign(1.0f, x);
+ // return t + (d >= 0.5 ? o : 0.0);
+
+ auto T = MIRBuilder.buildIntrinsicTrunc(Ty, X, Flags);
+
+ auto Diff = MIRBuilder.buildFSub(Ty, X, T, Flags);
+ auto AbsDiff = MIRBuilder.buildFAbs(Ty, Diff, Flags);
+ auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
+ auto One = MIRBuilder.buildFConstant(Ty, 1.0);
+ auto Half = MIRBuilder.buildFConstant(Ty, 0.5);
+ auto SignOne = MIRBuilder.buildFCopysign(Ty, One, X);
+
+ auto Cmp = MIRBuilder.buildFCmp(CmpInst::FCMP_OGE, CondTy, AbsDiff, Half,
+ Flags);
+ auto Sel = MIRBuilder.buildSelect(Ty, Cmp, SignOne, Zero, Flags);
+
+ MIRBuilder.buildFAdd(DstReg, T, Sel, Flags);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
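
Editor's note: a minimal scalar sketch of the round() expansion above, assuming the usual libm semantics of trunc/fabs/copysign (not part of the patch).

#include <cmath>

// round-half-away-from-zero: add copysign(1, x) when the fractional part
// is at least 0.5, otherwise add 0.
static float roundSketch(float X) {
  float T = std::truncf(X);
  float D = std::fabsf(X - T);
  return T + (D >= 0.5f ? std::copysignf(1.0f, X) : 0.0f);
}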
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFFloor(MachineInstr &MI) {
+ Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
unsigned Flags = MI.getFlags();
LLT Ty = MRI.getType(DstReg);
@@ -4145,8 +4963,8 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
// if (src < 0.0 && src != result)
// result += -1.0.
- auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
auto Trunc = MIRBuilder.buildIntrinsicTrunc(Ty, SrcReg, Flags);
+ auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
auto Lt0 = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, CondTy,
SrcReg, Zero, Flags);
@@ -4155,7 +4973,48 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
auto And = MIRBuilder.buildAnd(CondTy, Lt0, NeTrunc);
auto AddVal = MIRBuilder.buildSITOFP(Ty, And);
- MIRBuilder.buildFAdd(DstReg, Trunc, AddVal);
+ MIRBuilder.buildFAdd(DstReg, Trunc, AddVal, Flags);
+ MI.eraseFromParent();
+ return Legalized;
+}
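
Editor's note: similarly, a hedged scalar sketch of the floor expansion; the -1.0 adjustment applies only to negative inputs that truncation actually changed.

#include <cmath>

static float floorSketch(float X) {
  float T = std::truncf(X);
  // (X < 0 && X != T) is the And of the two compares built above,
  // converted to -1.0 or 0.0 by the G_SITOFP.
  return T + ((X < 0.0f && X != T) ? -1.0f : 0.0f);
}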
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerMergeValues(MachineInstr &MI) {
+ const unsigned NumOps = MI.getNumOperands();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register Src0Reg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(Src0Reg);
+ unsigned PartSize = SrcTy.getSizeInBits();
+
+ LLT WideTy = LLT::scalar(DstTy.getSizeInBits());
+ Register ResultReg = MIRBuilder.buildZExt(WideTy, Src0Reg).getReg(0);
+
+ for (unsigned I = 2; I != NumOps; ++I) {
+ const unsigned Offset = (I - 1) * PartSize;
+
+ Register SrcReg = MI.getOperand(I).getReg();
+ auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg);
+
+ Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg :
+ MRI.createGenericVirtualRegister(WideTy);
+
+ auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset);
+ auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt);
+ MIRBuilder.buildOr(NextResult, ResultReg, Shl);
+ ResultReg = NextResult;
+ }
+
+ if (DstTy.isPointer()) {
+ if (MIRBuilder.getDataLayout().isNonIntegralAddressSpace(
+ DstTy.getAddressSpace())) {
+ LLVM_DEBUG(dbgs() << "Not casting nonintegral address space\n");
+ return UnableToLegalize;
+ }
+
+ MIRBuilder.buildIntToPtr(DstReg, ResultReg);
+ }
+
MI.eraseFromParent();
return Legalized;
}
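
Editor's note: a hedged sketch of the scalar G_MERGE_VALUES expansion above, assuming 16-bit parts merged into a 64-bit result (the widths are illustrative).

#include <cstdint>
#include <vector>

// Each part is zero-extended and OR'd in at Offset = I * PartSize,
// matching the Shl/Or chain built above.
static uint64_t mergeParts(const std::vector<uint16_t> &Parts) {
  uint64_t Result = 0;
  for (size_t I = 0; I != Parts.size(); ++I)
    Result |= uint64_t(Parts[I]) << (I * 16);
  return Result;
}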
@@ -4163,34 +5022,31 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
const unsigned NumDst = MI.getNumOperands() - 1;
- const Register SrcReg = MI.getOperand(NumDst).getReg();
- LLT SrcTy = MRI.getType(SrcReg);
-
+ Register SrcReg = MI.getOperand(NumDst).getReg();
Register Dst0Reg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(Dst0Reg);
+ if (DstTy.isPointer())
+ return UnableToLegalize; // TODO
+ SrcReg = coerceToScalar(SrcReg);
+ if (!SrcReg)
+ return UnableToLegalize;
// Expand scalarizing unmerge as bitcast to integer and shift.
- if (!DstTy.isVector() && SrcTy.isVector() &&
- SrcTy.getElementType() == DstTy) {
- LLT IntTy = LLT::scalar(SrcTy.getSizeInBits());
- Register Cast = MIRBuilder.buildBitcast(IntTy, SrcReg).getReg(0);
-
- MIRBuilder.buildTrunc(Dst0Reg, Cast);
-
- const unsigned DstSize = DstTy.getSizeInBits();
- unsigned Offset = DstSize;
- for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
- auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
- auto Shift = MIRBuilder.buildLShr(IntTy, Cast, ShiftAmt);
- MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
- }
+ LLT IntTy = MRI.getType(SrcReg);
- MI.eraseFromParent();
- return Legalized;
+ MIRBuilder.buildTrunc(Dst0Reg, SrcReg);
+
+ const unsigned DstSize = DstTy.getSizeInBits();
+ unsigned Offset = DstSize;
+ for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
+ auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
+ auto Shift = MIRBuilder.buildLShr(IntTy, SrcReg, ShiftAmt);
+ MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
}
- return UnableToLegalize;
+ MI.eraseFromParent();
+ return Legalized;
}
LegalizerHelper::LegalizeResult
@@ -4251,16 +5107,19 @@ LegalizerHelper::lowerShuffleVector(MachineInstr &MI) {
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) {
+ const auto &MF = *MI.getMF();
+ const auto &TFI = *MF.getSubtarget().getFrameLowering();
+ if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
+ return UnableToLegalize;
+
Register Dst = MI.getOperand(0).getReg();
Register AllocSize = MI.getOperand(1).getReg();
- unsigned Align = MI.getOperand(2).getImm();
-
- const auto &MF = *MI.getMF();
- const auto &TLI = *MF.getSubtarget().getTargetLowering();
+ Align Alignment = assumeAligned(MI.getOperand(2).getImm());
LLT PtrTy = MRI.getType(Dst);
LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
+ const auto &TLI = *MF.getSubtarget().getTargetLowering();
Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
auto SPTmp = MIRBuilder.buildCopy(PtrTy, SPReg);
SPTmp = MIRBuilder.buildCast(IntPtrTy, SPTmp);
@@ -4269,8 +5128,8 @@ LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) {
// have to generate an extra instruction to negate the alloc and then use
// G_PTR_ADD to add the negative offset.
auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize);
- if (Align) {
- APInt AlignMask(IntPtrTy.getSizeInBits(), Align, true);
+ if (Alignment > Align(1)) {
+ APInt AlignMask(IntPtrTy.getSizeInBits(), Alignment.value(), true);
AlignMask.negate();
auto AlignCst = MIRBuilder.buildConstant(IntPtrTy, AlignMask);
Alloc = MIRBuilder.buildAnd(IntPtrTy, Alloc, AlignCst);
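
Editor's note: a hedged sketch of the pointer arithmetic above for a downward-growing stack; values, types, and the power-of-two alignment are assumptions of the sketch.

#include <cstdint>

// Subtract the allocation size from SP, then mask with -Alignment
// (the negated AlignMask) to round down to the requested alignment.
static uint64_t allocaNewSP(uint64_t SP, uint64_t AllocSize, uint64_t Alignment) {
  uint64_t NewSP = SP - AllocSize;
  if (Alignment > 1)
    NewSP &= ~(Alignment - 1);   // same as NewSP & -Alignment for powers of two
  return NewSP;
}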
@@ -4326,34 +5185,47 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerInsert(MachineInstr &MI) {
LLT DstTy = MRI.getType(Src);
LLT InsertTy = MRI.getType(InsertSrc);
- if (InsertTy.isScalar() &&
- (DstTy.isScalar() ||
- (DstTy.isVector() && DstTy.getElementType() == InsertTy))) {
- LLT IntDstTy = DstTy;
- if (!DstTy.isScalar()) {
- IntDstTy = LLT::scalar(DstTy.getSizeInBits());
- Src = MIRBuilder.buildBitcast(IntDstTy, Src).getReg(0);
- }
+ if (InsertTy.isVector() ||
+ (DstTy.isVector() && DstTy.getElementType() != InsertTy))
+ return UnableToLegalize;
- Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0);
- if (Offset != 0) {
- auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset);
- ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0);
- }
+ const DataLayout &DL = MIRBuilder.getDataLayout();
+ if ((DstTy.isPointer() &&
+ DL.isNonIntegralAddressSpace(DstTy.getAddressSpace())) ||
+ (InsertTy.isPointer() &&
+ DL.isNonIntegralAddressSpace(InsertTy.getAddressSpace()))) {
+ LLVM_DEBUG(dbgs() << "Not casting non-integral address space integer\n");
+ return UnableToLegalize;
+ }
- APInt MaskVal = ~APInt::getBitsSet(DstTy.getSizeInBits(), Offset,
- InsertTy.getSizeInBits());
+ LLT IntDstTy = DstTy;
- auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal);
- auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask);
- auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc);
+ if (!DstTy.isScalar()) {
+ IntDstTy = LLT::scalar(DstTy.getSizeInBits());
+ Src = MIRBuilder.buildCast(IntDstTy, Src).getReg(0);
+ }
- MIRBuilder.buildBitcast(Dst, Or);
- MI.eraseFromParent();
- return Legalized;
+ if (!InsertTy.isScalar()) {
+ const LLT IntInsertTy = LLT::scalar(InsertTy.getSizeInBits());
+ InsertSrc = MIRBuilder.buildPtrToInt(IntInsertTy, InsertSrc).getReg(0);
}
- return UnableToLegalize;
+ Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0);
+ if (Offset != 0) {
+ auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset);
+ ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0);
+ }
+
+ APInt MaskVal = APInt::getBitsSetWithWrap(
+ DstTy.getSizeInBits(), Offset + InsertTy.getSizeInBits(), Offset);
+
+ auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal);
+ auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask);
+ auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc);
+
+ MIRBuilder.buildCast(Dst, Or);
+ MI.eraseFromParent();
+ return Legalized;
}
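
Editor's note: a hedged sketch of the G_INSERT lowering above for a 32-bit value inserted into a 64-bit destination (widths illustrative; assumes the field fits, i.e. Offset + 32 <= 64, where the real code uses a wrapping mask).

#include <cstdint>

// Clear the destination bits covered by the insert, then OR in the
// zero-extended insert value shifted to Offset.
static uint64_t insertBits(uint64_t Dst, uint32_t InsertSrc, unsigned Offset) {
  uint64_t FieldMask = uint64_t(0xFFFFFFFF) << Offset;  // bits being replaced
  return (Dst & ~FieldMask) | (uint64_t(InsertSrc) << Offset);
}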
LegalizerHelper::LegalizeResult
@@ -4397,7 +5269,7 @@ LegalizerHelper::lowerBswap(MachineInstr &MI) {
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
const LLT Ty = MRI.getType(Src);
- unsigned SizeInBytes = Ty.getSizeInBytes();
+ unsigned SizeInBytes = (Ty.getScalarSizeInBits() + 7) / 8;
unsigned BaseShiftAmt = (SizeInBytes - 1) * 8;
// Swap most and least significant byte, set remaining bytes in Res to zero.
@@ -4470,20 +5342,29 @@ LegalizerHelper::lowerBitreverse(MachineInstr &MI) {
}
LegalizerHelper::LegalizeResult
-LegalizerHelper::lowerReadRegister(MachineInstr &MI) {
- Register Dst = MI.getOperand(0).getReg();
- const LLT Ty = MRI.getType(Dst);
- const MDString *RegStr = cast<MDString>(
- cast<MDNode>(MI.getOperand(1).getMetadata())->getOperand(0));
-
+LegalizerHelper::lowerReadWriteRegister(MachineInstr &MI) {
MachineFunction &MF = MIRBuilder.getMF();
const TargetSubtargetInfo &STI = MF.getSubtarget();
const TargetLowering *TLI = STI.getTargetLowering();
- Register Reg = TLI->getRegisterByName(RegStr->getString().data(), Ty, MF);
- if (!Reg.isValid())
+
+ bool IsRead = MI.getOpcode() == TargetOpcode::G_READ_REGISTER;
+ int NameOpIdx = IsRead ? 1 : 0;
+ int ValRegIndex = IsRead ? 0 : 1;
+
+ Register ValReg = MI.getOperand(ValRegIndex).getReg();
+ const LLT Ty = MRI.getType(ValReg);
+ const MDString *RegStr = cast<MDString>(
+ cast<MDNode>(MI.getOperand(NameOpIdx).getMetadata())->getOperand(0));
+
+ Register PhysReg = TLI->getRegisterByName(RegStr->getString().data(), Ty, MF);
+ if (!PhysReg.isValid())
return UnableToLegalize;
- MIRBuilder.buildCopy(Dst, Reg);
+ if (IsRead)
+ MIRBuilder.buildCopy(ValReg, PhysReg);
+ else
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+
MI.eraseFromParent();
return Legalized;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 02f6b39e09054..4abd0c4df97a2 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -59,6 +59,9 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, LegalizeAction Action) {
case MoreElements:
OS << "MoreElements";
break;
+ case Bitcast:
+ OS << "Bitcast";
+ break;
case Lower:
OS << "Lower";
break;
@@ -173,6 +176,9 @@ static bool mutationIsSane(const LegalizeRule &Rule,
return true;
}
+ case Bitcast: {
+ return OldTy != NewTy && OldTy.getSizeInBits() == NewTy.getSizeInBits();
+ }
default:
return true;
}
@@ -500,8 +506,7 @@ LegalizerInfo::getAction(const MachineInstr &MI,
SmallVector<LegalityQuery::MemDesc, 2> MemDescrs;
for (const auto &MMO : MI.memoperands())
MemDescrs.push_back({8 * MMO->getSize() /* in bits */,
- 8 * MMO->getAlignment(),
- MMO->getOrdering()});
+ 8 * MMO->getAlign().value(), MMO->getOrdering()});
return getAction({MI.getOpcode(), Types, MemDescrs});
}
@@ -519,12 +524,6 @@ bool LegalizerInfo::isLegalOrCustom(const MachineInstr &MI,
return Action == Legal || Action == Custom;
}
-bool LegalizerInfo::legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &MIRBuilder,
- GISelChangeObserver &Observer) const {
- return false;
-}
-
LegalizerInfo::SizeAndActionsVec
LegalizerInfo::increaseToLargerTypesAndDecreaseToLargest(
const SizeAndActionsVec &v, LegalizeAction IncreaseAction,
@@ -575,6 +574,7 @@ LegalizerInfo::findAction(const SizeAndActionsVec &Vec, const uint32_t Size) {
LegalizeAction Action = Vec[VecIdx].second;
switch (Action) {
case Legal:
+ case Bitcast:
case Lower:
case Libcall:
case Custom:
@@ -681,12 +681,6 @@ LegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
IntermediateType.getScalarSizeInBits())};
}
-bool LegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
- MachineRegisterInfo &MRI,
- MachineIRBuilder &MIRBuilder) const {
- return true;
-}
-
unsigned LegalizerInfo::getExtOpcodeForWideningConstant(LLT SmallTy) const {
return SmallTy.isByteSized() ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/Localizer.cpp b/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
index 1c4a668e5f316..a07416d086141 100644
--- a/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
@@ -13,6 +13,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
@@ -40,60 +41,6 @@ void Localizer::init(MachineFunction &MF) {
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(MF.getFunction());
}
-bool Localizer::shouldLocalize(const MachineInstr &MI) {
- // Assuming a spill and reload of a value has a cost of 1 instruction each,
- // this helper function computes the maximum number of uses we should consider
- // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
- // break even in terms of code size when the original MI has 2 users vs
- // choosing to potentially spill. Any more than 2 users we we have a net code
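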
- // size increase. This doesn't take into account register pressure though.
- auto maxUses = [](unsigned RematCost) {
- // A cost of 1 means remats are basically free.
- if (RematCost == 1)
- return UINT_MAX;
- if (RematCost == 2)
- return 2U;
-
- // Remat is too expensive, only sink if there's one user.
- if (RematCost > 2)
- return 1U;
- llvm_unreachable("Unexpected remat cost");
- };
-
- // Helper to walk through uses and terminate if we've reached a limit. Saves
- // us spending time traversing uses if all we want to know is if it's >= min.
- auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
- unsigned NumUses = 0;
- auto UI = MRI->use_instr_nodbg_begin(Reg), UE = MRI->use_instr_nodbg_end();
- for (; UI != UE && NumUses < MaxUses; ++UI) {
- NumUses++;
- }
- // If we haven't reached the end yet then there are more than MaxUses users.
- return UI == UE;
- };
-
- switch (MI.getOpcode()) {
- default:
- return false;
- // Constants-like instructions should be close to their users.
- // We don't want long live-ranges for them.
- case TargetOpcode::G_CONSTANT:
- case TargetOpcode::G_FCONSTANT:
- case TargetOpcode::G_FRAME_INDEX:
- case TargetOpcode::G_INTTOPTR:
- return true;
- case TargetOpcode::G_GLOBAL_VALUE: {
- unsigned RematCost = TTI->getGISelRematGlobalCost();
- Register Reg = MI.getOperand(0).getReg();
- unsigned MaxUses = maxUses(RematCost);
- if (MaxUses == UINT_MAX)
- return true; // Remats are "free" so always localize.
- bool B = isUsesAtMost(Reg, MaxUses);
- return B;
- }
- }
-}
-
void Localizer::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetTransformInfoWrapperPass>();
getSelectionDAGFallbackAnalysisUsage(AU);
@@ -119,9 +66,10 @@ bool Localizer::localizeInterBlock(MachineFunction &MF,
// we only localize instructions in the entry block here. This might change if
// we start doing CSE across blocks.
auto &MBB = MF.front();
+ auto &TL = *MF.getSubtarget().getTargetLowering();
for (auto RI = MBB.rbegin(), RE = MBB.rend(); RI != RE; ++RI) {
MachineInstr &MI = *RI;
- if (!shouldLocalize(MI))
+ if (!TL.shouldLocalize(MI, TTI))
continue;
LLVM_DEBUG(dbgs() << "Should localize: " << MI);
assert(MI.getDesc().getNumDefs() == 1 &&
@@ -138,8 +86,13 @@ bool Localizer::localizeInterBlock(MachineFunction &MF,
LLVM_DEBUG(MachineInstr &MIUse = *MOUse.getParent();
dbgs() << "Checking use: " << MIUse
<< " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n');
- if (isLocalUse(MOUse, MI, InsertMBB))
+ if (isLocalUse(MOUse, MI, InsertMBB)) {
+ // Even if we're in the same block, if the block is very large we could
+ // still have many long live ranges. Try to do intra-block localization
+ // too.
+ LocalizedInstrs.insert(&MI);
continue;
+ }
LLVM_DEBUG(dbgs() << "Fixing non-local use\n");
Changed = true;
auto MBBAndReg = std::make_pair(InsertMBB, Reg);
diff --git a/llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp b/llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp
new file mode 100644
index 0000000000000..6d606e5550f1a
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp
@@ -0,0 +1,113 @@
+//===----- llvm/CodeGen/GlobalISel/LostDebugLocObserver.cpp -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Tracks DebugLocs between checkpoints and verifies that they are transferred.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
+
+using namespace llvm;
+
+#define LOC_DEBUG(X) DEBUG_WITH_TYPE(DebugType.str().c_str(), X)
+
+void LostDebugLocObserver::analyzeDebugLocations() {
+ if (LostDebugLocs.empty()) {
+ LOC_DEBUG(dbgs() << ".. No debug info was present\n");
+ return;
+ }
+ if (PotentialMIsForDebugLocs.empty()) {
+ LOC_DEBUG(
+ dbgs() << ".. No instructions to carry debug info (dead code?)\n");
+ return;
+ }
+
+ LOC_DEBUG(dbgs() << ".. Searching " << PotentialMIsForDebugLocs.size()
+ << " instrs for " << LostDebugLocs.size() << " locations\n");
+ SmallPtrSet<MachineInstr *, 4> FoundIn;
+ for (MachineInstr *MI : PotentialMIsForDebugLocs) {
+ if (!MI->getDebugLoc())
+ continue;
+ // Check this first in case there's a matching line-0 location on both input
+ // and output.
+ if (MI->getDebugLoc().getLine() == 0) {
+ LOC_DEBUG(
+ dbgs() << ".. Assuming line-0 location covers remainder (if any)\n");
+ return;
+ }
+ if (LostDebugLocs.erase(MI->getDebugLoc())) {
+ LOC_DEBUG(dbgs() << ".. .. found " << MI->getDebugLoc() << " in " << *MI);
+ FoundIn.insert(MI);
+ continue;
+ }
+ }
+ if (LostDebugLocs.empty())
+ return;
+
+ NumLostDebugLocs += LostDebugLocs.size();
+ LOC_DEBUG({
+ dbgs() << ".. Lost locations:\n";
+ for (const DebugLoc &Loc : LostDebugLocs) {
+ dbgs() << ".. .. ";
+ Loc.print(dbgs());
+ dbgs() << "\n";
+ }
+ dbgs() << ".. MIs with matched locations:\n";
+ for (MachineInstr *MI : FoundIn)
+ if (PotentialMIsForDebugLocs.erase(MI))
+ dbgs() << ".. .. " << *MI;
+ dbgs() << ".. Remaining MIs with unmatched/no locations:\n";
+ for (const MachineInstr *MI : PotentialMIsForDebugLocs)
+ dbgs() << ".. .. " << *MI;
+ });
+}
+
+void LostDebugLocObserver::checkpoint(bool CheckDebugLocs) {
+ if (CheckDebugLocs)
+ analyzeDebugLocations();
+ PotentialMIsForDebugLocs.clear();
+ LostDebugLocs.clear();
+}
+
+void LostDebugLocObserver::createdInstr(MachineInstr &MI) {
+ PotentialMIsForDebugLocs.insert(&MI);
+}
+
+static bool irTranslatorNeverAddsLocations(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ return false;
+ case TargetOpcode::G_CONSTANT:
+ case TargetOpcode::G_FCONSTANT:
+ case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_GLOBAL_VALUE:
+ return true;
+ }
+}
+
+void LostDebugLocObserver::erasingInstr(MachineInstr &MI) {
+ if (irTranslatorNeverAddsLocations(MI.getOpcode()))
+ return;
+
+ PotentialMIsForDebugLocs.erase(&MI);
+ if (MI.getDebugLoc())
+ LostDebugLocs.insert(MI.getDebugLoc());
+}
+
+void LostDebugLocObserver::changingInstr(MachineInstr &MI) {
+ if (irTranslatorNeverAddsLocations(MI.getOpcode()))
+ return;
+
+ PotentialMIsForDebugLocs.erase(&MI);
+ if (MI.getDebugLoc())
+ LostDebugLocs.insert(MI.getDebugLoc());
+}
+
+void LostDebugLocObserver::changedInstr(MachineInstr &MI) {
+ PotentialMIsForDebugLocs.insert(&MI);
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 67d9dacda61b8..10f696d6a3b30 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -33,48 +33,10 @@ void MachineIRBuilder::setMF(MachineFunction &MF) {
State.Observer = nullptr;
}
-void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
- State.MBB = &MBB;
- State.II = MBB.end();
- assert(&getMF() == MBB.getParent() &&
- "Basic block is in a different function");
-}
-
-void MachineIRBuilder::setInstr(MachineInstr &MI) {
- assert(MI.getParent() && "Instruction is not part of a basic block");
- setMBB(*MI.getParent());
- State.II = MI.getIterator();
-}
-
-void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }
-
-void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II) {
- assert(MBB.getParent() == &getMF() &&
- "Basic block is in a different function");
- State.MBB = &MBB;
- State.II = II;
-}
-
-void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
- if (State.Observer)
- State.Observer->createdInstr(*InsertedInstr);
-}
-
-void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) {
- State.Observer = &Observer;
-}
-
-void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; }
-
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------
-MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
- return insertInstr(buildInstrNoInsert(Opcode));
-}
-
MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
return MIB;
@@ -107,13 +69,9 @@ MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
assert(
cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
"Expected inlined-at fields to agree");
- // DBG_VALUE insts now carry IR-level indirection in their DIExpression
- // rather than encoding it in the instruction itself.
- const DIExpression *DIExpr = cast<DIExpression>(Expr);
- DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
return insertInstr(BuildMI(getMF(), getDL(),
getTII().get(TargetOpcode::DBG_VALUE),
- /*IsIndirect*/ false, Reg, Variable, DIExpr));
+ /*IsIndirect*/ true, Reg, Variable, Expr));
}
MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
@@ -124,15 +82,11 @@ MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
assert(
cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
"Expected inlined-at fields to agree");
- // DBG_VALUE insts now carry IR-level indirection in their DIExpression
- // rather than encoding it in the instruction itself.
- const DIExpression *DIExpr = cast<DIExpression>(Expr);
- DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
return buildInstr(TargetOpcode::DBG_VALUE)
.addFrameIndex(FI)
- .addReg(0)
+ .addImm(0)
.addMetadata(Variable)
- .addMetadata(DIExpr);
+ .addMetadata(Expr);
}
MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
@@ -143,7 +97,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
assert(
cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
"Expected inlined-at fields to agree");
- auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
+ auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
if (auto *CI = dyn_cast<ConstantInt>(&C)) {
if (CI->getBitWidth() > 64)
MIB.addCImm(CI);
@@ -156,7 +110,8 @@ MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
MIB.addReg(0U);
}
- return MIB.addReg(0).addMetadata(Variable).addMetadata(Expr);
+ MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
+ return insertInstr(MIB);
}
MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
@@ -170,12 +125,12 @@ MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
const SrcOp &Size,
- unsigned Align) {
+ Align Alignment) {
assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
Res.addDefToMIB(*getMRI(), MIB);
Size.addSrcToMIB(MIB);
- MIB.addImm(Align);
+ MIB.addImm(Alignment.value());
return MIB;
}
@@ -207,14 +162,14 @@ MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
.addJumpTableIndex(JTI);
}
-void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
- const LLT &Op1) {
+void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
+ const LLT Op1) {
assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
assert((Res == Op0 && Res == Op1) && "type mismatch");
}
-void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
- const LLT &Op1) {
+void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
+ const LLT Op1) {
assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
assert((Res == Op0) && "type mismatch");
}
@@ -222,16 +177,16 @@ void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
const SrcOp &Op0,
const SrcOp &Op1) {
- assert(Res.getLLTTy(*getMRI()).isPointer() &&
+ assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
- assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");
+ assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}
Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
- const LLT &ValueTy, uint64_t Value) {
+ const LLT ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
assert(ValueTy.isScalar() && "invalid offset type");
@@ -245,17 +200,14 @@ MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
-MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res,
- const SrcOp &Op0,
- uint32_t NumBits) {
- assert(Res.getLLTTy(*getMRI()).isPointer() &&
- Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
-
- auto MIB = buildInstr(TargetOpcode::G_PTR_MASK);
- Res.addDefToMIB(*getMRI(), MIB);
- Op0.addSrcToMIB(MIB);
- MIB.addImm(NumBits);
- return MIB;
+MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
+ const SrcOp &Op0,
+ uint32_t NumBits) {
+ LLT PtrTy = Res.getLLTTy(*getMRI());
+ LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
+ Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
+ buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
+ return buildPtrMask(Res, Op0, MaskReg);
}
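
Editor's note: a hedged model of the mask built by buildMaskLowPtrBits above, assuming a 64-bit pointer and NumBits < 64.

#include <cstdint>

// maskTrailingZeros<uint64_t>(NumBits) keeps the high bits and clears the
// NumBits low bits, which is what the G_PTR_MASK then applies.
static uint64_t maskLowPtrBits(uint64_t Ptr, unsigned NumBits) {
  return Ptr & (~uint64_t(0) << NumBits);
}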
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
@@ -298,6 +250,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
}
auto Const = buildInstr(TargetOpcode::G_CONSTANT);
+ Const->setDebugLoc(DebugLoc());
Res.addDefToMIB(*getMRI(), Const);
Const.addCImm(&Val);
return Const;
@@ -331,6 +284,7 @@ MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
}
auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
+ Const->setDebugLoc(DebugLoc());
Res.addDefToMIB(*getMRI(), Const);
Const.addFPImm(&Val);
return Const;
@@ -385,6 +339,23 @@ MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
return MIB;
}
+MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
+ const DstOp &Dst, const SrcOp &BasePtr,
+ MachineMemOperand &BaseMMO, int64_t Offset) {
+ LLT LoadTy = Dst.getLLTTy(*getMRI());
+ MachineMemOperand *OffsetMMO =
+ getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());
+
+ if (Offset == 0) // This may be a size or type changing load.
+ return buildLoad(Dst, BasePtr, *OffsetMMO);
+
+ LLT PtrTy = BasePtr.getLLTTy(*getMRI());
+ LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
+ auto ConstOffset = buildConstant(OffsetTy, Offset);
+ auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
+ return buildLoad(Dst, Ptr, *OffsetMMO);
+}
+
MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
const SrcOp &Addr,
MachineMemOperand &MMO) {
@@ -398,22 +369,6 @@ MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
return MIB;
}
-MachineInstrBuilder MachineIRBuilder::buildUAddo(const DstOp &Res,
- const DstOp &CarryOut,
- const SrcOp &Op0,
- const SrcOp &Op1) {
- return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
-}
-
-MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res,
- const DstOp &CarryOut,
- const SrcOp &Op0,
- const SrcOp &Op1,
- const SrcOp &CarryIn) {
- return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
- {Op0, Op1, CarryIn});
-}
-
MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
const SrcOp &Op) {
return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
@@ -537,7 +492,7 @@ void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
#ifndef NDEBUG
assert(Ops.size() == Indices.size() && "incompatible args");
assert(!Ops.empty() && "invalid trivial sequence");
- assert(std::is_sorted(Indices.begin(), Indices.end()) &&
+ assert(llvm::is_sorted(Indices) &&
"sequence offsets must be in ascending order");
assert(getMRI()->getType(Res).isValid() && "invalid operand type");
@@ -587,6 +542,13 @@ MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}
+MachineInstrBuilder
+MachineIRBuilder::buildMerge(const DstOp &Res,
+ std::initializer_list<SrcOp> Ops) {
+ assert(Ops.size() > 1);
+ return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
+}
+
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
const SrcOp &Op) {
// Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
@@ -650,22 +612,20 @@ MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}
-MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
- Register Op, unsigned Index) {
- assert(Index + getMRI()->getType(Op).getSizeInBits() <=
- getMRI()->getType(Res).getSizeInBits() &&
+MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
+ const SrcOp &Src,
+ const SrcOp &Op,
+ unsigned Index) {
+ assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
+ Res.getLLTTy(*getMRI()).getSizeInBits() &&
"insertion past the end of a register");
- if (getMRI()->getType(Res).getSizeInBits() ==
- getMRI()->getType(Op).getSizeInBits()) {
+ if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
+ Op.getLLTTy(*getMRI()).getSizeInBits()) {
return buildCast(Res, Op);
}
- return buildInstr(TargetOpcode::G_INSERT)
- .addDef(Res)
- .addUse(Src)
- .addUse(Op)
- .addImm(Index);
+ return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}
MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
@@ -915,7 +875,7 @@ MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}
-void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
+void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
bool IsExtend) {
#ifndef NDEBUG
if (DstTy.isVector()) {
@@ -934,8 +894,8 @@ void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
#endif
}
-void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
- const LLT &Op0Ty, const LLT &Op1Ty) {
+void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
+ const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
"invalid operand type");
@@ -978,7 +938,11 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
case TargetOpcode::G_SMIN:
case TargetOpcode::G_SMAX:
case TargetOpcode::G_UMIN:
- case TargetOpcode::G_UMAX: {
+ case TargetOpcode::G_UMAX:
+ case TargetOpcode::G_UADDSAT:
+ case TargetOpcode::G_SADDSAT:
+ case TargetOpcode::G_USUBSAT:
+ case TargetOpcode::G_SSUBSAT: {
// All these are binary ops.
assert(DstOps.size() == 1 && "Invalid Dst");
assert(SrcOps.size() == 2 && "Invalid Srcs");
@@ -1013,6 +977,13 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
SrcOps[0].getLLTTy(*getMRI()), false);
break;
}
+ case TargetOpcode::G_BITCAST: {
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ assert(SrcOps.size() == 1 && "Invalid Srcs");
+ assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
+ break;
+ }
case TargetOpcode::COPY:
assert(DstOps.size() == 1 && "Invalid Dst");
// If the caller wants to add a subreg source it has to be done separately
diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index 98e48f5fc1d51..356e0e437d32c 100644
--- a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -693,6 +693,15 @@ bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
if (isTargetSpecificOpcode(MI.getOpcode()) && !MI.isPreISelOpcode())
continue;
+ // Ignore inline asm instructions: they should use physical
+ // registers/regclasses
+ if (MI.isInlineAsm())
+ continue;
+
+ // Ignore debug info.
+ if (MI.isDebugInstr())
+ continue;
+
if (!assignInstr(MI)) {
reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect",
"unable to map instruction", MI);
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index eeec2a5d536ad..8a7fb4fbbf2de 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -12,6 +12,7 @@
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -27,9 +28,9 @@
using namespace llvm;
-unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
+Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
const TargetInstrInfo &TII,
- const RegisterBankInfo &RBI, unsigned Reg,
+ const RegisterBankInfo &RBI, Register Reg,
const TargetRegisterClass &RegClass) {
if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
return MRI.createVirtualRegister(&RegClass);
@@ -37,17 +38,16 @@ unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
return Reg;
}
-unsigned llvm::constrainOperandRegClass(
+Register llvm::constrainOperandRegClass(
const MachineFunction &MF, const TargetRegisterInfo &TRI,
MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
const RegisterBankInfo &RBI, MachineInstr &InsertPt,
- const TargetRegisterClass &RegClass, const MachineOperand &RegMO,
- unsigned OpIdx) {
+ const TargetRegisterClass &RegClass, const MachineOperand &RegMO) {
Register Reg = RegMO.getReg();
// Assume physical registers are properly constrained.
assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
- unsigned ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
+ Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
// If we created a new virtual register because the class is not compatible
// then create a copy between the new and the old register.
if (ConstrainedReg != Reg) {
@@ -63,11 +63,20 @@ unsigned llvm::constrainOperandRegClass(
TII.get(TargetOpcode::COPY), Reg)
.addReg(ConstrainedReg);
}
+ } else {
+ if (GISelChangeObserver *Observer = MF.getObserver()) {
+ if (!RegMO.isDef()) {
+ MachineInstr *RegDef = MRI.getVRegDef(Reg);
+ Observer->changedInstr(*RegDef);
+ }
+ Observer->changingAllUsesOfReg(MRI, Reg);
+ Observer->finishedChangingAllUsesOfReg();
+ }
}
return ConstrainedReg;
}
-unsigned llvm::constrainOperandRegClass(
+Register llvm::constrainOperandRegClass(
const MachineFunction &MF, const TargetRegisterInfo &TRI,
MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
@@ -105,7 +114,7 @@ unsigned llvm::constrainOperandRegClass(
return Reg;
}
return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
- RegMO, OpIdx);
+ RegMO);
}
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
@@ -155,6 +164,20 @@ bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
return true;
}
+bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
+ MachineRegisterInfo &MRI) {
+ // Give up if either DstReg or SrcReg is a physical register.
+ if (DstReg.isPhysical() || SrcReg.isPhysical())
+ return false;
+ // Give up if the types don't match.
+ if (MRI.getType(DstReg) != MRI.getType(SrcReg))
+ return false;
+ // Replace if either DstReg has no constraints or the register
+ // constraints match.
+ return !MRI.getRegClassOrRegBank(DstReg) ||
+ MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
+}
+
bool llvm::isTriviallyDead(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
// If we can move an instruction, we can remove it. Otherwise, it has
@@ -175,22 +198,37 @@ bool llvm::isTriviallyDead(const MachineInstr &MI,
return true;
}
-void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
- MachineOptimizationRemarkEmitter &MORE,
- MachineOptimizationRemarkMissed &R) {
- MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
-
+static void reportGISelDiagnostic(DiagnosticSeverity Severity,
+ MachineFunction &MF,
+ const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R) {
+ bool IsFatal = Severity == DS_Error &&
+ TPC.isGlobalISelAbortEnabled();
// Print the function name explicitly if we don't have a debug location (which
// makes the diagnostic less useful) or if we're going to emit a raw error.
- if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
+ if (!R.getLocation().isValid() || IsFatal)
R << (" (in function: " + MF.getName() + ")").str();
- if (TPC.isGlobalISelAbortEnabled())
+ if (IsFatal)
report_fatal_error(R.getMsg());
else
MORE.emit(R);
}
+void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R) {
+ reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
+}
+
+void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R) {
+ MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
+ reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
+}
+
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
MachineOptimizationRemarkEmitter &MORE,
const char *PassName, StringRef Msg,
@@ -204,7 +242,7 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
reportGISelFailure(MF, TPC, MORE, R);
}
-Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
+Optional<int64_t> llvm::getConstantVRegVal(Register VReg,
const MachineRegisterInfo &MRI) {
Optional<ValueAndVReg> ValAndVReg =
getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
@@ -216,7 +254,7 @@ Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
}
Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
- unsigned VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
+ Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
bool HandleFConstant) {
SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
MachineInstr *MI;
@@ -292,28 +330,51 @@ Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
return ValueAndVReg{Val.getSExtValue(), VReg};
}
-const llvm::ConstantFP* llvm::getConstantFPVRegVal(unsigned VReg,
- const MachineRegisterInfo &MRI) {
+const llvm::ConstantFP *
+llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
MachineInstr *MI = MRI.getVRegDef(VReg);
if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
return nullptr;
return MI->getOperand(1).getFPImm();
}
-llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
- const MachineRegisterInfo &MRI) {
+namespace {
+struct DefinitionAndSourceRegister {
+ llvm::MachineInstr *MI;
+ Register Reg;
+};
+} // namespace
+
+static llvm::Optional<DefinitionAndSourceRegister>
+getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
+ Register DefSrcReg = Reg;
auto *DefMI = MRI.getVRegDef(Reg);
auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
if (!DstTy.isValid())
- return nullptr;
+ return None;
while (DefMI->getOpcode() == TargetOpcode::COPY) {
Register SrcReg = DefMI->getOperand(1).getReg();
auto SrcTy = MRI.getType(SrcReg);
if (!SrcTy.isValid() || SrcTy != DstTy)
break;
DefMI = MRI.getVRegDef(SrcReg);
+ DefSrcReg = SrcReg;
}
- return DefMI;
+ return DefinitionAndSourceRegister{DefMI, DefSrcReg};
+}
+
+llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
+ const MachineRegisterInfo &MRI) {
+ Optional<DefinitionAndSourceRegister> DefSrcReg =
+ getDefSrcRegIgnoringCopies(Reg, MRI);
+ return DefSrcReg ? DefSrcReg->MI : nullptr;
+}
+
+Register llvm::getSrcRegIgnoringCopies(Register Reg,
+ const MachineRegisterInfo &MRI) {
+ Optional<DefinitionAndSourceRegister> DefSrcReg =
+ getDefSrcRegIgnoringCopies(Reg, MRI);
+ return DefSrcReg ? DefSrcReg->Reg : Register();
}
llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
@@ -335,54 +396,59 @@ APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
return APF;
}
-Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
- const unsigned Op2,
+Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
+ const Register Op2,
const MachineRegisterInfo &MRI) {
- auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
- if (MaybeOp1Cst && MaybeOp2Cst) {
- LLT Ty = MRI.getType(Op1);
- APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
- APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
- switch (Opcode) {
- default:
+ if (!MaybeOp2Cst)
+ return None;
+
+ auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+ if (!MaybeOp1Cst)
+ return None;
+
+ LLT Ty = MRI.getType(Op1);
+ APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
+ APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
+ switch (Opcode) {
+ default:
+ break;
+ case TargetOpcode::G_ADD:
+ return C1 + C2;
+ case TargetOpcode::G_AND:
+ return C1 & C2;
+ case TargetOpcode::G_ASHR:
+ return C1.ashr(C2);
+ case TargetOpcode::G_LSHR:
+ return C1.lshr(C2);
+ case TargetOpcode::G_MUL:
+ return C1 * C2;
+ case TargetOpcode::G_OR:
+ return C1 | C2;
+ case TargetOpcode::G_SHL:
+ return C1 << C2;
+ case TargetOpcode::G_SUB:
+ return C1 - C2;
+ case TargetOpcode::G_XOR:
+ return C1 ^ C2;
+ case TargetOpcode::G_UDIV:
+ if (!C2.getBoolValue())
break;
- case TargetOpcode::G_ADD:
- return C1 + C2;
- case TargetOpcode::G_AND:
- return C1 & C2;
- case TargetOpcode::G_ASHR:
- return C1.ashr(C2);
- case TargetOpcode::G_LSHR:
- return C1.lshr(C2);
- case TargetOpcode::G_MUL:
- return C1 * C2;
- case TargetOpcode::G_OR:
- return C1 | C2;
- case TargetOpcode::G_SHL:
- return C1 << C2;
- case TargetOpcode::G_SUB:
- return C1 - C2;
- case TargetOpcode::G_XOR:
- return C1 ^ C2;
- case TargetOpcode::G_UDIV:
- if (!C2.getBoolValue())
- break;
- return C1.udiv(C2);
- case TargetOpcode::G_SDIV:
- if (!C2.getBoolValue())
- break;
- return C1.sdiv(C2);
- case TargetOpcode::G_UREM:
- if (!C2.getBoolValue())
- break;
- return C1.urem(C2);
- case TargetOpcode::G_SREM:
- if (!C2.getBoolValue())
- break;
- return C1.srem(C2);
- }
+ return C1.udiv(C2);
+ case TargetOpcode::G_SDIV:
+ if (!C2.getBoolValue())
+ break;
+ return C1.sdiv(C2);
+ case TargetOpcode::G_UREM:
+ if (!C2.getBoolValue())
+ break;
+ return C1.urem(C2);
+ case TargetOpcode::G_SREM:
+ if (!C2.getBoolValue())
+ break;
+ return C1.srem(C2);
}
+
return None;
}
@@ -411,7 +477,19 @@ bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
return false;
}
-Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const unsigned Op1,
+Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
+ const MachinePointerInfo &MPO) {
+ auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
+ if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
+ MPO.Offset);
+ }
+
+ return Align(1);
+}
+
+Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
uint64_t Imm,
const MachineRegisterInfo &MRI) {
auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
@@ -431,3 +509,55 @@ Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const unsigned Op1,
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
AU.addPreserved<StackProtector>();
}
+
+LLT llvm::getLCMType(LLT Ty0, LLT Ty1) {
+ if (!Ty0.isVector() && !Ty1.isVector()) {
+ unsigned Mul = Ty0.getSizeInBits() * Ty1.getSizeInBits();
+ int GCDSize = greatestCommonDivisor(Ty0.getSizeInBits(),
+ Ty1.getSizeInBits());
+ return LLT::scalar(Mul / GCDSize);
+ }
+
+ if (Ty0.isVector() && !Ty1.isVector()) {
+ assert(Ty0.getElementType() == Ty1 && "not yet handled");
+ return Ty0;
+ }
+
+ if (Ty1.isVector() && !Ty0.isVector()) {
+ assert(Ty1.getElementType() == Ty0 && "not yet handled");
+ return Ty1;
+ }
+
+ if (Ty0.isVector() && Ty1.isVector()) {
+ assert(Ty0.getElementType() == Ty1.getElementType() && "not yet handled");
+
+ int GCDElts = greatestCommonDivisor(Ty0.getNumElements(),
+ Ty1.getNumElements());
+
+ int Mul = Ty0.getNumElements() * Ty1.getNumElements();
+ return LLT::vector(Mul / GCDElts, Ty0.getElementType());
+ }
+
+ llvm_unreachable("not yet handled");
+}
+
+LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
+ if (OrigTy.isVector() && TargetTy.isVector()) {
+ assert(OrigTy.getElementType() == TargetTy.getElementType());
+ int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
+ TargetTy.getNumElements());
+ return LLT::scalarOrVector(GCD, OrigTy.getElementType());
+ }
+
+ if (OrigTy.isVector() && !TargetTy.isVector()) {
+ assert(OrigTy.getElementType() == TargetTy);
+ return TargetTy;
+ }
+
+ assert(!OrigTy.isVector() && !TargetTy.isVector() &&
+ "GCD type of vector and scalar not implemented");
+
+ int GCD = greatestCommonDivisor(OrigTy.getSizeInBits(),
+ TargetTy.getSizeInBits());
+ return LLT::scalar(GCD);
+}
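
Editor's note: for the scalar cases, the two helpers above reduce to the LCM/GCD of the bit widths; a hedged illustration (function names are hypothetical, std::gcd is C++17 <numeric>).

#include <numeric>

// e.g. getLCMType(s32, s48) -> s96 and getGCDType(s32, s48) -> s16 in this model.
static unsigned lcmBits(unsigned A, unsigned B) { return (A / std::gcd(A, B)) * B; }
static unsigned gcdBits(unsigned A, unsigned B) { return std::gcd(A, B); }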