Diffstat (limited to 'llvm/lib/CodeGen/GlobalISel')
22 files changed, 15172 insertions, 0 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp new file mode 100644 index 000000000000..7d9d812d34bc --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp @@ -0,0 +1,382 @@ +//===- CSEInfo.cpp ------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// +#include "llvm/CodeGen/GlobalISel/CSEInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" + +#define DEBUG_TYPE "cseinfo" + +using namespace llvm; +char llvm::GISelCSEAnalysisWrapperPass::ID = 0; +INITIALIZE_PASS_BEGIN(GISelCSEAnalysisWrapperPass, DEBUG_TYPE, + "Analysis containing CSE Info", false, true) +INITIALIZE_PASS_END(GISelCSEAnalysisWrapperPass, DEBUG_TYPE, + "Analysis containing CSE Info", false, true) + +/// -------- UniqueMachineInstr -------------// + +void UniqueMachineInstr::Profile(FoldingSetNodeID &ID) { + GISelInstProfileBuilder(ID, MI->getMF()->getRegInfo()).addNodeID(MI); +} +/// ----------------------------------------- + +/// --------- CSEConfigFull ---------- /// +bool CSEConfigFull::shouldCSEOpc(unsigned Opc) { + switch (Opc) { + default: + break; + case TargetOpcode::G_ADD: + case TargetOpcode::G_AND: + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_MUL: + case TargetOpcode::G_OR: + case TargetOpcode::G_SHL: + case TargetOpcode::G_SUB: + case TargetOpcode::G_XOR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_SREM: + case TargetOpcode::G_CONSTANT: + case TargetOpcode::G_FCONSTANT: + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_SEXT: + case TargetOpcode::G_ANYEXT: + case TargetOpcode::G_UNMERGE_VALUES: + case TargetOpcode::G_TRUNC: + case TargetOpcode::G_GEP: + return true; + } + return false; +} + +bool CSEConfigConstantOnly::shouldCSEOpc(unsigned Opc) { + return Opc == TargetOpcode::G_CONSTANT; +} + +std::unique_ptr<CSEConfigBase> +llvm::getStandardCSEConfigForOpt(CodeGenOpt::Level Level) { + std::unique_ptr<CSEConfigBase> Config; + if (Level == CodeGenOpt::None) + Config = std::make_unique<CSEConfigConstantOnly>(); + else + Config = std::make_unique<CSEConfigFull>(); + return Config; +} + +/// ----------------------------------------- + +/// -------- GISelCSEInfo -------------// +void GISelCSEInfo::setMF(MachineFunction &MF) { + this->MF = &MF; + this->MRI = &MF.getRegInfo(); +} + +GISelCSEInfo::~GISelCSEInfo() {} + +bool GISelCSEInfo::isUniqueMachineInstValid( + const UniqueMachineInstr &UMI) const { + // Should we check here and assert that the instruction has been fully + // constructed? + // FIXME: Any other checks required to be done here? Remove this method if + // none. + return true; +} + +void GISelCSEInfo::invalidateUniqueMachineInstr(UniqueMachineInstr *UMI) { + bool Removed = CSEMap.RemoveNode(UMI); + (void)Removed; + assert(Removed && "Invalidation called on invalid UMI"); + // FIXME: Should UMI be deallocated/destroyed? 
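+  // Note: UMIs are placement-new'd into UniqueInstrAllocator (see
+  // getUniqueInstrForMI), so there is nothing to free here; the memory is
+  // reclaimed in bulk when releaseMemory() resets the allocator.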
+} + +UniqueMachineInstr *GISelCSEInfo::getNodeIfExists(FoldingSetNodeID &ID, + MachineBasicBlock *MBB, + void *&InsertPos) { + auto *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos); + if (Node) { + if (!isUniqueMachineInstValid(*Node)) { + invalidateUniqueMachineInstr(Node); + return nullptr; + } + + if (Node->MI->getParent() != MBB) + return nullptr; + } + return Node; +} + +void GISelCSEInfo::insertNode(UniqueMachineInstr *UMI, void *InsertPos) { + handleRecordedInsts(); + assert(UMI); + UniqueMachineInstr *MaybeNewNode = UMI; + if (InsertPos) + CSEMap.InsertNode(UMI, InsertPos); + else + MaybeNewNode = CSEMap.GetOrInsertNode(UMI); + if (MaybeNewNode != UMI) { + // A similar node exists in the folding set. Let's ignore this one. + return; + } + assert(InstrMapping.count(UMI->MI) == 0 && + "This instruction should not be in the map"); + InstrMapping[UMI->MI] = MaybeNewNode; +} + +UniqueMachineInstr *GISelCSEInfo::getUniqueInstrForMI(const MachineInstr *MI) { + assert(shouldCSE(MI->getOpcode()) && "Trying to CSE an unsupported Node"); + auto *Node = new (UniqueInstrAllocator) UniqueMachineInstr(MI); + return Node; +} + +void GISelCSEInfo::insertInstr(MachineInstr *MI, void *InsertPos) { + assert(MI); + // If it exists in temporary insts, remove it. + TemporaryInsts.remove(MI); + auto *Node = getUniqueInstrForMI(MI); + insertNode(Node, InsertPos); +} + +MachineInstr *GISelCSEInfo::getMachineInstrIfExists(FoldingSetNodeID &ID, + MachineBasicBlock *MBB, + void *&InsertPos) { + handleRecordedInsts(); + if (auto *Inst = getNodeIfExists(ID, MBB, InsertPos)) { + LLVM_DEBUG(dbgs() << "CSEInfo::Found Instr " << *Inst->MI;); + return const_cast<MachineInstr *>(Inst->MI); + } + return nullptr; +} + +void GISelCSEInfo::countOpcodeHit(unsigned Opc) { +#ifndef NDEBUG + if (OpcodeHitTable.count(Opc)) + OpcodeHitTable[Opc] += 1; + else + OpcodeHitTable[Opc] = 1; +#endif + // Else do nothing. +} + +void GISelCSEInfo::recordNewInstruction(MachineInstr *MI) { + if (shouldCSE(MI->getOpcode())) { + TemporaryInsts.insert(MI); + LLVM_DEBUG(dbgs() << "CSEInfo::Recording new MI " << *MI); + } +} + +void GISelCSEInfo::handleRecordedInst(MachineInstr *MI) { + assert(shouldCSE(MI->getOpcode()) && "Invalid instruction for CSE"); + auto *UMI = InstrMapping.lookup(MI); + LLVM_DEBUG(dbgs() << "CSEInfo::Handling recorded MI " << *MI); + if (UMI) { + // Invalidate this MI. + invalidateUniqueMachineInstr(UMI); + InstrMapping.erase(MI); + } + /// Now insert the new instruction. + if (UMI) { + /// We'll reuse the same UniqueMachineInstr to avoid the new + /// allocation. + *UMI = UniqueMachineInstr(MI); + insertNode(UMI, nullptr); + } else { + /// This is a new instruction. Allocate a new UniqueMachineInstr and + /// Insert. 
+ insertInstr(MI); + } +} + +void GISelCSEInfo::handleRemoveInst(MachineInstr *MI) { + if (auto *UMI = InstrMapping.lookup(MI)) { + invalidateUniqueMachineInstr(UMI); + InstrMapping.erase(MI); + } + TemporaryInsts.remove(MI); +} + +void GISelCSEInfo::handleRecordedInsts() { + while (!TemporaryInsts.empty()) { + auto *MI = TemporaryInsts.pop_back_val(); + handleRecordedInst(MI); + } +} + +bool GISelCSEInfo::shouldCSE(unsigned Opc) const { + // Only GISel opcodes are CSEable + if (!isPreISelGenericOpcode(Opc)) + return false; + assert(CSEOpt.get() && "CSEConfig not set"); + return CSEOpt->shouldCSEOpc(Opc); +} + +void GISelCSEInfo::erasingInstr(MachineInstr &MI) { handleRemoveInst(&MI); } +void GISelCSEInfo::createdInstr(MachineInstr &MI) { recordNewInstruction(&MI); } +void GISelCSEInfo::changingInstr(MachineInstr &MI) { + // For now, perform erase, followed by insert. + erasingInstr(MI); + createdInstr(MI); +} +void GISelCSEInfo::changedInstr(MachineInstr &MI) { changingInstr(MI); } + +void GISelCSEInfo::analyze(MachineFunction &MF) { + setMF(MF); + for (auto &MBB : MF) { + if (MBB.empty()) + continue; + for (MachineInstr &MI : MBB) { + if (!shouldCSE(MI.getOpcode())) + continue; + LLVM_DEBUG(dbgs() << "CSEInfo::Add MI: " << MI); + insertInstr(&MI); + } + } +} + +void GISelCSEInfo::releaseMemory() { + print(); + CSEMap.clear(); + InstrMapping.clear(); + UniqueInstrAllocator.Reset(); + TemporaryInsts.clear(); + CSEOpt.reset(); + MRI = nullptr; + MF = nullptr; +#ifndef NDEBUG + OpcodeHitTable.clear(); +#endif +} + +void GISelCSEInfo::print() { + LLVM_DEBUG(for (auto &It + : OpcodeHitTable) { + dbgs() << "CSEInfo::CSE Hit for Opc " << It.first << " : " << It.second + << "\n"; + };); +} +/// ----------------------------------------- +// ---- Profiling methods for FoldingSetNode --- // +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeID(const MachineInstr *MI) const { + addNodeIDMBB(MI->getParent()); + addNodeIDOpcode(MI->getOpcode()); + for (auto &Op : MI->operands()) + addNodeIDMachineOperand(Op); + addNodeIDFlag(MI->getFlags()); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDOpcode(unsigned Opc) const { + ID.AddInteger(Opc); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const LLT &Ty) const { + uint64_t Val = Ty.getUniqueRAWLLTData(); + ID.AddInteger(Val); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const TargetRegisterClass *RC) const { + ID.AddPointer(RC); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const RegisterBank *RB) const { + ID.AddPointer(RB); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDImmediate(int64_t Imm) const { + ID.AddInteger(Imm); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegNum(unsigned Reg) const { + ID.AddInteger(Reg); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const unsigned Reg) const { + addNodeIDMachineOperand(MachineOperand::CreateReg(Reg, false)); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDMBB(const MachineBasicBlock *MBB) const { + ID.AddPointer(MBB); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDFlag(unsigned Flag) const { + if (Flag) + ID.AddInteger(Flag); + return *this; +} + +const GISelInstProfileBuilder 
&GISelInstProfileBuilder::addNodeIDMachineOperand( + const MachineOperand &MO) const { + if (MO.isReg()) { + Register Reg = MO.getReg(); + if (!MO.isDef()) + addNodeIDRegNum(Reg); + LLT Ty = MRI.getType(Reg); + if (Ty.isValid()) + addNodeIDRegType(Ty); + auto *RB = MRI.getRegBankOrNull(Reg); + if (RB) + addNodeIDRegType(RB); + auto *RC = MRI.getRegClassOrNull(Reg); + if (RC) + addNodeIDRegType(RC); + assert(!MO.isImplicit() && "Unhandled case"); + } else if (MO.isImm()) + ID.AddInteger(MO.getImm()); + else if (MO.isCImm()) + ID.AddPointer(MO.getCImm()); + else if (MO.isFPImm()) + ID.AddPointer(MO.getFPImm()); + else if (MO.isPredicate()) + ID.AddInteger(MO.getPredicate()); + else + llvm_unreachable("Unhandled operand type"); + // Handle other types + return *this; +} + +GISelCSEInfo & +GISelCSEAnalysisWrapper::get(std::unique_ptr<CSEConfigBase> CSEOpt, + bool Recompute) { + if (!AlreadyComputed || Recompute) { + Info.setCSEConfig(std::move(CSEOpt)); + Info.analyze(*MF); + AlreadyComputed = true; + } + return Info; +} +void GISelCSEAnalysisWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + MachineFunctionPass::getAnalysisUsage(AU); +} + +bool GISelCSEAnalysisWrapperPass::runOnMachineFunction(MachineFunction &MF) { + releaseMemory(); + Wrapper.setMF(MF); + return false; +} diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp new file mode 100644 index 000000000000..51a74793f029 --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp @@ -0,0 +1,255 @@ +//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.cpp - MIBuilder--*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the CSEMIRBuilder class which CSEs as it builds +/// instructions. 
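+///
+/// A minimal usage sketch (names hypothetical; assumes a GISelCSEInfo has
+/// already been computed for the enclosing function):
+///
+///   CSEMIRBuilder B(MF);
+///   B.setCSEInfo(&CSEInfo);
+///   B.setInsertPt(MBB, MBB.end());
+///   auto C1 = B.buildConstant(LLT::scalar(32), 42);
+///   auto C2 = B.buildConstant(LLT::scalar(32), 42);
+///   // C2 and C1 refer to the same G_CONSTANT: the second call is a CSE hit.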
+//===----------------------------------------------------------------------===// +// + +#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" + +using namespace llvm; + +bool CSEMIRBuilder::dominates(MachineBasicBlock::const_iterator A, + MachineBasicBlock::const_iterator B) const { + auto MBBEnd = getMBB().end(); + if (B == MBBEnd) + return true; + assert(A->getParent() == B->getParent() && + "Iterators should be in same block"); + const MachineBasicBlock *BBA = A->getParent(); + MachineBasicBlock::const_iterator I = BBA->begin(); + for (; &*I != A && &*I != B; ++I) + ; + return &*I == A; +} + +MachineInstrBuilder +CSEMIRBuilder::getDominatingInstrForID(FoldingSetNodeID &ID, + void *&NodeInsertPos) { + GISelCSEInfo *CSEInfo = getCSEInfo(); + assert(CSEInfo && "Can't get here without setting CSEInfo"); + MachineBasicBlock *CurMBB = &getMBB(); + MachineInstr *MI = + CSEInfo->getMachineInstrIfExists(ID, CurMBB, NodeInsertPos); + if (MI) { + CSEInfo->countOpcodeHit(MI->getOpcode()); + auto CurrPos = getInsertPt(); + if (!dominates(MI, CurrPos)) + CurMBB->splice(CurrPos, CurMBB, MI); + return MachineInstrBuilder(getMF(), MI); + } + return MachineInstrBuilder(); +} + +bool CSEMIRBuilder::canPerformCSEForOpc(unsigned Opc) const { + const GISelCSEInfo *CSEInfo = getCSEInfo(); + if (!CSEInfo || !CSEInfo->shouldCSE(Opc)) + return false; + return true; +} + +void CSEMIRBuilder::profileDstOp(const DstOp &Op, + GISelInstProfileBuilder &B) const { + switch (Op.getDstOpKind()) { + case DstOp::DstType::Ty_RC: + B.addNodeIDRegType(Op.getRegClass()); + break; + default: + B.addNodeIDRegType(Op.getLLTTy(*getMRI())); + break; + } +} + +void CSEMIRBuilder::profileSrcOp(const SrcOp &Op, + GISelInstProfileBuilder &B) const { + switch (Op.getSrcOpKind()) { + case SrcOp::SrcType::Ty_Predicate: + B.addNodeIDImmediate(static_cast<int64_t>(Op.getPredicate())); + break; + default: + B.addNodeIDRegType(Op.getReg()); + break; + } +} + +void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B, + unsigned Opc) const { + // First add the MBB (Local CSE). + B.addNodeIDMBB(&getMBB()); + // Then add the opcode. + B.addNodeIDOpcode(Opc); +} + +void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, + Optional<unsigned> Flags, + GISelInstProfileBuilder &B) const { + + profileMBBOpcode(B, Opc); + // Then add the DstOps. + profileDstOps(DstOps, B); + // Then add the SrcOps. + profileSrcOps(SrcOps, B); + // Add Flags if passed in. + if (Flags) + B.addNodeIDFlag(*Flags); +} + +MachineInstrBuilder CSEMIRBuilder::memoizeMI(MachineInstrBuilder MIB, + void *NodeInsertPos) { + assert(canPerformCSEForOpc(MIB->getOpcode()) && + "Attempting to CSE illegal op"); + MachineInstr *MIBInstr = MIB; + getCSEInfo()->insertInstr(MIBInstr, NodeInsertPos); + return MIB; +} + +bool CSEMIRBuilder::checkCopyToDefsPossible(ArrayRef<DstOp> DstOps) { + if (DstOps.size() == 1) + return true; // always possible to emit copy to just 1 vreg. 
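+  // With multiple defs we cannot emit fix-up copies when reusing an existing
+  // instruction (generateCopiesIfRequired only handles the single-def case),
+  // so every def below must be a yet-to-be-created vreg described by an LLT
+  // or a register class rather than an existing register.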
+ + return std::all_of(DstOps.begin(), DstOps.end(), [](const DstOp &Op) { + DstOp::DstType DT = Op.getDstOpKind(); + return DT == DstOp::DstType::Ty_LLT || DT == DstOp::DstType::Ty_RC; + }); +} + +MachineInstrBuilder +CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps, + MachineInstrBuilder &MIB) { + assert(checkCopyToDefsPossible(DstOps) && + "Impossible return a single MIB with copies to multiple defs"); + if (DstOps.size() == 1) { + const DstOp &Op = DstOps[0]; + if (Op.getDstOpKind() == DstOp::DstType::Ty_Reg) + return buildCopy(Op.getReg(), MIB->getOperand(0).getReg()); + } + return MIB; +} + +MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, + ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, + Optional<unsigned> Flag) { + switch (Opc) { + default: + break; + case TargetOpcode::G_ADD: + case TargetOpcode::G_AND: + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_MUL: + case TargetOpcode::G_OR: + case TargetOpcode::G_SHL: + case TargetOpcode::G_SUB: + case TargetOpcode::G_XOR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_SREM: { + // Try to constant fold these. + assert(SrcOps.size() == 2 && "Invalid sources"); + assert(DstOps.size() == 1 && "Invalid dsts"); + if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(), + SrcOps[1].getReg(), *getMRI())) + return buildConstant(DstOps[0], Cst->getSExtValue()); + break; + } + case TargetOpcode::G_SEXT_INREG: { + assert(DstOps.size() == 1 && "Invalid dst ops"); + assert(SrcOps.size() == 2 && "Invalid src ops"); + const DstOp &Dst = DstOps[0]; + const SrcOp &Src0 = SrcOps[0]; + const SrcOp &Src1 = SrcOps[1]; + if (auto MaybeCst = + ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI())) + return buildConstant(Dst, MaybeCst->getSExtValue()); + break; + } + } + bool CanCopy = checkCopyToDefsPossible(DstOps); + if (!canPerformCSEForOpc(Opc)) + return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); + // If we can CSE this instruction, but involves generating copies to multiple + // regs, give up. This frequently happens to UNMERGEs. + if (!CanCopy) { + auto MIB = MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); + // CSEInfo would have tracked this instruction. Remove it from the temporary + // insts. + getCSEInfo()->handleRemoveInst(&*MIB); + return MIB; + } + FoldingSetNodeID ID; + GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); + void *InsertPos = nullptr; + profileEverything(Opc, DstOps, SrcOps, Flag, ProfBuilder); + MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); + if (MIB) { + // Handle generating copies here. + return generateCopiesIfRequired(DstOps, MIB); + } + // This instruction does not exist in the CSEInfo. Build it and CSE it. + MachineInstrBuilder NewMIB = + MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); + return memoizeMI(NewMIB, InsertPos); +} + +MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res, + const ConstantInt &Val) { + constexpr unsigned Opc = TargetOpcode::G_CONSTANT; + if (!canPerformCSEForOpc(Opc)) + return MachineIRBuilder::buildConstant(Res, Val); + + // For vectors, CSE the element only for now. 
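+  // E.g. a <4 x s32> splat constant becomes a single (CSE-able) s32
+  // G_CONSTANT feeding a G_BUILD_VECTOR, rather than CSEing the vector
+  // constant as a whole.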
+ LLT Ty = Res.getLLTTy(*getMRI()); + if (Ty.isVector()) + return buildSplatVector(Res, buildConstant(Ty.getElementType(), Val)); + + FoldingSetNodeID ID; + GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); + void *InsertPos = nullptr; + profileMBBOpcode(ProfBuilder, Opc); + profileDstOp(Res, ProfBuilder); + ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateCImm(&Val)); + MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); + if (MIB) { + // Handle generating copies here. + return generateCopiesIfRequired({Res}, MIB); + } + + MachineInstrBuilder NewMIB = MachineIRBuilder::buildConstant(Res, Val); + return memoizeMI(NewMIB, InsertPos); +} + +MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res, + const ConstantFP &Val) { + constexpr unsigned Opc = TargetOpcode::G_FCONSTANT; + if (!canPerformCSEForOpc(Opc)) + return MachineIRBuilder::buildFConstant(Res, Val); + + // For vectors, CSE the element only for now. + LLT Ty = Res.getLLTTy(*getMRI()); + if (Ty.isVector()) + return buildSplatVector(Res, buildFConstant(Ty.getElementType(), Val)); + + FoldingSetNodeID ID; + GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); + void *InsertPos = nullptr; + profileMBBOpcode(ProfBuilder, Opc); + profileDstOp(Res, ProfBuilder); + ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateFPImm(&Val)); + MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); + if (MIB) { + // Handle generating copies here. + return generateCopiesIfRequired({Res}, MIB); + } + MachineInstrBuilder NewMIB = MachineIRBuilder::buildFConstant(Res, Val); + return memoizeMI(NewMIB, InsertPos); +} diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp new file mode 100644 index 000000000000..cdad92f7db4f --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp @@ -0,0 +1,484 @@ +//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file implements some simple delegations needed for call lowering. +/// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/Analysis.h" +#include "llvm/CodeGen/GlobalISel/CallLowering.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" + +#define DEBUG_TYPE "call-lowering" + +using namespace llvm; + +void CallLowering::anchor() {} + +bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS, + ArrayRef<Register> ResRegs, + ArrayRef<ArrayRef<Register>> ArgRegs, + Register SwiftErrorVReg, + std::function<unsigned()> GetCalleeReg) const { + CallLoweringInfo Info; + auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout(); + + // First step is to marshall all the function's parameters into the correct + // physregs and memory locations. Gather the sequence of argument types that + // we'll pass to the assigner function. 
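+  // Each IR argument becomes one ArgInfo carrying its vreg(s), its IR type,
+  // ABI flags derived from the call-site attributes, and whether it is a
+  // fixed (non-variadic) parameter.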
+ unsigned i = 0; + unsigned NumFixedArgs = CS.getFunctionType()->getNumParams(); + for (auto &Arg : CS.args()) { + ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{}, + i < NumFixedArgs}; + setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS); + Info.OrigArgs.push_back(OrigArg); + ++i; + } + + if (const Function *F = CS.getCalledFunction()) + Info.Callee = MachineOperand::CreateGA(F, 0); + else + Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false); + + Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}}; + if (!Info.OrigRet.Ty->isVoidTy()) + setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS); + + Info.KnownCallees = + CS.getInstruction()->getMetadata(LLVMContext::MD_callees); + Info.CallConv = CS.getCallingConv(); + Info.SwiftErrorVReg = SwiftErrorVReg; + Info.IsMustTailCall = CS.isMustTailCall(); + Info.IsTailCall = CS.isTailCall() && + isInTailCallPosition(CS, MIRBuilder.getMF().getTarget()); + Info.IsVarArg = CS.getFunctionType()->isVarArg(); + return lowerCall(MIRBuilder, Info); +} + +template <typename FuncInfoTy> +void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx, + const DataLayout &DL, + const FuncInfoTy &FuncInfo) const { + auto &Flags = Arg.Flags[0]; + const AttributeList &Attrs = FuncInfo.getAttributes(); + if (Attrs.hasAttribute(OpIdx, Attribute::ZExt)) + Flags.setZExt(); + if (Attrs.hasAttribute(OpIdx, Attribute::SExt)) + Flags.setSExt(); + if (Attrs.hasAttribute(OpIdx, Attribute::InReg)) + Flags.setInReg(); + if (Attrs.hasAttribute(OpIdx, Attribute::StructRet)) + Flags.setSRet(); + if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf)) + Flags.setSwiftSelf(); + if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError)) + Flags.setSwiftError(); + if (Attrs.hasAttribute(OpIdx, Attribute::ByVal)) + Flags.setByVal(); + if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca)) + Flags.setInAlloca(); + + if (Flags.isByVal() || Flags.isInAlloca()) { + Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType(); + + auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType(); + Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy)); + + // For ByVal, alignment should be passed from FE. BE will guess if + // this info is not there but there are cases it cannot get right. 
+ unsigned FrameAlign; + if (FuncInfo.getParamAlignment(OpIdx - 2)) + FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2); + else + FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL); + Flags.setByValAlign(Align(FrameAlign)); + } + if (Attrs.hasAttribute(OpIdx, Attribute::Nest)) + Flags.setNest(); + Flags.setOrigAlign(Align(DL.getABITypeAlignment(Arg.Ty))); +} + +template void +CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx, + const DataLayout &DL, + const Function &FuncInfo) const; + +template void +CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx, + const DataLayout &DL, + const CallInst &FuncInfo) const; + +Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy, + MachineIRBuilder &MIRBuilder) const { + assert(SrcRegs.size() > 1 && "Nothing to pack"); + + const DataLayout &DL = MIRBuilder.getMF().getDataLayout(); + MachineRegisterInfo *MRI = MIRBuilder.getMRI(); + + LLT PackedLLT = getLLTForType(*PackedTy, DL); + + SmallVector<LLT, 8> LLTs; + SmallVector<uint64_t, 8> Offsets; + computeValueLLTs(DL, *PackedTy, LLTs, &Offsets); + assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch"); + + Register Dst = MRI->createGenericVirtualRegister(PackedLLT); + MIRBuilder.buildUndef(Dst); + for (unsigned i = 0; i < SrcRegs.size(); ++i) { + Register NewDst = MRI->createGenericVirtualRegister(PackedLLT); + MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]); + Dst = NewDst; + } + + return Dst; +} + +void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg, + Type *PackedTy, + MachineIRBuilder &MIRBuilder) const { + assert(DstRegs.size() > 1 && "Nothing to unpack"); + + const DataLayout &DL = MIRBuilder.getMF().getDataLayout(); + + SmallVector<LLT, 8> LLTs; + SmallVector<uint64_t, 8> Offsets; + computeValueLLTs(DL, *PackedTy, LLTs, &Offsets); + assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch"); + + for (unsigned i = 0; i < DstRegs.size(); ++i) + MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]); +} + +bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder, + SmallVectorImpl<ArgInfo> &Args, + ValueHandler &Handler) const { + MachineFunction &MF = MIRBuilder.getMF(); + const Function &F = MF.getFunction(); + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); + return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler); +} + +bool CallLowering::handleAssignments(CCState &CCInfo, + SmallVectorImpl<CCValAssign> &ArgLocs, + MachineIRBuilder &MIRBuilder, + SmallVectorImpl<ArgInfo> &Args, + ValueHandler &Handler) const { + MachineFunction &MF = MIRBuilder.getMF(); + const Function &F = MF.getFunction(); + const DataLayout &DL = F.getParent()->getDataLayout(); + + unsigned NumArgs = Args.size(); + for (unsigned i = 0; i != NumArgs; ++i) { + MVT CurVT = MVT::getVT(Args[i].Ty); + if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], + Args[i].Flags[0], CCInfo)) { + if (!CurVT.isValid()) + return false; + MVT NewVT = TLI->getRegisterTypeForCallingConv( + F.getContext(), F.getCallingConv(), EVT(CurVT)); + + // If we need to split the type over multiple regs, check it's a scenario + // we currently support. + unsigned NumParts = TLI->getNumRegistersForCallingConv( + F.getContext(), F.getCallingConv(), CurVT); + if (NumParts > 1) { + // For now only handle exact splits. 
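+        // E.g. an s128 passed as 2 x s64 parts is exact (2 * 64 == 128);
+        // splits that would pad or widen the value are rejected.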
+ if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits()) + return false; + } + + // For incoming arguments (physregs to vregs), we could have values in + // physregs (or memlocs) which we want to extract and copy to vregs. + // During this, we might have to deal with the LLT being split across + // multiple regs, so we have to record this information for later. + // + // If we have outgoing args, then we have the opposite case. We have a + // vreg with an LLT which we want to assign to a physical location, and + // we might have to record that the value has to be split later. + if (Handler.isIncomingArgumentHandler()) { + if (NumParts == 1) { + // Try to use the register type if we couldn't assign the VT. + if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i], + Args[i].Flags[0], CCInfo)) + return false; + } else { + // We're handling an incoming arg which is split over multiple regs. + // E.g. passing an s128 on AArch64. + ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0]; + Args[i].OrigRegs.push_back(Args[i].Regs[0]); + Args[i].Regs.clear(); + Args[i].Flags.clear(); + LLT NewLLT = getLLTForMVT(NewVT); + // For each split register, create and assign a vreg that will store + // the incoming component of the larger value. These will later be + // merged to form the final vreg. + for (unsigned Part = 0; Part < NumParts; ++Part) { + Register Reg = + MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT); + ISD::ArgFlagsTy Flags = OrigFlags; + if (Part == 0) { + Flags.setSplit(); + } else { + Flags.setOrigAlign(Align::None()); + if (Part == NumParts - 1) + Flags.setSplitEnd(); + } + Args[i].Regs.push_back(Reg); + Args[i].Flags.push_back(Flags); + if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full, + Args[i], Args[i].Flags[Part], CCInfo)) { + // Still couldn't assign this smaller part type for some reason. + return false; + } + } + } + } else { + // Handling an outgoing arg that might need to be split. + if (NumParts < 2) + return false; // Don't know how to deal with this type combination. + + // This type is passed via multiple registers in the calling convention. + // We need to extract the individual parts. + Register LargeReg = Args[i].Regs[0]; + LLT SmallTy = LLT::scalar(NewVT.getSizeInBits()); + auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg); + assert(Unmerge->getNumOperands() == NumParts + 1); + ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0]; + // We're going to replace the regs and flags with the split ones. + Args[i].Regs.clear(); + Args[i].Flags.clear(); + for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) { + ISD::ArgFlagsTy Flags = OrigFlags; + if (PartIdx == 0) { + Flags.setSplit(); + } else { + Flags.setOrigAlign(Align::None()); + if (PartIdx == NumParts - 1) + Flags.setSplitEnd(); + } + Args[i].Regs.push_back(Unmerge.getReg(PartIdx)); + Args[i].Flags.push_back(Flags); + if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full, + Args[i], Args[i].Flags[PartIdx], CCInfo)) + return false; + } + } + } + } + + for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) { + assert(j < ArgLocs.size() && "Skipped too many arg locs"); + + CCValAssign &VA = ArgLocs[j]; + assert(VA.getValNo() == i && "Location doesn't correspond to current arg"); + + if (VA.needsCustom()) { + j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j)); + continue; + } + + // FIXME: Pack registers if we have more than one. 
+ Register ArgReg = Args[i].Regs[0]; + + MVT OrigVT = MVT::getVT(Args[i].Ty); + MVT VAVT = VA.getValVT(); + if (VA.isRegLoc()) { + if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) { + if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) { + // Expected to be multiple regs for a single incoming arg. + unsigned NumArgRegs = Args[i].Regs.size(); + if (NumArgRegs < 2) + return false; + + assert((j + (NumArgRegs - 1)) < ArgLocs.size() && + "Too many regs for number of args"); + for (unsigned Part = 0; Part < NumArgRegs; ++Part) { + // There should be Regs.size() ArgLocs per argument. + VA = ArgLocs[j + Part]; + Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA); + } + j += NumArgRegs - 1; + // Merge the split registers into the expected larger result vreg + // of the original call. + MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs); + continue; + } + const LLT VATy(VAVT); + Register NewReg = + MIRBuilder.getMRI()->createGenericVirtualRegister(VATy); + Handler.assignValueToReg(NewReg, VA.getLocReg(), VA); + // If it's a vector type, we either need to truncate the elements + // or do an unmerge to get the lower block of elements. + if (VATy.isVector() && + VATy.getNumElements() > OrigVT.getVectorNumElements()) { + const LLT OrigTy(OrigVT); + // Just handle the case where the VA type is 2 * original type. + if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) { + LLVM_DEBUG(dbgs() + << "Incoming promoted vector arg has too many elts"); + return false; + } + auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg}); + MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0)); + } else { + MIRBuilder.buildTrunc(ArgReg, {NewReg}).getReg(0); + } + } else if (!Handler.isIncomingArgumentHandler()) { + assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() && + "Too many regs for number of args"); + // This is an outgoing argument that might have been split. + for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) { + // There should be Regs.size() ArgLocs per argument. + VA = ArgLocs[j + Part]; + Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA); + } + j += Args[i].Regs.size() - 1; + } else { + Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA); + } + } else if (VA.isMemLoc()) { + // Don't currently support loading/storing a type that needs to be split + // to the stack. Should be easy, just not implemented yet. + if (Args[i].Regs.size() > 1) { + LLVM_DEBUG( + dbgs() + << "Load/store a split arg to/from the stack not implemented yet"); + return false; + } + MVT VT = MVT::getVT(Args[i].Ty); + unsigned Size = VT == MVT::iPTR ? DL.getPointerSize() + : alignTo(VT.getSizeInBits(), 8) / 8; + unsigned Offset = VA.getLocMemOffset(); + MachinePointerInfo MPO; + Register StackAddr = Handler.getStackAddress(Size, Offset, MPO); + Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA); + } else { + // FIXME: Support byvals and other weirdness + return false; + } + } + return true; +} + +bool CallLowering::analyzeArgInfo(CCState &CCState, + SmallVectorImpl<ArgInfo> &Args, + CCAssignFn &AssignFnFixed, + CCAssignFn &AssignFnVarArg) const { + for (unsigned i = 0, e = Args.size(); i < e; ++i) { + MVT VT = MVT::getVT(Args[i].Ty); + CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg; + if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) { + // Bail out on anything we can't handle. 
+ LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString() + << " (arg number = " << i << "\n"); + return false; + } + } + return true; +} + +bool CallLowering::resultsCompatible(CallLoweringInfo &Info, + MachineFunction &MF, + SmallVectorImpl<ArgInfo> &InArgs, + CCAssignFn &CalleeAssignFnFixed, + CCAssignFn &CalleeAssignFnVarArg, + CCAssignFn &CallerAssignFnFixed, + CCAssignFn &CallerAssignFnVarArg) const { + const Function &F = MF.getFunction(); + CallingConv::ID CalleeCC = Info.CallConv; + CallingConv::ID CallerCC = F.getCallingConv(); + + if (CallerCC == CalleeCC) + return true; + + SmallVector<CCValAssign, 16> ArgLocs1; + CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext()); + if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed, + CalleeAssignFnVarArg)) + return false; + + SmallVector<CCValAssign, 16> ArgLocs2; + CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext()); + if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed, + CalleeAssignFnVarArg)) + return false; + + // We need the argument locations to match up exactly. If there's more in + // one than the other, then we are done. + if (ArgLocs1.size() != ArgLocs2.size()) + return false; + + // Make sure that each location is passed in exactly the same way. + for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) { + const CCValAssign &Loc1 = ArgLocs1[i]; + const CCValAssign &Loc2 = ArgLocs2[i]; + + // We need both of them to be the same. So if one is a register and one + // isn't, we're done. + if (Loc1.isRegLoc() != Loc2.isRegLoc()) + return false; + + if (Loc1.isRegLoc()) { + // If they don't have the same register location, we're done. + if (Loc1.getLocReg() != Loc2.getLocReg()) + return false; + + // They matched, so we can move to the next ArgLoc. + continue; + } + + // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match. + if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset()) + return false; + } + + return true; +} + +Register CallLowering::ValueHandler::extendRegister(Register ValReg, + CCValAssign &VA) { + LLT LocTy{VA.getLocVT()}; + if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits()) + return ValReg; + switch (VA.getLocInfo()) { + default: break; + case CCValAssign::Full: + case CCValAssign::BCvt: + // FIXME: bitconverting between vector types may or may not be a + // nop in big-endian situations. + return ValReg; + case CCValAssign::AExt: { + auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg); + return MIB->getOperand(0).getReg(); + } + case CCValAssign::SExt: { + Register NewReg = MRI.createGenericVirtualRegister(LocTy); + MIRBuilder.buildSExt(NewReg, ValReg); + return NewReg; + } + case CCValAssign::ZExt: { + Register NewReg = MRI.createGenericVirtualRegister(LocTy); + MIRBuilder.buildZExt(NewReg, ValReg); + return NewReg; + } + } + llvm_unreachable("unable to extend register"); +} + +void CallLowering::ValueHandler::anchor() {} diff --git a/llvm/lib/CodeGen/GlobalISel/Combiner.cpp b/llvm/lib/CodeGen/GlobalISel/Combiner.cpp new file mode 100644 index 000000000000..b4562a5c6601 --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/Combiner.cpp @@ -0,0 +1,159 @@ +//===-- lib/CodeGen/GlobalISel/Combiner.cpp -------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains common code to combine machine functions at generic
+// level.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Combiner.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
+#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "gi-combiner"
+
+using namespace llvm;
+
+namespace llvm {
+cl::OptionCategory GICombinerOptionCategory(
+    "GlobalISel Combiner",
+    "Control the rules which are enabled. These options all take a comma "
+    "separated list of rules to disable and may be specified by number "
+    "or number range (e.g. 1-10)."
+#ifndef NDEBUG
+    " They may also be specified by name."
+#endif
+);
+} // end namespace llvm
+
+namespace {
+/// This class acts as the glue that joins the CombinerHelper to the overall
+/// Combine algorithm. The CombinerHelper is intended to report the
+/// modifications it makes to the MIR to the GISelChangeObserver and the
+/// observer subclass will act on these events. In this case, instruction
+/// erasure will cancel any future visits to the erased instruction and
+/// instruction creation will schedule that instruction for a future visit.
+/// Other Combiner implementations may require more complex behaviour from
+/// their GISelChangeObserver subclass.
+class WorkListMaintainer : public GISelChangeObserver {
+  using WorkListTy = GISelWorkList<512>;
+  WorkListTy &WorkList;
+  /// The instructions that have been created but that we want to report only
+  /// once they have their operands. This is only maintained if debug output
+  /// is requested.
+  SmallPtrSet<const MachineInstr *, 4> CreatedInstrs;
+
+public:
+  WorkListMaintainer(WorkListTy &WorkList)
+      : GISelChangeObserver(), WorkList(WorkList) {}
+  virtual ~WorkListMaintainer() {
+  }
+
+  void erasingInstr(MachineInstr &MI) override {
+    LLVM_DEBUG(dbgs() << "Erasing: " << MI << "\n");
+    WorkList.remove(&MI);
+  }
+  void createdInstr(MachineInstr &MI) override {
+    LLVM_DEBUG(dbgs() << "Creating: " << MI << "\n");
+    WorkList.insert(&MI);
+    LLVM_DEBUG(CreatedInstrs.insert(&MI));
+  }
+  void changingInstr(MachineInstr &MI) override {
+    LLVM_DEBUG(dbgs() << "Changing: " << MI << "\n");
+    WorkList.insert(&MI);
+  }
+  void changedInstr(MachineInstr &MI) override {
+    LLVM_DEBUG(dbgs() << "Changed: " << MI << "\n");
+    WorkList.insert(&MI);
+  }
+
+  void reportFullyCreatedInstrs() {
+    LLVM_DEBUG(for (const auto *MI
+                    : CreatedInstrs) {
+      dbgs() << "Created: ";
+      MI->print(dbgs());
+    });
+    LLVM_DEBUG(CreatedInstrs.clear());
+  }
+};
+}
+
+Combiner::Combiner(CombinerInfo &Info, const TargetPassConfig *TPC)
+    : CInfo(Info), TPC(TPC) {
+  (void)this->TPC; // FIXME: Remove when used.
+}
+
+bool Combiner::combineMachineInstrs(MachineFunction &MF,
+                                    GISelCSEInfo *CSEInfo) {
+  // If the ISel pipeline failed, do not bother running this pass.
+  // FIXME: Should this be here or in individual combiner passes?
+ if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + + Builder = + CSEInfo ? std::make_unique<CSEMIRBuilder>() : std::make_unique<MachineIRBuilder>(); + MRI = &MF.getRegInfo(); + Builder->setMF(MF); + if (CSEInfo) + Builder->setCSEInfo(CSEInfo); + + LLVM_DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n'); + + MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); + + bool MFChanged = false; + bool Changed; + MachineIRBuilder &B = *Builder.get(); + + do { + // Collect all instructions. Do a post order traversal for basic blocks and + // insert with list bottom up, so while we pop_back_val, we'll traverse top + // down RPOT. + Changed = false; + GISelWorkList<512> WorkList; + WorkListMaintainer Observer(WorkList); + GISelObserverWrapper WrapperObserver(&Observer); + if (CSEInfo) + WrapperObserver.addObserver(CSEInfo); + RAIIDelegateInstaller DelInstall(MF, &WrapperObserver); + for (MachineBasicBlock *MBB : post_order(&MF)) { + if (MBB->empty()) + continue; + for (auto MII = MBB->rbegin(), MIE = MBB->rend(); MII != MIE;) { + MachineInstr *CurMI = &*MII; + ++MII; + // Erase dead insts before even adding to the list. + if (isTriviallyDead(*CurMI, *MRI)) { + LLVM_DEBUG(dbgs() << *CurMI << "Is dead; erasing.\n"); + CurMI->eraseFromParentAndMarkDBGValuesForRemoval(); + continue; + } + WorkList.deferred_insert(CurMI); + } + } + WorkList.finalize(); + // Main Loop. Process the instructions here. + while (!WorkList.empty()) { + MachineInstr *CurrInst = WorkList.pop_back_val(); + LLVM_DEBUG(dbgs() << "\nTry combining " << *CurrInst;); + Changed |= CInfo.combine(WrapperObserver, *CurrInst, B); + Observer.reportFullyCreatedInstrs(); + } + MFChanged |= Changed; + } while (Changed); + + return MFChanged; +} diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp new file mode 100644 index 000000000000..854769d283f7 --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -0,0 +1,1306 @@ +//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "llvm/CodeGen/GlobalISel/CombinerHelper.h" +#include "llvm/CodeGen/GlobalISel/Combiner.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/Target/TargetMachine.h" + +#define DEBUG_TYPE "gi-combiner" + +using namespace llvm; + +// Option to allow testing of the combiner while no targets know about indexed +// addressing. 
+static cl::opt<bool> + ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false), + cl::desc("Force all indexed operations to be " + "legal for the GlobalISel combiner")); + + +CombinerHelper::CombinerHelper(GISelChangeObserver &Observer, + MachineIRBuilder &B, GISelKnownBits *KB, + MachineDominatorTree *MDT) + : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), + KB(KB), MDT(MDT) { + (void)this->KB; +} + +void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, + Register ToReg) const { + Observer.changingAllUsesOfReg(MRI, FromReg); + + if (MRI.constrainRegAttrs(ToReg, FromReg)) + MRI.replaceRegWith(FromReg, ToReg); + else + Builder.buildCopy(ToReg, FromReg); + + Observer.finishedChangingAllUsesOfReg(); +} + +void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI, + MachineOperand &FromRegOp, + Register ToReg) const { + assert(FromRegOp.getParent() && "Expected an operand in an MI"); + Observer.changingInstr(*FromRegOp.getParent()); + + FromRegOp.setReg(ToReg); + + Observer.changedInstr(*FromRegOp.getParent()); +} + +bool CombinerHelper::tryCombineCopy(MachineInstr &MI) { + if (matchCombineCopy(MI)) { + applyCombineCopy(MI); + return true; + } + return false; +} +bool CombinerHelper::matchCombineCopy(MachineInstr &MI) { + if (MI.getOpcode() != TargetOpcode::COPY) + return false; + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(SrcReg); + // Simple Copy Propagation. + // a(sx) = COPY b(sx) -> Replace all uses of a with b. + if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy) + return true; + return false; +} +void CombinerHelper::applyCombineCopy(MachineInstr &MI) { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + MI.eraseFromParent(); + replaceRegWith(MRI, DstReg, SrcReg); +} + +bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) { + bool IsUndef = false; + SmallVector<Register, 4> Ops; + if (matchCombineConcatVectors(MI, IsUndef, Ops)) { + applyCombineConcatVectors(MI, IsUndef, Ops); + return true; + } + return false; +} + +bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef, + SmallVectorImpl<Register> &Ops) { + assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS && + "Invalid instruction"); + IsUndef = true; + MachineInstr *Undef = nullptr; + + // Walk over all the operands of concat vectors and check if they are + // build_vector themselves or undef. + // Then collect their operands in Ops. + for (const MachineOperand &MO : MI.operands()) { + // Skip the instruction definition. + if (MO.isDef()) + continue; + Register Reg = MO.getReg(); + MachineInstr *Def = MRI.getVRegDef(Reg); + assert(Def && "Operand not defined"); + switch (Def->getOpcode()) { + case TargetOpcode::G_BUILD_VECTOR: + IsUndef = false; + // Remember the operands of the build_vector to fold + // them into the yet-to-build flattened concat vectors. + for (const MachineOperand &BuildVecMO : Def->operands()) { + // Skip the definition. + if (BuildVecMO.isDef()) + continue; + Ops.push_back(BuildVecMO.getReg()); + } + break; + case TargetOpcode::G_IMPLICIT_DEF: { + LLT OpType = MRI.getType(Reg); + // Keep one undef value for all the undef operands. 
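+      // E.g. G_CONCAT_VECTORS(build_vector(a, b), undef:<2 x s32>) flattens
+      // to build_vector(a, b, u, u), where u is one scalar G_IMPLICIT_DEF
+      // shared by every undef lane.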
+      if (!Undef) {
+        Builder.setInsertPt(*MI.getParent(), MI);
+        Undef = Builder.buildUndef(OpType.getScalarType());
+      }
+      assert(MRI.getType(Undef->getOperand(0).getReg()) ==
+                 OpType.getScalarType() &&
+             "All undefs should have the same type");
+      // Break the undef vector in as many scalar elements as needed
+      // for the flattening.
+      for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
+           EltIdx != EltEnd; ++EltIdx)
+        Ops.push_back(Undef->getOperand(0).getReg());
+      break;
+    }
+    default:
+      return false;
+    }
+  }
+  return true;
+}
+void CombinerHelper::applyCombineConcatVectors(
+    MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
+  // We determined that the concat_vectors can be flattened.
+  // Generate the flattened build_vector.
+  Register DstReg = MI.getOperand(0).getReg();
+  Builder.setInsertPt(*MI.getParent(), MI);
+  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
+
+  // Note: IsUndef is sort of redundant. We could have determined it by
+  // checking that all Ops are undef. Alternatively, we could have
+  // generated a build_vector of undefs and relied on another combine to
+  // clean that up. For now, given we already gather this information
+  // in tryCombineConcatVectors, just save compile time and issue the
+  // right thing.
+  if (IsUndef)
+    Builder.buildUndef(NewDstReg);
+  else
+    Builder.buildBuildVector(NewDstReg, Ops);
+  MI.eraseFromParent();
+  replaceRegWith(MRI, DstReg, NewDstReg);
+}
+
+bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
+  SmallVector<Register, 4> Ops;
+  if (matchCombineShuffleVector(MI, Ops)) {
+    applyCombineShuffleVector(MI, Ops);
+    return true;
+  }
+  return false;
+}
+
+bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
+                                               SmallVectorImpl<Register> &Ops) {
+  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
+         "Invalid instruction kind");
+  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
+  Register Src1 = MI.getOperand(1).getReg();
+  LLT SrcType = MRI.getType(Src1);
+  unsigned DstNumElts = DstType.getNumElements();
+  unsigned SrcNumElts = SrcType.getNumElements();
+
+  // If the resulting vector is smaller than the size of the source
+  // vectors being concatenated, we won't be able to replace the
+  // shuffle vector with a concat_vectors.
+  //
+  // Note: We may still be able to produce a concat_vectors fed by
+  // extract_vector_elt and so on. It is less clear that would
+  // be better though, so don't bother for now.
+  if (DstNumElts < 2 * SrcNumElts)
+    return false;
+
+  // Check that the shuffle mask can be broken evenly between the
+  // different sources.
+  if (DstNumElts % SrcNumElts != 0)
+    return false;
+
+  // Mask length is a multiple of the source vector length.
+  // Check if the shuffle is some kind of concatenation of the input
+  // vectors.
+  unsigned NumConcat = DstNumElts / SrcNumElts;
+  SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
+  SmallVector<int, 8> Mask;
+  ShuffleVectorInst::getShuffleMask(MI.getOperand(3).getShuffleMask(), Mask);
+  for (unsigned i = 0; i != DstNumElts; ++i) {
+    int Idx = Mask[i];
+    // Undef value.
+    if (Idx < 0)
+      continue;
+    // Ensure the indices in each SrcType sized piece are sequential and that
+    // the same source is used for the whole piece.
+    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
+        (ConcatSrcs[i / SrcNumElts] >= 0 &&
+         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
+      return false;
+    // Remember which source this index came from.
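+    // (E.g. with two <2 x s32> sources, mask <0,1,2,3> is concat(Src1, Src2)
+    // and <2,3,0,1> is concat(Src2, Src1), while <0,2,1,3> fails the
+    // sequential check above.)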
+ ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; + } + + // The shuffle is concatenating multiple vectors together. + // Collect the different operands for that. + Register UndefReg; + Register Src2 = MI.getOperand(2).getReg(); + for (auto Src : ConcatSrcs) { + if (Src < 0) { + if (!UndefReg) { + Builder.setInsertPt(*MI.getParent(), MI); + UndefReg = Builder.buildUndef(SrcType).getReg(0); + } + Ops.push_back(UndefReg); + } else if (Src == 0) + Ops.push_back(Src1); + else + Ops.push_back(Src2); + } + return true; +} + +void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI, + const ArrayRef<Register> Ops) { + Register DstReg = MI.getOperand(0).getReg(); + Builder.setInsertPt(*MI.getParent(), MI); + Register NewDstReg = MRI.cloneVirtualRegister(DstReg); + + Builder.buildConcatVectors(NewDstReg, Ops); + + MI.eraseFromParent(); + replaceRegWith(MRI, DstReg, NewDstReg); +} + +namespace { + +/// Select a preference between two uses. CurrentUse is the current preference +/// while *ForCandidate is attributes of the candidate under consideration. +PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse, + const LLT &TyForCandidate, + unsigned OpcodeForCandidate, + MachineInstr *MIForCandidate) { + if (!CurrentUse.Ty.isValid()) { + if (CurrentUse.ExtendOpcode == OpcodeForCandidate || + CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT) + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + return CurrentUse; + } + + // We permit the extend to hoist through basic blocks but this is only + // sensible if the target has extending loads. If you end up lowering back + // into a load and extend during the legalizer then the end result is + // hoisting the extend up to the load. + + // Prefer defined extensions to undefined extensions as these are more + // likely to reduce the number of instructions. + if (OpcodeForCandidate == TargetOpcode::G_ANYEXT && + CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT) + return CurrentUse; + else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT && + OpcodeForCandidate != TargetOpcode::G_ANYEXT) + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + + // Prefer sign extensions to zero extensions as sign-extensions tend to be + // more expensive. + if (CurrentUse.Ty == TyForCandidate) { + if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT && + OpcodeForCandidate == TargetOpcode::G_ZEXT) + return CurrentUse; + else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT && + OpcodeForCandidate == TargetOpcode::G_SEXT) + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + } + + // This is potentially target specific. We've chosen the largest type + // because G_TRUNC is usually free. One potential catch with this is that + // some targets have a reduced number of larger registers than smaller + // registers and this choice potentially increases the live-range for the + // larger value. + if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) { + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + } + return CurrentUse; +} + +/// Find a suitable place to insert some instructions and insert them. This +/// function accounts for special cases like inserting before a PHI node. +/// The current strategy for inserting before PHI's is to duplicate the +/// instructions for each predecessor. However, while that's ok for G_TRUNC +/// on most targets since it generally requires no code, other targets/cases may +/// want to try harder to find a dominating block. 
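+/// E.g. for a use
+///   %phi:_(s32) = G_PHI %v(s32), %bb.1, %v(s32), %bb.2
+/// the new instructions are inserted into %bb.1 and %bb.2, once per incoming
+/// edge, rather than in front of the G_PHI itself.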
+static void InsertInsnsWithoutSideEffectsBeforeUse( + MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO, + std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator, + MachineOperand &UseMO)> + Inserter) { + MachineInstr &UseMI = *UseMO.getParent(); + + MachineBasicBlock *InsertBB = UseMI.getParent(); + + // If the use is a PHI then we want the predecessor block instead. + if (UseMI.isPHI()) { + MachineOperand *PredBB = std::next(&UseMO); + InsertBB = PredBB->getMBB(); + } + + // If the block is the same block as the def then we want to insert just after + // the def instead of at the start of the block. + if (InsertBB == DefMI.getParent()) { + MachineBasicBlock::iterator InsertPt = &DefMI; + Inserter(InsertBB, std::next(InsertPt), UseMO); + return; + } + + // Otherwise we want the start of the BB + Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO); +} +} // end anonymous namespace + +bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) { + PreferredTuple Preferred; + if (matchCombineExtendingLoads(MI, Preferred)) { + applyCombineExtendingLoads(MI, Preferred); + return true; + } + return false; +} + +bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI, + PreferredTuple &Preferred) { + // We match the loads and follow the uses to the extend instead of matching + // the extends and following the def to the load. This is because the load + // must remain in the same position for correctness (unless we also add code + // to find a safe place to sink it) whereas the extend is freely movable. + // It also prevents us from duplicating the load for the volatile case or just + // for performance. + + if (MI.getOpcode() != TargetOpcode::G_LOAD && + MI.getOpcode() != TargetOpcode::G_SEXTLOAD && + MI.getOpcode() != TargetOpcode::G_ZEXTLOAD) + return false; + + auto &LoadValue = MI.getOperand(0); + assert(LoadValue.isReg() && "Result wasn't a register?"); + + LLT LoadValueTy = MRI.getType(LoadValue.getReg()); + if (!LoadValueTy.isScalar()) + return false; + + // Most architectures are going to legalize <s8 loads into at least a 1 byte + // load, and the MMOs can only describe memory accesses in multiples of bytes. + // If we try to perform extload combining on those, we can end up with + // %a(s8) = extload %ptr (load 1 byte from %ptr) + // ... which is an illegal extload instruction. + if (LoadValueTy.getSizeInBits() < 8) + return false; + + // For non power-of-2 types, they will very likely be legalized into multiple + // loads. Don't bother trying to match them into extending loads. + if (!isPowerOf2_32(LoadValueTy.getSizeInBits())) + return false; + + // Find the preferred type aside from the any-extends (unless it's the only + // one) and non-extending ops. We'll emit an extending load to that type and + // and emit a variant of (extend (trunc X)) for the others according to the + // relative type sizes. At the same time, pick an extend to use based on the + // extend involved in the chosen type. + unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD + ? TargetOpcode::G_ANYEXT + : MI.getOpcode() == TargetOpcode::G_SEXTLOAD + ? 
+  Preferred = {LLT(), PreferredOpcode, nullptr};
+  for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
+    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
+        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
+        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
+      Preferred = ChoosePreferredUse(Preferred,
+                                     MRI.getType(UseMI.getOperand(0).getReg()),
+                                     UseMI.getOpcode(), &UseMI);
+    }
+  }
+
+  // There were no extends.
+  if (!Preferred.MI)
+    return false;
+  // It should be impossible to choose an extend without selecting a different
+  // type since by definition the result of an extend is larger.
+  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
+
+  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
+  return true;
+}
+
+void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
+                                                PreferredTuple &Preferred) {
+  // Rewrite the load to the chosen extending load.
+  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
+
+  // Inserter to insert a truncate back to the original type at a given point
+  // with some basic CSE to limit truncate duplication to one per BB.
+  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
+  auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
+                           MachineBasicBlock::iterator InsertBefore,
+                           MachineOperand &UseMO) {
+    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
+    if (PreviouslyEmitted) {
+      Observer.changingInstr(*UseMO.getParent());
+      UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
+      Observer.changedInstr(*UseMO.getParent());
+      return;
+    }
+
+    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
+    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
+    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
+    EmittedInsns[InsertIntoBB] = NewMI;
+    replaceRegOpWith(MRI, UseMO, NewDstReg);
+  };
+
+  Observer.changingInstr(MI);
+  MI.setDesc(
+      Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
+                               ? TargetOpcode::G_SEXTLOAD
+                               : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
+                                     ? TargetOpcode::G_ZEXTLOAD
+                                     : TargetOpcode::G_LOAD));
+
+  // Rewrite all the uses to fix up the types.
+  auto &LoadValue = MI.getOperand(0);
+  SmallVector<MachineOperand *, 4> Uses;
+  for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
+    Uses.push_back(&UseMO);
+
+  for (auto *UseMO : Uses) {
+    MachineInstr *UseMI = UseMO->getParent();
+
+    // If the extend is compatible with the preferred extend then we should
+    // fix up the type and extend so that it uses the preferred use.
+    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
+        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
+      Register UseDstReg = UseMI->getOperand(0).getReg();
+      MachineOperand &UseSrcMO = UseMI->getOperand(1);
+      const LLT &UseDstTy = MRI.getType(UseDstReg);
+      if (UseDstReg != ChosenDstReg) {
+        if (Preferred.Ty == UseDstTy) {
+          // If the use has the same type as the preferred use, then merge
+          // the vregs and erase the extend. For example:
+          //   %1:_(s8) = G_LOAD ...
+          //   %2:_(s32) = G_SEXT %1(s8)
+          //   %3:_(s32) = G_ANYEXT %1(s8)
+          //   ... = ... %3(s32)
+          // rewrites to:
+          //   %2:_(s32) = G_SEXTLOAD ...
+          //   ... = ... %2(s32)
+          replaceRegWith(MRI, UseDstReg, ChosenDstReg);
+          Observer.erasingInstr(*UseMO->getParent());
+          UseMO->getParent()->eraseFromParent();
+        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
+          // If the preferred size is smaller, then keep the extend but extend
+          // from the result of the extending load. For example:
+          //   %1:_(s8) = G_LOAD ...
+          //   %2:_(s32) = G_SEXT %1(s8)
+          //   %3:_(s64) = G_ANYEXT %1(s8)
+          //   ... = ... %3(s64)
+          // rewrites to:
+          //   %2:_(s32) = G_SEXTLOAD ...
+          //   %3:_(s64) = G_ANYEXT %2:_(s32)
+          //   ... = ... %3(s64)
+          replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
+        } else {
+          // If the preferred size is larger, then insert a truncate. For
+          // example:
+          //   %1:_(s8) = G_LOAD ...
+          //   %2:_(s64) = G_SEXT %1(s8)
+          //   %3:_(s32) = G_ZEXT %1(s8)
+          //   ... = ... %3(s32)
+          // rewrites to:
+          //   %2:_(s64) = G_SEXTLOAD ...
+          //   %4:_(s8) = G_TRUNC %2:_(s64)
+          //   %3:_(s32) = G_ZEXT %4:_(s8)
+          //   ... = ... %3(s32)
+          InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
+                                                 InsertTruncAt);
+        }
+        continue;
+      }
+      // The use is (one of) the uses of the preferred use we chose earlier.
+      // We're going to update the load to def this value later so just erase
+      // the old extend.
+      Observer.erasingInstr(*UseMO->getParent());
+      UseMO->getParent()->eraseFromParent();
+      continue;
+    }
+
+    // The use isn't an extend. Truncate back to the type we originally
+    // loaded. This is free on many targets.
+    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
+  }
+
+  MI.getOperand(0).setReg(ChosenDstReg);
+  Observer.changedInstr(MI);
+}
+
+bool CombinerHelper::isPredecessor(MachineInstr &DefMI, MachineInstr &UseMI) {
+  assert(DefMI.getParent() == UseMI.getParent());
+  if (&DefMI == &UseMI)
+    return false;
+
+  // Loop through the basic block until we find one of the instructions.
+  MachineBasicBlock::const_iterator I = DefMI.getParent()->begin();
+  for (; I != DefMI.getParent()->end(); ++I)
+    if (&*I == &DefMI || &*I == &UseMI)
+      return &*I == &DefMI;
+
+  llvm_unreachable("Block must contain instructions");
+}
+
+bool CombinerHelper::dominates(MachineInstr &DefMI, MachineInstr &UseMI) {
+  if (MDT)
+    return MDT->dominates(&DefMI, &UseMI);
+  else if (DefMI.getParent() != UseMI.getParent())
+    return false;
+
+  return isPredecessor(DefMI, UseMI);
+}
+
+bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
+                                            Register &Base, Register &Offset) {
+  auto &MF = *MI.getParent()->getParent();
+  const auto &TLI = *MF.getSubtarget().getTargetLowering();
+
+#ifndef NDEBUG
+  unsigned Opcode = MI.getOpcode();
+  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
+         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
+#endif
+
+  Base = MI.getOperand(1).getReg();
+  MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
+  if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
+    return false;
+
+  LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
+
+  for (auto &Use : MRI.use_instructions(Base)) {
+    if (Use.getOpcode() != TargetOpcode::G_GEP)
+      continue;
+
+    Offset = Use.getOperand(2).getReg();
+    if (!ForceLegalIndexing &&
+        !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
+      LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
+                        << Use);
+      continue;
+    }
+
+    // Make sure the offset calculation is before the potentially indexed op.
+    // FIXME: we really care about dependency here. The offset calculation
+    // might be movable.
+    MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
+    if (!OffsetDef || !dominates(*OffsetDef, MI)) {
+      LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
+                        << Use);
+      continue;
+    }
+
+    // FIXME: check whether all uses of Base are load/store with foldable
+    // addressing modes. If so, using the normal addr-modes is better than
+    // forming an indexed one.
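+    // The indexed form of the operation will redefine the address register,
+    // so the memory operation must dominate every other use of the G_GEP's
+    // result for the rewrite to be valid; check that below.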
+
+    bool MemOpDominatesAddrUses = true;
+    for (auto &GEPUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
+      if (!dominates(MI, GEPUse)) {
+        MemOpDominatesAddrUses = false;
+        break;
+      }
+    }
+
+    if (!MemOpDominatesAddrUses) {
+      LLVM_DEBUG(
+          dbgs() << "    Ignoring candidate as memop does not dominate uses: "
+                 << Use);
+      continue;
+    }
+
+    LLVM_DEBUG(dbgs() << "    Found match: " << Use);
+    Addr = Use.getOperand(0).getReg();
+    return true;
+  }
+
+  return false;
+}
+
+bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
+                                           Register &Base, Register &Offset) {
+  auto &MF = *MI.getParent()->getParent();
+  const auto &TLI = *MF.getSubtarget().getTargetLowering();
+
+#ifndef NDEBUG
+  unsigned Opcode = MI.getOpcode();
+  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
+         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
+#endif
+
+  Addr = MI.getOperand(1).getReg();
+  MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_GEP, Addr, MRI);
+  if (!AddrDef || MRI.hasOneUse(Addr))
+    return false;
+
+  Base = AddrDef->getOperand(1).getReg();
+  Offset = AddrDef->getOperand(2).getReg();
+
+  LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
+
+  if (!ForceLegalIndexing &&
+      !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
+    LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
+    return false;
+  }
+
+  MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
+  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
+    LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
+    return false;
+  }
+
+  if (MI.getOpcode() == TargetOpcode::G_STORE) {
+    // Would require a copy.
+    if (Base == MI.getOperand(0).getReg()) {
+      LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
+      return false;
+    }
+
+    // We're expecting one use of Addr in MI, but it could also be the
+    // value stored, which isn't actually dominated by the instruction.
+    if (MI.getOperand(0).getReg() == Addr) {
+      LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
+      return false;
+    }
+  }
+
+  // FIXME: check whether all uses of the base pointer are constant GEPs. That
+  // might allow us to end base's liveness here by adjusting the constant.
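+  // As in the post-indexed case, the combined instruction becomes the def of
+  // Addr, so the memory operation must dominate all of Addr's other uses.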
+
+  for (auto &UseMI : MRI.use_instructions(Addr)) {
+    if (!dominates(MI, UseMI)) {
+      LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
+      return false;
+    }
+  }
+
+  return true;
+}
+
+bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
+  unsigned Opcode = MI.getOpcode();
+  if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
+      Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
+    return false;
+
+  bool IsStore = Opcode == TargetOpcode::G_STORE;
+  Register Addr, Base, Offset;
+  bool IsPre = findPreIndexCandidate(MI, Addr, Base, Offset);
+  if (!IsPre && !findPostIndexCandidate(MI, Addr, Base, Offset))
+    return false;
+
+  unsigned NewOpcode;
+  switch (Opcode) {
+  case TargetOpcode::G_LOAD:
+    NewOpcode = TargetOpcode::G_INDEXED_LOAD;
+    break;
+  case TargetOpcode::G_SEXTLOAD:
+    NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
+    break;
+  case TargetOpcode::G_ZEXTLOAD:
+    NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
+    break;
+  case TargetOpcode::G_STORE:
+    NewOpcode = TargetOpcode::G_INDEXED_STORE;
+    break;
+  default:
+    llvm_unreachable("Unknown load/store opcode");
+  }
+
+  MachineInstr &AddrDef = *MRI.getUniqueVRegDef(Addr);
+  MachineIRBuilder MIRBuilder(MI);
+  auto MIB = MIRBuilder.buildInstr(NewOpcode);
+  if (IsStore) {
+    MIB.addDef(Addr);
+    MIB.addUse(MI.getOperand(0).getReg());
+  } else {
+    MIB.addDef(MI.getOperand(0).getReg());
+    MIB.addDef(Addr);
+  }
+
+  MIB.addUse(Base);
+  MIB.addUse(Offset);
+  MIB.addImm(IsPre);
+  MI.eraseFromParent();
+  AddrDef.eraseFromParent();
+
+  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
+  return true;
+}
+
+bool CombinerHelper::matchElideBrByInvertingCond(MachineInstr &MI) {
+  if (MI.getOpcode() != TargetOpcode::G_BR)
+    return false;
+
+  // Try to match the following:
+  // bb1:
+  //   %c(s32) = G_ICMP pred, %a, %b
+  //   %c1(s1) = G_TRUNC %c(s32)
+  //   G_BRCOND %c1, %bb2
+  //   G_BR %bb3
+  // bb2:
+  //   ...
+  // bb3:
+
+  // Because the conditional branch targets the layout successor bb2, the
+  // above pattern always takes a branch no matter which path is followed.
+  // Here we try to find that pattern and replace it with a conditional
+  // branch to bb3 on the inverted condition, falling through to bb2
+  // otherwise.
+
+  MachineBasicBlock *MBB = MI.getParent();
+  MachineBasicBlock::iterator BrIt(MI);
+  if (BrIt == MBB->begin())
+    return false;
+  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
+
+  MachineInstr *BrCond = &*std::prev(BrIt);
+  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
+    return false;
+
+  // Check that the next block is the conditional branch target.
+  if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
+    return false;
+
+  MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
+  if (!CmpMI || CmpMI->getOpcode() != TargetOpcode::G_ICMP ||
+      !MRI.hasOneUse(CmpMI->getOperand(0).getReg()))
+    return false;
+  return true;
+}
+
+bool CombinerHelper::tryElideBrByInvertingCond(MachineInstr &MI) {
+  if (!matchElideBrByInvertingCond(MI))
+    return false;
+  applyElideBrByInvertingCond(MI);
+  return true;
+}
+
+void CombinerHelper::applyElideBrByInvertingCond(MachineInstr &MI) {
+  MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
+  MachineBasicBlock::iterator BrIt(MI);
+  MachineInstr *BrCond = &*std::prev(BrIt);
+  MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
+
+  CmpInst::Predicate InversePred = CmpInst::getInversePredicate(
+      (CmpInst::Predicate)CmpMI->getOperand(1).getPredicate());
+
+  // Invert the G_ICMP condition.
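+  // E.g. an eq predicate becomes ne, so the retargeted G_BRCOND is taken
+  // exactly when the original G_BRCOND was not.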
+  Observer.changingInstr(*CmpMI);
+  CmpMI->getOperand(1).setPredicate(InversePred);
+  Observer.changedInstr(*CmpMI);
+
+  // Change the conditional branch target.
+  Observer.changingInstr(*BrCond);
+  BrCond->getOperand(1).setMBB(BrTarget);
+  Observer.changedInstr(*BrCond);
+  MI.eraseFromParent();
+}
+
+static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
+  // On Darwin, -Os means optimize for size without hurting performance, so
+  // only really optimize for size when -Oz (MinSize) is used.
+  if (MF.getTarget().getTargetTriple().isOSDarwin())
+    return MF.getFunction().hasMinSize();
+  return MF.getFunction().hasOptSize();
+}
+
+// Returns a list of types to use for memory op lowering in MemOps. A partial
+// port of findOptimalMemOpLowering in TargetLowering.
+static bool findGISelOptimalMemOpLowering(
+    std::vector<LLT> &MemOps, unsigned Limit, uint64_t Size, unsigned DstAlign,
+    unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
+    bool AllowOverlap, unsigned DstAS, unsigned SrcAS,
+    const AttributeList &FuncAttributes, const TargetLowering &TLI) {
+  // If 'SrcAlign' is zero, that means the memory operation does not need to
+  // load the value, i.e. memset or memcpy from a constant string. Otherwise,
+  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
+  // is the specified alignment of the memory operation. If it is zero, that
+  // means it's possible to change the alignment of the destination.
+  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
+  // not need to be loaded.
+  if (SrcAlign != 0 && SrcAlign < DstAlign)
+    return false;
+
+  LLT Ty = TLI.getOptimalMemOpLLT(Size, DstAlign, SrcAlign, IsMemset,
+                                  ZeroMemset, MemcpyStrSrc, FuncAttributes);
+
+  if (Ty == LLT()) {
+    // Use the largest scalar type whose alignment constraints are satisfied.
+    // We only need to check DstAlign here as SrcAlign is always greater or
+    // equal to DstAlign (or zero).
+    Ty = LLT::scalar(64);
+    while (DstAlign && DstAlign < Ty.getSizeInBytes() &&
+           !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, DstAlign))
+      Ty = LLT::scalar(Ty.getSizeInBytes());
+    assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
+    // FIXME: check for the largest legal type we can load/store to.
+  }
+
+  unsigned NumMemOps = 0;
+  while (Size != 0) {
+    unsigned TySize = Ty.getSizeInBytes();
+    while (TySize > Size) {
+      // For now, only use non-vector loads/stores for the left-over pieces.
+      LLT NewTy = Ty;
+      // FIXME: check for mem op safety and legality of the types. Not all
+      // SDAGisms map cleanly to GISel concepts.
+      if (NewTy.isVector())
+        NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
+      NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
+      unsigned NewTySize = NewTy.getSizeInBytes();
+      assert(NewTySize > 0 && "Could not find appropriate type");
+
+      // If the new LLT cannot cover all of the remaining bits, then consider
+      // issuing one (or a pair of) unaligned and overlapping load/store ops.
+      bool Fast;
+      // Need to get a VT equivalent for allowsMisalignedMemoryAccesses().
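+      // (The query below uses the MVT-based form so it can also report
+      // whether such an unaligned access would be fast.)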
+      MVT VT = getMVTForLLT(Ty);
+      if (NumMemOps && AllowOverlap && NewTySize < Size &&
+          TLI.allowsMisalignedMemoryAccesses(
+              VT, DstAS, DstAlign, MachineMemOperand::MONone, &Fast) &&
+          Fast)
+        TySize = Size;
+      else {
+        Ty = NewTy;
+        TySize = NewTySize;
+      }
+    }
+
+    if (++NumMemOps > Limit)
+      return false;
+
+    MemOps.push_back(Ty);
+    Size -= TySize;
+  }
+
+  return true;
+}
+
+static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
+  if (Ty.isVector())
+    return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
+                           Ty.getNumElements());
+  return IntegerType::get(C, Ty.getSizeInBits());
+}
+
+// Get a vectorized representation of the memset value operand, GISel edition.
+static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
+  MachineRegisterInfo &MRI = *MIB.getMRI();
+  unsigned NumBits = Ty.getScalarSizeInBits();
+  auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
+  if (!Ty.isVector() && ValVRegAndVal) {
+    unsigned KnownVal = ValVRegAndVal->Value;
+    APInt Scalar = APInt(8, KnownVal);
+    APInt SplatVal = APInt::getSplat(NumBits, Scalar);
+    return MIB.buildConstant(Ty, SplatVal).getReg(0);
+  }
+  // FIXME: for vector types create a G_BUILD_VECTOR.
+  if (Ty.isVector())
+    return Register();
+
+  // Extend the byte value to the larger type, and then multiply by a magic
+  // value 0x010101... in order to replicate it across every byte (e.g.
+  // 0xAB * 0x01010101 == 0xABABABAB).
+  LLT ExtType = Ty.getScalarType();
+  auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
+  if (NumBits > 8) {
+    APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
+    auto MagicMI = MIB.buildConstant(ExtType, Magic);
+    Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
+  }
+
+  assert(ExtType == Ty && "Vector memset value type not supported yet");
+  return Val;
+}
+
+bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
+                                    Register Val, unsigned KnownLen,
+                                    unsigned Align, bool IsVolatile) {
+  auto &MF = *MI.getParent()->getParent();
+  const auto &TLI = *MF.getSubtarget().getTargetLowering();
+  auto &DL = MF.getDataLayout();
+  LLVMContext &C = MF.getFunction().getContext();
+
+  assert(KnownLen != 0 && "Have a zero length memset length!");
+
+  bool DstAlignCanChange = false;
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  bool OptSize = shouldLowerMemFuncForSize(MF);
+
+  MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
+  if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
+    DstAlignCanChange = true;
+
+  unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
+  std::vector<LLT> MemOps;
+
+  const auto &DstMMO = **MI.memoperands_begin();
+  MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
+
+  auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
+  bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
+
+  if (!findGISelOptimalMemOpLowering(
+          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), 0,
+          /*IsMemset=*/true,
+          /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
+          /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(), ~0u,
+          MF.getFunction().getAttributes(), TLI))
+    return false;
+
+  if (DstAlignCanChange) {
+    // Get an estimate of the type from the LLT.
+    Type *IRTy = getTypeForLLT(MemOps[0], C);
+    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
+    if (NewAlign > Align) {
+      Align = NewAlign;
+      unsigned FI = FIDef->getOperand(1).getIndex();
+      // Give the stack frame object a larger alignment if needed.
+      if (MFI.getObjectAlignment(FI) < Align)
+        MFI.setObjectAlignment(FI, Align);
+    }
+  }
+
+  MachineIRBuilder MIB(MI);
+  // Find the largest store and generate the bit pattern for it.
+  LLT LargestTy = MemOps[0];
+  for (unsigned i = 1; i < MemOps.size(); i++)
+    if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
+      LargestTy = MemOps[i];
+
+  // The memset stored value is always defined as an s8, so in order to make
+  // it work with larger store types we need to repeat the bit pattern across
+  // the wider type.
+  Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
+
+  if (!MemSetValue)
+    return false;
+
+  // Generate the stores. For each store type in the list, we generate the
+  // matching store of that type to the destination address.
+  LLT PtrTy = MRI.getType(Dst);
+  unsigned DstOff = 0;
+  unsigned Size = KnownLen;
+  for (unsigned I = 0; I < MemOps.size(); I++) {
+    LLT Ty = MemOps[I];
+    unsigned TySize = Ty.getSizeInBytes();
+    if (TySize > Size) {
+      // This is the last store; it is unaligned and overlaps with the
+      // previous one. Adjust the offset accordingly.
+      assert(I == MemOps.size() - 1 && I != 0);
+      DstOff -= TySize - Size;
+    }
+
+    // If this store is smaller than the largest store, see whether we can
+    // get the smaller value for free with a truncate.
+    Register Value = MemSetValue;
+    if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
+      MVT VT = getMVTForLLT(Ty);
+      MVT LargestVT = getMVTForLLT(LargestTy);
+      if (!LargestTy.isVector() && !Ty.isVector() &&
+          TLI.isTruncateFree(LargestVT, VT))
+        Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
+      else
+        Value = getMemsetValue(Val, Ty, MIB);
+      if (!Value)
+        return false;
+    }
+
+    auto *StoreMMO =
+        MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());
+
+    Register Ptr = Dst;
+    if (DstOff != 0) {
+      auto Offset =
+          MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
+      Ptr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
+    }
+
+    MIB.buildStore(Value, Ptr, *StoreMMO);
+    DstOff += Ty.getSizeInBytes();
+    Size -= TySize;
+  }
+
+  MI.eraseFromParent();
+  return true;
+}
+
+bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
+                                    Register Src, unsigned KnownLen,
+                                    unsigned DstAlign, unsigned SrcAlign,
+                                    bool IsVolatile) {
+  auto &MF = *MI.getParent()->getParent();
+  const auto &TLI = *MF.getSubtarget().getTargetLowering();
+  auto &DL = MF.getDataLayout();
+  LLVMContext &C = MF.getFunction().getContext();
+
+  assert(KnownLen != 0 && "Have a zero length memcpy length!");
+
+  bool DstAlignCanChange = false;
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  bool OptSize = shouldLowerMemFuncForSize(MF);
+  unsigned Alignment = MinAlign(DstAlign, SrcAlign);
+
+  MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
+  if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
+    DstAlignCanChange = true;
+
+  // FIXME: infer better src pointer alignment like SelectionDAG does here.
+  // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
+  // if the memcpy is in a tail call position.
+
+  unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
+  std::vector<LLT> MemOps;
+
+  const auto &DstMMO = **MI.memoperands_begin();
+  const auto &SrcMMO = **std::next(MI.memoperands_begin());
+  MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
+  MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
+
+  if (!findGISelOptimalMemOpLowering(
+          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+          SrcAlign,
+          /*IsMemset=*/false,
+          /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
+          /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
+          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
+    return false;
+
+  if (DstAlignCanChange) {
+    // Get an estimate of the type from the LLT.
+    Type *IRTy = getTypeForLLT(MemOps[0], C);
+    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
+
+    // Don't promote to an alignment that would require dynamic stack
+    // realignment.
+    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+    if (!TRI->needsStackRealignment(MF))
+      while (NewAlign > Alignment &&
+             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+        NewAlign /= 2;
+
+    if (NewAlign > Alignment) {
+      Alignment = NewAlign;
+      unsigned FI = FIDef->getOperand(1).getIndex();
+      // Give the stack frame object a larger alignment if needed.
+      if (MFI.getObjectAlignment(FI) < Alignment)
+        MFI.setObjectAlignment(FI, Alignment);
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
+
+  MachineIRBuilder MIB(MI);
+  // Now we need to emit a load/store pair for each of the types we've
+  // collected: for each type, generate a load of that width from the source
+  // pointer, and a store of the loaded value to the destination buffer. This
+  // can result in a sequence of loads and stores of mixed types, depending
+  // on what the target specifies as good types to use.
+  unsigned CurrOffset = 0;
+  LLT PtrTy = MRI.getType(Src);
+  unsigned Size = KnownLen;
+  for (auto CopyTy : MemOps) {
+    // The last copy may be an unaligned load/store pair that overlaps with
+    // the previous one. Adjust the offset accordingly.
+    if (CopyTy.getSizeInBytes() > Size)
+      CurrOffset -= CopyTy.getSizeInBytes() - Size;
+
+    // Construct MMOs for the accesses.
+    auto *LoadMMO =
+        MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
+    auto *StoreMMO =
+        MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
+
+    // Create the load.
+    Register LoadPtr = Src;
+    Register Offset;
+    if (CurrOffset != 0) {
+      Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
+                   .getReg(0);
+      LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
+    }
+    auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
+
+    // Create the store.
+    Register StorePtr =
+        CurrOffset == 0 ? Dst : MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
+    MIB.buildStore(LdVal, StorePtr, *StoreMMO);
+    CurrOffset += CopyTy.getSizeInBytes();
+    Size -= CopyTy.getSizeInBytes();
+  }
+
+  MI.eraseFromParent();
+  return true;
+}
+
+bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
+                                     Register Src, unsigned KnownLen,
+                                     unsigned DstAlign, unsigned SrcAlign,
+                                     bool IsVolatile) {
+  auto &MF = *MI.getParent()->getParent();
+  const auto &TLI = *MF.getSubtarget().getTargetLowering();
+  auto &DL = MF.getDataLayout();
+  LLVMContext &C = MF.getFunction().getContext();
+
+  assert(KnownLen != 0 && "Have a zero length memmove length!");
+
+  bool DstAlignCanChange = false;
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  bool OptSize = shouldLowerMemFuncForSize(MF);
+  unsigned Alignment = MinAlign(DstAlign, SrcAlign);
+
+  MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
+  if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
+    DstAlignCanChange = true;
+
+  unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
+  std::vector<LLT> MemOps;
+
+  const auto &DstMMO = **MI.memoperands_begin();
+  const auto &SrcMMO = **std::next(MI.memoperands_begin());
+  MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
+  MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
+
+  // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently
+  // due to a bug in its findOptimalMemOpLowering implementation. For now do
+  // the same thing here.
+  if (!findGISelOptimalMemOpLowering(
+          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+          SrcAlign,
+          /*IsMemset=*/false,
+          /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
+          /*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
+          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
+    return false;
+
+  if (DstAlignCanChange) {
+    // Get an estimate of the type from the LLT.
+    Type *IRTy = getTypeForLLT(MemOps[0], C);
+    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
+
+    // Don't promote to an alignment that would require dynamic stack
+    // realignment.
+    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+    if (!TRI->needsStackRealignment(MF))
+      while (NewAlign > Alignment &&
+             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+        NewAlign /= 2;
+
+    if (NewAlign > Alignment) {
+      Alignment = NewAlign;
+      unsigned FI = FIDef->getOperand(1).getIndex();
+      // Give the stack frame object a larger alignment if needed.
+      if (MFI.getObjectAlignment(FI) < Alignment)
+        MFI.setObjectAlignment(FI, Alignment);
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
+
+  MachineIRBuilder MIB(MI);
+  // Memmove requires that we perform all the loads before issuing the
+  // stores. Apart from that, this loop is pretty much doing the same thing
+  // as the memcpy codegen function.
+  unsigned CurrOffset = 0;
+  LLT PtrTy = MRI.getType(Src);
+  SmallVector<Register, 16> LoadVals;
+  for (auto CopyTy : MemOps) {
+    // Construct MMO for the load.
+    auto *LoadMMO =
+        MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
+
+    // Create the load.
+    Register LoadPtr = Src;
+    if (CurrOffset != 0) {
+      auto Offset =
+          MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
+      LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
+    }
+    LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
+    CurrOffset += CopyTy.getSizeInBytes();
+  }
+
+  CurrOffset = 0;
+  for (unsigned I = 0; I < MemOps.size(); ++I) {
+    LLT CopyTy = MemOps[I];
+    // Now store the values loaded.
+    auto *StoreMMO =
+        MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
+
+    Register StorePtr = Dst;
+    if (CurrOffset != 0) {
+      auto Offset =
+          MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
+      StorePtr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
+    }
+    MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
+    CurrOffset += CopyTy.getSizeInBytes();
+  }
+  MI.eraseFromParent();
+  return true;
+}
+
+bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
+  // This combine is fairly complex so it's not written with a separate
+  // matcher function.
+  assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
+  Intrinsic::ID ID = (Intrinsic::ID)MI.getIntrinsicID();
+  assert((ID == Intrinsic::memcpy || ID == Intrinsic::memmove ||
+          ID == Intrinsic::memset) &&
+         "Expected a memcpy-like intrinsic");
+
+  auto MMOIt = MI.memoperands_begin();
+  const MachineMemOperand *MemOp = *MMOIt;
+  bool IsVolatile = MemOp->isVolatile();
+  // Don't try to optimize volatile.
+  if (IsVolatile)
+    return false;
+
+  unsigned DstAlign = MemOp->getBaseAlignment();
+  unsigned SrcAlign = 0;
+  Register Dst = MI.getOperand(1).getReg();
+  Register Src = MI.getOperand(2).getReg();
+  Register Len = MI.getOperand(3).getReg();
+
+  if (ID != Intrinsic::memset) {
+    assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
+    MemOp = *(++MMOIt);
+    SrcAlign = MemOp->getBaseAlignment();
+  }
+
+  // See if this is a constant length copy.
+  auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
+  if (!LenVRegAndVal)
+    return false; // Leave it to the legalizer to lower it to a libcall.
+  unsigned KnownLen = LenVRegAndVal->Value;
+
+  if (KnownLen == 0) {
+    MI.eraseFromParent();
+    return true;
+  }
+
+  if (MaxLen && KnownLen > MaxLen)
+    return false;
+
+  if (ID == Intrinsic::memcpy)
+    return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
+                          IsVolatile);
+  if (ID == Intrinsic::memmove)
+    return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
+                           IsVolatile);
+  if (ID == Intrinsic::memset)
+    return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
+  return false;
+}
+
+bool CombinerHelper::tryCombine(MachineInstr &MI) {
+  if (tryCombineCopy(MI))
+    return true;
+  if (tryCombineExtendingLoads(MI))
+    return true;
+  if (tryCombineIndexedLoadStore(MI))
+    return true;
+  return false;
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp b/llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
new file mode 100644
index 000000000000..62b903c30b89
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp
@@ -0,0 +1,40 @@
+//===-- lib/CodeGen/GlobalISel/GISelChangeObserver.cpp --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains common code for observing and reacting to changes in
+// machine functions at the generic level.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+void GISelChangeObserver::changingAllUsesOfReg(
+    const MachineRegisterInfo &MRI, unsigned Reg) {
+  for (auto &ChangingMI : MRI.use_instructions(Reg)) {
+    changingInstr(ChangingMI);
+    ChangingAllUsesOfReg.insert(&ChangingMI);
+  }
+}
+
+void GISelChangeObserver::finishedChangingAllUsesOfReg() {
+  for (auto *ChangedMI : ChangingAllUsesOfReg)
+    changedInstr(*ChangedMI);
+  ChangingAllUsesOfReg.clear();
+}
+
+RAIIDelegateInstaller::RAIIDelegateInstaller(MachineFunction &MF,
+                                             MachineFunction::Delegate *Del)
+    : MF(MF), Delegate(Del) {
+  // Register this as the delegate for handling insertions and deletions of
+  // instructions.
+  MF.setDelegate(Del);
+}
+
+RAIIDelegateInstaller::~RAIIDelegateInstaller() { MF.resetDelegate(Delegate); }
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
new file mode 100644
index 000000000000..be8efa8795f3
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -0,0 +1,383 @@
+//===- lib/CodeGen/GlobalISel/GISelKnownBits.cpp ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Provides analysis for querying information about KnownBits during GISel
+/// passes.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+
+#define DEBUG_TYPE "gisel-known-bits"
+
+using namespace llvm;
+
+char llvm::GISelKnownBitsAnalysis::ID = 0;
+
+INITIALIZE_PASS_BEGIN(GISelKnownBitsAnalysis, DEBUG_TYPE,
+                      "Analysis for ComputingKnownBits", false, true)
+INITIALIZE_PASS_END(GISelKnownBitsAnalysis, DEBUG_TYPE,
+                    "Analysis for ComputingKnownBits", false, true)
+
+GISelKnownBits::GISelKnownBits(MachineFunction &MF)
+    : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
+      DL(MF.getFunction().getParent()->getDataLayout()) {}
+
+Align GISelKnownBits::inferAlignmentForFrameIdx(int FrameIdx, int Offset,
+                                                const MachineFunction &MF) {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  return commonAlignment(Align(MFI.getObjectAlignment(FrameIdx)), Offset);
+  // TODO: How to handle cases with Base + Offset?
+}
+
+MaybeAlign GISelKnownBits::inferPtrAlignment(const MachineInstr &MI) {
+  if (MI.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
+    int FrameIdx = MI.getOperand(1).getIndex();
+    return inferAlignmentForFrameIdx(FrameIdx, 0, *MI.getMF());
+  }
+  return None;
+}
+
+void GISelKnownBits::computeKnownBitsForFrameIndex(Register R, KnownBits &Known,
+                                                   const APInt &DemandedElts,
+                                                   unsigned Depth) {
+  const MachineInstr &MI = *MRI.getVRegDef(R);
+  computeKnownBitsForAlignment(Known, inferPtrAlignment(MI));
+}
+
+void GISelKnownBits::computeKnownBitsForAlignment(KnownBits &Known,
+                                                  MaybeAlign Alignment) {
+  if (Alignment)
+    // The low bits are known zero if the pointer is aligned.
+    Known.Zero.setLowBits(Log2(Alignment));
+}
+
+KnownBits GISelKnownBits::getKnownBits(MachineInstr &MI) {
+  return getKnownBits(MI.getOperand(0).getReg());
+}
+
+KnownBits GISelKnownBits::getKnownBits(Register R) {
+  KnownBits Known;
+  LLT Ty = MRI.getType(R);
+  APInt DemandedElts =
+      Ty.isVector() ? APInt::getAllOnesValue(Ty.getNumElements()) : APInt(1, 1);
+  computeKnownBitsImpl(R, Known, DemandedElts);
+  return Known;
+}
+
+bool GISelKnownBits::signBitIsZero(Register R) {
+  LLT Ty = MRI.getType(R);
+  unsigned BitWidth = Ty.getScalarSizeInBits();
+  return maskedValueIsZero(R, APInt::getSignMask(BitWidth));
+}
+
+APInt GISelKnownBits::getKnownZeroes(Register R) {
+  return getKnownBits(R).Zero;
+}
+
+APInt GISelKnownBits::getKnownOnes(Register R) { return getKnownBits(R).One; }
+
+void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
+                                          const APInt &DemandedElts,
+                                          unsigned Depth) {
+  MachineInstr &MI = *MRI.getVRegDef(R);
+  unsigned Opcode = MI.getOpcode();
+  LLT DstTy = MRI.getType(R);
+
+  // Handle the case where this is called on a register that does not have a
+  // type constraint (i.e. it has a register class constraint instead). This
+  // is unlikely to occur except by looking through copies but it is possible
+  // for the initial register being queried to be in this state.
+  if (!DstTy.isValid()) {
+    Known = KnownBits();
+    return;
+  }
+
+  unsigned BitWidth = DstTy.getSizeInBits();
+  Known = KnownBits(BitWidth); // Don't know anything.
+
+  if (DstTy.isVector())
+    return; // TODO: Handle vectors.
+
+  if (Depth == getMaxDepth())
+    return;
+
+  if (!DemandedElts)
+    return; // No demanded elts, better to assume we don't know anything.
+
+  KnownBits Known2;
+
+  switch (Opcode) {
+  default:
+    TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,
+                                      Depth);
+    break;
+  case TargetOpcode::COPY: {
+    MachineOperand Dst = MI.getOperand(0);
+    MachineOperand Src = MI.getOperand(1);
+    // Look through trivial copies but don't look through trivial copies of
+    // the form `%1:(s32) = OP %0:gpr32` because known-bits analysis is
+    // currently unable to determine the bit width of a register class.
+    //
+    // We can't use NoSubRegister by name as it's defined by each target but
+    // it's always defined to be 0 by tablegen.
+    if (Dst.getSubReg() == 0 /*NoSubRegister*/ && Src.getReg().isVirtual() &&
+        Src.getSubReg() == 0 /*NoSubRegister*/ &&
+        MRI.getType(Src.getReg()).isValid()) {
+      // Don't increment Depth for this one since we didn't do any work.
+      computeKnownBitsImpl(Src.getReg(), Known, DemandedElts, Depth);
+    }
+    break;
+  }
+  case TargetOpcode::G_CONSTANT: {
+    auto CstVal = getConstantVRegVal(R, MRI);
+    if (!CstVal)
+      break;
+    Known.One = *CstVal;
+    Known.Zero = ~Known.One;
+    break;
+  }
+  case TargetOpcode::G_FRAME_INDEX: {
+    computeKnownBitsForFrameIndex(R, Known, DemandedElts);
+    break;
+  }
+  case TargetOpcode::G_SUB: {
+    // If low bits are known to be zero in both operands, then we know they
+    // are going to be 0 in the result. Both addition and complement
+    // operations preserve the low zero bits.
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+    unsigned KnownZeroLow = Known2.countMinTrailingZeros();
+    if (KnownZeroLow == 0)
+      break;
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+    KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
+    Known.Zero.setLowBits(KnownZeroLow);
+    break;
+  }
+  case TargetOpcode::G_XOR: {
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
+                         Depth + 1);
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+
+    // Output known-0 bits are known if clear or set in both the LHS & RHS.
+    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
+    // Output known-1 are known to be set if set in only one of the LHS, RHS.
+    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
+    Known.Zero = KnownZeroOut;
+    break;
+  }
+  case TargetOpcode::G_GEP: {
+    // G_GEP is like G_ADD. FIXME: Is this true for all targets?
+    LLT Ty = MRI.getType(MI.getOperand(1).getReg());
+    if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
+      break;
+    LLVM_FALLTHROUGH;
+  }
+  case TargetOpcode::G_ADD: {
+    // Low bits are known zero up to the minimum number of trailing zero bits
+    // known in both operands. For example, 8+(X<<3) is known to have the
+    // low 3 bits clear.
+    // Output known-0 bits are also known if the top bits of each input are
+    // known to be clear. For example, if one input has the top 10 bits clear
+    // and the other has the top 8 bits clear, we know the top 7 bits of the
+    // output must be clear.
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+    unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
+    unsigned KnownZeroLow = Known2.countMinTrailingZeros();
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+    KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
+    KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
+    Known.Zero.setLowBits(KnownZeroLow);
+    if (KnownZeroHigh > 1)
+      Known.Zero.setHighBits(KnownZeroHigh - 1);
+    break;
+  }
+  case TargetOpcode::G_AND: {
+    // If either the LHS or the RHS are Zero, the result is zero.
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
+                         Depth + 1);
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+
+    // Output known-1 bits are only known if set in both the LHS & RHS.
+    Known.One &= Known2.One;
+    // Output known-0 are known to be clear if zero in either the LHS | RHS.
+    Known.Zero |= Known2.Zero;
+    break;
+  }
+  case TargetOpcode::G_OR: {
+    // If either the LHS or the RHS is all ones, the result is all ones.
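+    // A result bit is known one if it is one in either operand, and known
+    // zero only if it is zero in both.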
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
+                         Depth + 1);
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+
+    // Output known-0 bits are only known if clear in both the LHS & RHS.
+    Known.Zero &= Known2.Zero;
+    // Output known-1 are known to be set if set in either the LHS | RHS.
+    Known.One |= Known2.One;
+    break;
+  }
+  case TargetOpcode::G_MUL: {
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
+                         Depth + 1);
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+    // If low bits are zero in either operand, output low known-0 bits.
+    // Also compute a conservative estimate for high known-0 bits.
+    // More trickiness is possible, but this is sufficient for the
+    // interesting case of alignment computation.
+    unsigned TrailZ =
+        Known.countMinTrailingZeros() + Known2.countMinTrailingZeros();
+    unsigned LeadZ =
+        std::max(Known.countMinLeadingZeros() + Known2.countMinLeadingZeros(),
+                 BitWidth) -
+        BitWidth;
+
+    Known.resetAll();
+    Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
+    Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
+    break;
+  }
+  case TargetOpcode::G_SELECT: {
+    computeKnownBitsImpl(MI.getOperand(3).getReg(), Known, DemandedElts,
+                         Depth + 1);
+    // If we don't know any bits, early out.
+    if (Known.isUnknown())
+      break;
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
+                         Depth + 1);
+    // Only known if known in both the LHS and RHS.
+    Known.One &= Known2.One;
+    Known.Zero &= Known2.Zero;
+    break;
+  }
+  case TargetOpcode::G_FCMP:
+  case TargetOpcode::G_ICMP: {
+    if (TL.getBooleanContents(DstTy.isVector(),
+                              Opcode == TargetOpcode::G_FCMP) ==
+            TargetLowering::ZeroOrOneBooleanContent &&
+        BitWidth > 1)
+      Known.Zero.setBitsFrom(1);
+    break;
+  }
+  case TargetOpcode::G_SEXT: {
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
+                         Depth + 1);
+    // If the sign bit is known to be zero or one, then sext will extend
+    // it to the top bits, else it will just zext.
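+    // E.g. extending an s8 whose sign bit is known zero to s32 makes bits
+    // 8-31 of the result known zero as well.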
+    Known = Known.sext(BitWidth);
+    break;
+  }
+  case TargetOpcode::G_ANYEXT: {
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
+                         Depth + 1);
+    Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
+    break;
+  }
+  case TargetOpcode::G_LOAD: {
+    if (MI.hasOneMemOperand()) {
+      const MachineMemOperand *MMO = *MI.memoperands_begin();
+      if (const MDNode *Ranges = MMO->getRanges()) {
+        computeKnownBitsFromRangeMetadata(*Ranges, Known);
+      }
+    }
+    break;
+  }
+  case TargetOpcode::G_ZEXTLOAD: {
+    // Everything above the retrieved bits is zero.
+    if (MI.hasOneMemOperand())
+      Known.Zero.setBitsFrom((*MI.memoperands_begin())->getSizeInBits());
+    break;
+  }
+  case TargetOpcode::G_ASHR:
+  case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_SHL: {
+    KnownBits RHSKnown;
+    computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts,
+                         Depth + 1);
+    if (!RHSKnown.isConstant()) {
+      LLVM_DEBUG(
+          MachineInstr *RHSMI = MRI.getVRegDef(MI.getOperand(2).getReg());
+          dbgs() << '[' << Depth << "] Shift not known constant: " << *RHSMI);
+      break;
+    }
+    uint64_t Shift = RHSKnown.getConstant().getZExtValue();
+    LLVM_DEBUG(dbgs() << '[' << Depth << "] Shift is " << Shift << '\n');
+
+    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
+                         Depth + 1);
+
+    switch (Opcode) {
+    case TargetOpcode::G_ASHR:
+      Known.Zero = Known.Zero.ashr(Shift);
+      Known.One = Known.One.ashr(Shift);
+      break;
+    case TargetOpcode::G_LSHR:
+      Known.Zero = Known.Zero.lshr(Shift);
+      Known.One = Known.One.lshr(Shift);
+      Known.Zero.setBitsFrom(Known.Zero.getBitWidth() - Shift);
+      break;
+    case TargetOpcode::G_SHL:
+      Known.Zero = Known.Zero.shl(Shift);
+      Known.One = Known.One.shl(Shift);
+      Known.Zero.setBits(0, Shift);
+      break;
+    }
+    break;
+  }
+  case TargetOpcode::G_INTTOPTR:
+  case TargetOpcode::G_PTRTOINT:
+    // Fall through and handle them the same as zext/trunc.
+    LLVM_FALLTHROUGH;
+  case TargetOpcode::G_ZEXT:
+  case TargetOpcode::G_TRUNC: {
+    Register SrcReg = MI.getOperand(1).getReg();
+    LLT SrcTy = MRI.getType(SrcReg);
+    unsigned SrcBitWidth = SrcTy.isPointer()
+                               ? DL.getIndexSizeInBits(SrcTy.getAddressSpace())
+                               : SrcTy.getSizeInBits();
+    assert(SrcBitWidth && "SrcBitWidth can't be zero");
+    Known = Known.zextOrTrunc(SrcBitWidth, true);
+    computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);
+    Known = Known.zextOrTrunc(BitWidth, true);
+    if (BitWidth > SrcBitWidth)
+      Known.Zero.setBitsFrom(SrcBitWidth);
+    break;
+  }
+  }
+
+  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
+  LLVM_DEBUG(dbgs() << "[" << Depth << "] Compute known bits: " << MI << "["
+                    << Depth << "] Computed for: " << MI << "[" << Depth
+                    << "] Known: 0x"
+                    << (Known.Zero | Known.One).toString(16, false) << "\n"
+                    << "[" << Depth << "] Zero: 0x"
+                    << Known.Zero.toString(16, false) << "\n"
+                    << "[" << Depth << "] One: 0x"
+                    << Known.One.toString(16, false) << "\n");
+}
+
+void GISelKnownBitsAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.setPreservesAll();
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool GISelKnownBitsAnalysis::runOnMachineFunction(MachineFunction &MF) {
+  return false;
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/GlobalISel.cpp b/llvm/lib/CodeGen/GlobalISel/GlobalISel.cpp
new file mode 100644
index 000000000000..e0391e6f6467
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/GlobalISel.cpp
@@ -0,0 +1,24 @@
+//===-- llvm/CodeGen/GlobalISel/GlobalISel.cpp --- GlobalISel ----*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the common initialization routines for the
+/// GlobalISel library.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InitializePasses.h"
+#include "llvm/PassRegistry.h"
+
+using namespace llvm;
+
+void llvm::initializeGlobalISel(PassRegistry &Registry) {
+  initializeIRTranslatorPass(Registry);
+  initializeLegalizerPass(Registry);
+  initializeLocalizerPass(Registry);
+  initializeRegBankSelectPass(Registry);
+  initializeInstructionSelectPass(Registry);
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
new file mode 100644
index 000000000000..45cef4aca888
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -0,0 +1,2400 @@
+//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the IRTranslator class.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackProtector.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
"llvm/Support/ErrorHandling.h" +#include "llvm/Support/LowLevelTypeImpl.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetIntrinsicInfo.h" +#include "llvm/Target/TargetMachine.h" +#include <algorithm> +#include <cassert> +#include <cstdint> +#include <iterator> +#include <string> +#include <utility> +#include <vector> + +#define DEBUG_TYPE "irtranslator" + +using namespace llvm; + +static cl::opt<bool> + EnableCSEInIRTranslator("enable-cse-in-irtranslator", + cl::desc("Should enable CSE in irtranslator"), + cl::Optional, cl::init(false)); +char IRTranslator::ID = 0; + +INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", + false, false) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) +INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", + false, false) + +static void reportTranslationError(MachineFunction &MF, + const TargetPassConfig &TPC, + OptimizationRemarkEmitter &ORE, + OptimizationRemarkMissed &R) { + MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); + + // Print the function name explicitly if we don't have a debug location (which + // makes the diagnostic less useful) or if we're going to emit a raw error. + if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) + R << (" (in function: " + MF.getName() + ")").str(); + + if (TPC.isGlobalISelAbortEnabled()) + report_fatal_error(R.getMsg()); + else + ORE.emit(R); +} + +IRTranslator::IRTranslator() : MachineFunctionPass(ID) { } + +#ifndef NDEBUG +namespace { +/// Verify that every instruction created has the same DILocation as the +/// instruction being translated. +class DILocationVerifier : public GISelChangeObserver { + const Instruction *CurrInst = nullptr; + +public: + DILocationVerifier() = default; + ~DILocationVerifier() = default; + + const Instruction *getCurrentInst() const { return CurrInst; } + void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } + + void erasingInstr(MachineInstr &MI) override {} + void changingInstr(MachineInstr &MI) override {} + void changedInstr(MachineInstr &MI) override {} + + void createdInstr(MachineInstr &MI) override { + assert(getCurrentInst() && "Inserted instruction without a current MI"); + + // Only print the check message if we're actually checking it. +#ifndef NDEBUG + LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst + << " was copied to " << MI); +#endif + // We allow insts in the entry block to have a debug loc line of 0 because + // they could have originated from constants, and we don't want a jumpy + // debug experience. + assert((CurrInst->getDebugLoc() == MI.getDebugLoc() || + MI.getDebugLoc().getLine() == 0) && + "Line info was not transferred to all instructions"); + } +}; +} // namespace +#endif // ifndef NDEBUG + + +void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<StackProtector>(); + AU.addRequired<TargetPassConfig>(); + AU.addRequired<GISelCSEAnalysisWrapperPass>(); + getSelectionDAGFallbackAnalysisUsage(AU); + MachineFunctionPass::getAnalysisUsage(AU); +} + +IRTranslator::ValueToVRegInfo::VRegListT & +IRTranslator::allocateVRegs(const Value &Val) { + assert(!VMap.contains(Val) && "Value already allocated in VMap"); + auto *Regs = VMap.getVRegs(Val); + auto *Offsets = VMap.getOffsets(Val); + SmallVector<LLT, 4> SplitTys; + computeValueLLTs(*DL, *Val.getType(), SplitTys, + Offsets->empty() ? 
+  for (unsigned i = 0; i < SplitTys.size(); ++i)
+    Regs->push_back(0);
+  return *Regs;
+}
+
+ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
+  auto VRegsIt = VMap.findVRegs(Val);
+  if (VRegsIt != VMap.vregs_end())
+    return *VRegsIt->second;
+
+  if (Val.getType()->isVoidTy())
+    return *VMap.getVRegs(Val);
+
+  // Create entry for this type.
+  auto *VRegs = VMap.getVRegs(Val);
+  auto *Offsets = VMap.getOffsets(Val);
+
+  assert(Val.getType()->isSized() &&
+         "Don't know how to create an empty vreg");
+
+  SmallVector<LLT, 4> SplitTys;
+  computeValueLLTs(*DL, *Val.getType(), SplitTys,
+                   Offsets->empty() ? Offsets : nullptr);
+
+  if (!isa<Constant>(Val)) {
+    for (auto Ty : SplitTys)
+      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
+    return *VRegs;
+  }
+
+  if (Val.getType()->isAggregateType()) {
+    // UndefValue, ConstantAggregateZero
+    auto &C = cast<Constant>(Val);
+    unsigned Idx = 0;
+    while (auto Elt = C.getAggregateElement(Idx++)) {
+      auto EltRegs = getOrCreateVRegs(*Elt);
+      llvm::copy(EltRegs, std::back_inserter(*VRegs));
+    }
+  } else {
+    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
+    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
+    bool Success = translate(cast<Constant>(Val), VRegs->front());
+    if (!Success) {
+      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
+                                 MF->getFunction().getSubprogram(),
+                                 &MF->getFunction().getEntryBlock());
+      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
+      reportTranslationError(*MF, *TPC, *ORE, R);
+      return *VRegs;
+    }
+  }
+
+  return *VRegs;
+}
+
+int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
+  if (FrameIndices.find(&AI) != FrameIndices.end())
+    return FrameIndices[&AI];
+
+  unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
+  unsigned Size =
+      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
+
+  // Always allocate at least one byte.
+  Size = std::max(Size, 1u);
+
+  unsigned Alignment = AI.getAlignment();
+  if (!Alignment)
+    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
+
+  int &FI = FrameIndices[&AI];
+  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
+  return FI;
+}
+
+unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
+  unsigned Alignment = 0;
+  Type *ValTy = nullptr;
+  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+    Alignment = SI->getAlignment();
+    ValTy = SI->getValueOperand()->getType();
+  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+    Alignment = LI->getAlignment();
+    ValTy = LI->getType();
+  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+    // TODO(PR27168): This instruction has no alignment attribute, but unlike
+    // the default alignment for load/store, the default here is to assume
+    // it has NATURAL alignment, not DataLayout-specified alignment.
+    const DataLayout &DL = AI->getModule()->getDataLayout();
+    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
+    ValTy = AI->getCompareOperand()->getType();
+  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
+    // TODO(PR27168): This instruction has no alignment attribute, but unlike
+    // the default alignment for load/store, the default here is to assume
+    // it has NATURAL alignment, not DataLayout-specified alignment.
+ const DataLayout &DL = AI->getModule()->getDataLayout(); + Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType()); + ValTy = AI->getType(); + } else { + OptimizationRemarkMissed R("gisel-irtranslator", "", &I); + R << "unable to translate memop: " << ore::NV("Opcode", &I); + reportTranslationError(*MF, *TPC, *ORE, R); + return 1; + } + + return Alignment ? Alignment : DL->getABITypeAlignment(ValTy); +} + +MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { + MachineBasicBlock *&MBB = BBToMBB[&BB]; + assert(MBB && "BasicBlock was not encountered before"); + return *MBB; +} + +void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { + assert(NewPred && "new predecessor must be a real MachineBasicBlock"); + MachinePreds[Edge].push_back(NewPred); +} + +bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, + MachineIRBuilder &MIRBuilder) { + // Get or create a virtual register for each value. + // Unless the value is a Constant => loadimm cst? + // or inline constant each time? + // Creation of a virtual register needs to have a size. + Register Op0 = getOrCreateVReg(*U.getOperand(0)); + Register Op1 = getOrCreateVReg(*U.getOperand(1)); + Register Res = getOrCreateVReg(U); + uint16_t Flags = 0; + if (isa<Instruction>(U)) { + const Instruction &I = cast<Instruction>(U); + Flags = MachineInstr::copyFlagsFromInstruction(I); + } + + MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags); + return true; +} + +bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { + // -0.0 - X --> G_FNEG + if (isa<Constant>(U.getOperand(0)) && + U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) { + Register Op1 = getOrCreateVReg(*U.getOperand(1)); + Register Res = getOrCreateVReg(U); + uint16_t Flags = 0; + if (isa<Instruction>(U)) { + const Instruction &I = cast<Instruction>(U); + Flags = MachineInstr::copyFlagsFromInstruction(I); + } + // Negate the last operand of the FSUB + MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags); + return true; + } + return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder); +} + +bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) { + Register Op0 = getOrCreateVReg(*U.getOperand(0)); + Register Res = getOrCreateVReg(U); + uint16_t Flags = 0; + if (isa<Instruction>(U)) { + const Instruction &I = cast<Instruction>(U); + Flags = MachineInstr::copyFlagsFromInstruction(I); + } + MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags); + return true; +} + +bool IRTranslator::translateCompare(const User &U, + MachineIRBuilder &MIRBuilder) { + auto *CI = dyn_cast<CmpInst>(&U); + Register Op0 = getOrCreateVReg(*U.getOperand(0)); + Register Op1 = getOrCreateVReg(*U.getOperand(1)); + Register Res = getOrCreateVReg(U); + CmpInst::Predicate Pred = + CI ? 
CI->getPredicate() : static_cast<CmpInst::Predicate>( + cast<ConstantExpr>(U).getPredicate()); + if (CmpInst::isIntPredicate(Pred)) + MIRBuilder.buildICmp(Pred, Res, Op0, Op1); + else if (Pred == CmpInst::FCMP_FALSE) + MIRBuilder.buildCopy( + Res, getOrCreateVReg(*Constant::getNullValue(U.getType()))); + else if (Pred == CmpInst::FCMP_TRUE) + MIRBuilder.buildCopy( + Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType()))); + else { + assert(CI && "Instruction should be CmpInst"); + MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1}, + MachineInstr::copyFlagsFromInstruction(*CI)); + } + + return true; +} + +bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { + const ReturnInst &RI = cast<ReturnInst>(U); + const Value *Ret = RI.getReturnValue(); + if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) + Ret = nullptr; + + ArrayRef<Register> VRegs; + if (Ret) + VRegs = getOrCreateVRegs(*Ret); + + Register SwiftErrorVReg = 0; + if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { + SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( + &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); + } + + // The target may mess up with the insertion point, but + // this is not important as a return is the last instruction + // of the block anyway. + return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg); +} + +bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { + const BranchInst &BrInst = cast<BranchInst>(U); + unsigned Succ = 0; + if (!BrInst.isUnconditional()) { + // We want a G_BRCOND to the true BB followed by an unconditional branch. + Register Tst = getOrCreateVReg(*BrInst.getCondition()); + const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++)); + MachineBasicBlock &TrueBB = getMBB(TrueTgt); + MIRBuilder.buildBrCond(Tst, TrueBB); + } + + const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ)); + MachineBasicBlock &TgtBB = getMBB(BrTgt); + MachineBasicBlock &CurBB = MIRBuilder.getMBB(); + + // If the unconditional target is the layout successor, fallthrough. + if (!CurBB.isLayoutSuccessor(&TgtBB)) + MIRBuilder.buildBr(TgtBB); + + // Link successors. + for (const BasicBlock *Succ : successors(&BrInst)) + CurBB.addSuccessor(&getMBB(*Succ)); + return true; +} + +void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src, + MachineBasicBlock *Dst, + BranchProbability Prob) { + if (!FuncInfo.BPI) { + Src->addSuccessorWithoutProb(Dst); + return; + } + if (Prob.isUnknown()) + Prob = getEdgeProbability(Src, Dst); + Src->addSuccessor(Dst, Prob); +} + +BranchProbability +IRTranslator::getEdgeProbability(const MachineBasicBlock *Src, + const MachineBasicBlock *Dst) const { + const BasicBlock *SrcBB = Src->getBasicBlock(); + const BasicBlock *DstBB = Dst->getBasicBlock(); + if (!FuncInfo.BPI) { + // If BPI is not available, set the default probability as 1 / N, where N is + // the number of successors. + auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); + return BranchProbability(1, SuccSize); + } + return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB); +} + +bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) { + using namespace SwitchCG; + // Extract cases from the switch. 
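+  // For example (illustrative IR):
+  //   switch i32 %v, label %def [ i32 0, label %a
+  //                               i32 1, label %a
+  //                               i32 4, label %b ]
+  // initially yields one single-value cluster per case; sortAndRangeify
+  // below merges them into {[0,1] -> %a, [4,4] -> %b}.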
+  const SwitchInst &SI = cast<SwitchInst>(U);
+  BranchProbabilityInfo *BPI = FuncInfo.BPI;
+  CaseClusterVector Clusters;
+  Clusters.reserve(SI.getNumCases());
+  for (auto &I : SI.cases()) {
+    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
+    assert(Succ && "Could not find successor mbb in mapping");
+    const ConstantInt *CaseVal = I.getCaseValue();
+    BranchProbability Prob =
+        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
+            : BranchProbability(1, SI.getNumCases() + 1);
+    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
+  }
+
+  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
+
+  // Cluster adjacent cases with the same destination. We do this at all
+  // optimization levels because it's cheap to do and will make codegen faster
+  // if there are many clusters.
+  sortAndRangeify(Clusters);
+
+  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
+
+  // If there is only the default destination, jump there directly.
+  if (Clusters.empty()) {
+    SwitchMBB->addSuccessor(DefaultMBB);
+    if (DefaultMBB != SwitchMBB->getNextNode())
+      MIB.buildBr(*DefaultMBB);
+    return true;
+  }
+
+  SL->findJumpTables(Clusters, &SI, DefaultMBB);
+
+  LLVM_DEBUG({
+    dbgs() << "Case clusters: ";
+    for (const CaseCluster &C : Clusters) {
+      if (C.Kind == CC_JumpTable)
+        dbgs() << "JT:";
+      if (C.Kind == CC_BitTests)
+        dbgs() << "BT:";
+
+      C.Low->getValue().print(dbgs(), true);
+      if (C.Low != C.High) {
+        dbgs() << '-';
+        C.High->getValue().print(dbgs(), true);
+      }
+      dbgs() << ' ';
+    }
+    dbgs() << '\n';
+  });
+
+  assert(!Clusters.empty());
+  SwitchWorkList WorkList;
+  CaseClusterIt First = Clusters.begin();
+  CaseClusterIt Last = Clusters.end() - 1;
+  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
+  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
+
+  // FIXME: At the moment we don't do any splitting optimizations here like
+  // SelectionDAG does, so this worklist only has one entry.
+  while (!WorkList.empty()) {
+    SwitchWorkListItem W = WorkList.back();
+    WorkList.pop_back();
+    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
+      return false;
+  }
+  return true;
+}
+
+void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
+                                 MachineBasicBlock *MBB) {
+  // Emit the code for the jump table.
+  assert(JT.Reg != -1U && "Should lower JT Header first!");
+  MachineIRBuilder MIB(*MBB->getParent());
+  MIB.setMBB(*MBB);
+  MIB.setDebugLoc(CurBuilder->getDebugLoc());
+
+  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
+  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+
+  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
+  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
+}
+
+bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
+                                       SwitchCG::JumpTableHeader &JTH,
+                                       MachineBasicBlock *HeaderBB) {
+  MachineIRBuilder MIB(*HeaderBB->getParent());
+  MIB.setMBB(*HeaderBB);
+  MIB.setDebugLoc(CurBuilder->getDebugLoc());
+
+  const Value &SValue = *JTH.SValue;
+  // Subtract the lowest switch case value from the value being switched on.
+  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
+  Register SwitchOpReg = getOrCreateVReg(SValue);
+  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
+  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
+
+  // This value may be smaller or larger than the target's pointer type, and
+  // therefore require extension or truncation.
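+  // e.g. with 64-bit pointers, switching on an i16 needs a G_ZEXT of the
+  // adjusted value, while an i128 would need a G_TRUNC; buildZExtOrTrunc
+  // below picks whichever applies (illustrative widths).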
+ Type *PtrIRTy = SValue.getType()->getPointerTo(); + const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy)); + Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub); + + JT.Reg = Sub.getReg(0); + + if (JTH.OmitRangeCheck) { + if (JT.MBB != HeaderBB->getNextNode()) + MIB.buildBr(*JT.MBB); + return true; + } + + // Emit the range check for the jump table, and branch to the default block + // for the switch statement if the value being switched on exceeds the + // largest case in the switch. + auto Cst = getOrCreateVReg( + *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First)); + Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0); + auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst); + + auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default); + + // Avoid emitting unnecessary branches to the next block. + if (JT.MBB != HeaderBB->getNextNode()) + BrCond = MIB.buildBr(*JT.MBB); + return true; +} + +void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB, + MachineBasicBlock *SwitchBB, + MachineIRBuilder &MIB) { + Register CondLHS = getOrCreateVReg(*CB.CmpLHS); + Register Cond; + DebugLoc OldDbgLoc = MIB.getDebugLoc(); + MIB.setDebugLoc(CB.DbgLoc); + MIB.setMBB(*CB.ThisBB); + + if (CB.PredInfo.NoCmp) { + // Branch or fall through to TrueBB. + addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); + addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, + CB.ThisBB); + CB.ThisBB->normalizeSuccProbs(); + if (CB.TrueBB != CB.ThisBB->getNextNode()) + MIB.buildBr(*CB.TrueBB); + MIB.setDebugLoc(OldDbgLoc); + return; + } + + const LLT i1Ty = LLT::scalar(1); + // Build the compare. + if (!CB.CmpMHS) { + Register CondRHS = getOrCreateVReg(*CB.CmpRHS); + Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0); + } else { + assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE && + "Can only handle SLE ranges"); + + const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); + const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); + + Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS); + if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { + Register CondRHS = getOrCreateVReg(*CB.CmpRHS); + Cond = + MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0); + } else { + const LLT &CmpTy = MRI->getType(CmpOpReg); + auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS); + auto Diff = MIB.buildConstant(CmpTy, High - Low); + Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0); + } + } + + // Update successor info + addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); + + addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, + CB.ThisBB); + + // TrueBB and FalseBB are always different unless the incoming IR is + // degenerate. This only happens when running llc on weird IR. + if (CB.TrueBB != CB.FalseBB) + addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb); + CB.ThisBB->normalizeSuccProbs(); + + // if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock()) + addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()}, + CB.ThisBB); + + // If the lhs block is the next block, invert the condition so that we can + // fall through to the lhs instead of the rhs block. 
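+  // A minimal sketch of the inversion, assuming an i1 condition %c:
+  //   %one = G_CONSTANT i1 1
+  //   %inv = G_XOR %c, %one  ; branch on %inv, fall through to the old TrueBB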
+  if (CB.TrueBB == CB.ThisBB->getNextNode()) {
+    std::swap(CB.TrueBB, CB.FalseBB);
+    auto True = MIB.buildConstant(i1Ty, 1);
+    Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
+               .getReg(0);
+  }
+
+  MIB.buildBrCond(Cond, *CB.TrueBB);
+  MIB.buildBr(*CB.FalseBB);
+  MIB.setDebugLoc(OldDbgLoc);
+}
+
+bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
+                                          MachineBasicBlock *SwitchMBB,
+                                          MachineBasicBlock *CurMBB,
+                                          MachineBasicBlock *DefaultMBB,
+                                          MachineIRBuilder &MIB,
+                                          MachineFunction::iterator BBI,
+                                          BranchProbability UnhandledProbs,
+                                          SwitchCG::CaseClusterIt I,
+                                          MachineBasicBlock *Fallthrough,
+                                          bool FallthroughUnreachable) {
+  using namespace SwitchCG;
+  MachineFunction *CurMF = SwitchMBB->getParent();
+  // FIXME: Optimize away range check based on pivot comparisons.
+  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
+  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
+  BranchProbability DefaultProb = W.DefaultProb;
+
+  // The jump block hasn't been inserted yet; insert it here.
+  MachineBasicBlock *JumpMBB = JT->MBB;
+  CurMF->insert(BBI, JumpMBB);
+
+  // Since the jump table block is separate from the switch block, we need
+  // to keep track of it as a machine predecessor to the default block,
+  // otherwise we lose the phi edges.
+  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
+                    CurMBB);
+  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
+                    JumpMBB);
+
+  auto JumpProb = I->Prob;
+  auto FallthroughProb = UnhandledProbs;
+
+  // If the default statement is a target of the jump table, we evenly
+  // distribute the default probability to successors of CurMBB. Also
+  // update the probability on the edge from JumpMBB to Fallthrough.
+  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
+                                        SE = JumpMBB->succ_end();
+       SI != SE; ++SI) {
+    if (*SI == DefaultMBB) {
+      JumpProb += DefaultProb / 2;
+      FallthroughProb -= DefaultProb / 2;
+      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
+      JumpMBB->normalizeSuccProbs();
+    } else {
+      // Also record edges from the jump table block to its successors.
+      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
+                        JumpMBB);
+    }
+  }
+
+  // Skip the range check if the fallthrough block is unreachable.
+  if (FallthroughUnreachable)
+    JTH->OmitRangeCheck = true;
+
+  if (!JTH->OmitRangeCheck)
+    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
+  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
+  CurMBB->normalizeSuccProbs();
+
+  // The jump table header will be inserted in our current block; it will do
+  // the range check and fall through to our fallthrough block.
+  JTH->HeaderBB = CurMBB;
+  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
+
+  // If we're in the right place, emit the jump table header right now.
+  if (CurMBB == SwitchMBB) {
+    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
+      return false;
+    JTH->Emitted = true;
+  }
+  return true;
+}
+
+bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
+                                            Value *Cond,
+                                            MachineBasicBlock *Fallthrough,
+                                            bool FallthroughUnreachable,
+                                            BranchProbability UnhandledProbs,
+                                            MachineBasicBlock *CurMBB,
+                                            MachineIRBuilder &MIB,
+                                            MachineBasicBlock *SwitchMBB) {
+  using namespace SwitchCG;
+  const Value *RHS, *LHS, *MHS;
+  CmpInst::Predicate Pred;
+  if (I->Low == I->High) {
+    // Check Cond == I->Low.
+    Pred = CmpInst::ICMP_EQ;
+    LHS = Cond;
+    RHS = I->Low;
+    MHS = nullptr;
+  } else {
+    // Check I->Low <= Cond <= I->High.
+ Pred = CmpInst::ICMP_SLE; + LHS = I->Low; + MHS = Cond; + RHS = I->High; + } + + // If Fallthrough is unreachable, fold away the comparison. + // The false probability is the sum of all unhandled cases. + CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough, + CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs); + + emitSwitchCase(CB, SwitchMBB, MIB); + return true; +} + +bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, + Value *Cond, + MachineBasicBlock *SwitchMBB, + MachineBasicBlock *DefaultMBB, + MachineIRBuilder &MIB) { + using namespace SwitchCG; + MachineFunction *CurMF = FuncInfo.MF; + MachineBasicBlock *NextMBB = nullptr; + MachineFunction::iterator BBI(W.MBB); + if (++BBI != FuncInfo.MF->end()) + NextMBB = &*BBI; + + if (EnableOpts) { + // Here, we order cases by probability so the most likely case will be + // checked first. However, two clusters can have the same probability in + // which case their relative ordering is non-deterministic. So we use Low + // as a tie-breaker as clusters are guaranteed to never overlap. + llvm::sort(W.FirstCluster, W.LastCluster + 1, + [](const CaseCluster &a, const CaseCluster &b) { + return a.Prob != b.Prob + ? a.Prob > b.Prob + : a.Low->getValue().slt(b.Low->getValue()); + }); + + // Rearrange the case blocks so that the last one falls through if possible + // without changing the order of probabilities. + for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { + --I; + if (I->Prob > W.LastCluster->Prob) + break; + if (I->Kind == CC_Range && I->MBB == NextMBB) { + std::swap(*I, *W.LastCluster); + break; + } + } + } + + // Compute total probability. + BranchProbability DefaultProb = W.DefaultProb; + BranchProbability UnhandledProbs = DefaultProb; + for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) + UnhandledProbs += I->Prob; + + MachineBasicBlock *CurMBB = W.MBB; + for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { + bool FallthroughUnreachable = false; + MachineBasicBlock *Fallthrough; + if (I == W.LastCluster) { + // For the last cluster, fall through to the default destination. + Fallthrough = DefaultMBB; + FallthroughUnreachable = isa<UnreachableInst>( + DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); + } else { + Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); + CurMF->insert(BBI, Fallthrough); + } + UnhandledProbs -= I->Prob; + + switch (I->Kind) { + case CC_BitTests: { + LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented"); + return false; // Bit tests currently unimplemented. + } + case CC_JumpTable: { + if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, + UnhandledProbs, I, Fallthrough, + FallthroughUnreachable)) { + LLVM_DEBUG(dbgs() << "Failed to lower jump table"); + return false; + } + break; + } + case CC_Range: { + if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, + FallthroughUnreachable, UnhandledProbs, + CurMBB, MIB, SwitchMBB)) { + LLVM_DEBUG(dbgs() << "Failed to lower switch range"); + return false; + } + break; + } + } + CurMBB = Fallthrough; + } + + return true; +} + +bool IRTranslator::translateIndirectBr(const User &U, + MachineIRBuilder &MIRBuilder) { + const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); + + const Register Tgt = getOrCreateVReg(*BrInst.getAddress()); + MIRBuilder.buildBrIndirect(Tgt); + + // Link successors. 
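+  // e.g. for indirectbr i8* %addr, [label %bb1, label %bb2], both bb1 and
+  // bb2 become CFG successors of the current block (illustrative).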
+ MachineBasicBlock &CurBB = MIRBuilder.getMBB(); + for (const BasicBlock *Succ : successors(&BrInst)) + CurBB.addSuccessor(&getMBB(*Succ)); + + return true; +} + +static bool isSwiftError(const Value *V) { + if (auto Arg = dyn_cast<Argument>(V)) + return Arg->hasSwiftErrorAttr(); + if (auto AI = dyn_cast<AllocaInst>(V)) + return AI->isSwiftError(); + return false; +} + +bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { + const LoadInst &LI = cast<LoadInst>(U); + + auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile + : MachineMemOperand::MONone; + Flags |= MachineMemOperand::MOLoad; + + if (DL->getTypeStoreSize(LI.getType()) == 0) + return true; + + ArrayRef<Register> Regs = getOrCreateVRegs(LI); + ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); + Register Base = getOrCreateVReg(*LI.getPointerOperand()); + + Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType()); + LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); + + if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) { + assert(Regs.size() == 1 && "swifterror should be single pointer"); + Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), + LI.getPointerOperand()); + MIRBuilder.buildCopy(Regs[0], VReg); + return true; + } + + const MDNode *Ranges = + Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr; + for (unsigned i = 0; i < Regs.size(); ++i) { + Register Addr; + MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8); + + MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); + unsigned BaseAlign = getMemOpAlignment(LI); + auto MMO = MF->getMachineMemOperand( + Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8, + MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), Ranges, + LI.getSyncScopeID(), LI.getOrdering()); + MIRBuilder.buildLoad(Regs[i], Addr, *MMO); + } + + return true; +} + +bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { + const StoreInst &SI = cast<StoreInst>(U); + auto Flags = SI.isVolatile() ? 
MachineMemOperand::MOVolatile + : MachineMemOperand::MONone; + Flags |= MachineMemOperand::MOStore; + + if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) + return true; + + ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand()); + ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); + Register Base = getOrCreateVReg(*SI.getPointerOperand()); + + Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType()); + LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); + + if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) { + assert(Vals.size() == 1 && "swifterror should be single pointer"); + + Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), + SI.getPointerOperand()); + MIRBuilder.buildCopy(VReg, Vals[0]); + return true; + } + + for (unsigned i = 0; i < Vals.size(); ++i) { + Register Addr; + MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8); + + MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); + unsigned BaseAlign = getMemOpAlignment(SI); + auto MMO = MF->getMachineMemOperand( + Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8, + MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr, + SI.getSyncScopeID(), SI.getOrdering()); + MIRBuilder.buildStore(Vals[i], Addr, *MMO); + } + return true; +} + +static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { + const Value *Src = U.getOperand(0); + Type *Int32Ty = Type::getInt32Ty(U.getContext()); + + // getIndexedOffsetInType is designed for GEPs, so the first index is the + // usual array element rather than looking into the actual aggregate. + SmallVector<Value *, 1> Indices; + Indices.push_back(ConstantInt::get(Int32Ty, 0)); + + if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { + for (auto Idx : EVI->indices()) + Indices.push_back(ConstantInt::get(Int32Ty, Idx)); + } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { + for (auto Idx : IVI->indices()) + Indices.push_back(ConstantInt::get(Int32Ty, Idx)); + } else { + for (unsigned i = 1; i < U.getNumOperands(); ++i) + Indices.push_back(U.getOperand(i)); + } + + return 8 * static_cast<uint64_t>( + DL.getIndexedOffsetInType(Src->getType(), Indices)); +} + +bool IRTranslator::translateExtractValue(const User &U, + MachineIRBuilder &MIRBuilder) { + const Value *Src = U.getOperand(0); + uint64_t Offset = getOffsetFromIndices(U, *DL); + ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); + ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); + unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); + auto &DstRegs = allocateVRegs(U); + + for (unsigned i = 0; i < DstRegs.size(); ++i) + DstRegs[i] = SrcRegs[Idx++]; + + return true; +} + +bool IRTranslator::translateInsertValue(const User &U, + MachineIRBuilder &MIRBuilder) { + const Value *Src = U.getOperand(0); + uint64_t Offset = getOffsetFromIndices(U, *DL); + auto &DstRegs = allocateVRegs(U); + ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); + ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); + ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); + auto InsertedIt = InsertedRegs.begin(); + + for (unsigned i = 0; i < DstRegs.size(); ++i) { + if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) + DstRegs[i] = *InsertedIt++; + else + DstRegs[i] = SrcRegs[i]; + } + + return true; +} + +bool IRTranslator::translateSelect(const User &U, + MachineIRBuilder &MIRBuilder) { + Register Tst = getOrCreateVReg(*U.getOperand(0)); + 
ArrayRef<Register> ResRegs = getOrCreateVRegs(U); + ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); + ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); + + const SelectInst &SI = cast<SelectInst>(U); + uint16_t Flags = 0; + if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition())) + Flags = MachineInstr::copyFlagsFromInstruction(*Cmp); + + for (unsigned i = 0; i < ResRegs.size(); ++i) { + MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]}, + {Tst, Op0Regs[i], Op1Regs[i]}, Flags); + } + + return true; +} + +bool IRTranslator::translateBitCast(const User &U, + MachineIRBuilder &MIRBuilder) { + // If we're bitcasting to the source type, we can reuse the source vreg. + if (getLLTForType(*U.getOperand(0)->getType(), *DL) == + getLLTForType(*U.getType(), *DL)) { + Register SrcReg = getOrCreateVReg(*U.getOperand(0)); + auto &Regs = *VMap.getVRegs(U); + // If we already assigned a vreg for this bitcast, we can't change that. + // Emit a copy to satisfy the users we already emitted. + if (!Regs.empty()) + MIRBuilder.buildCopy(Regs[0], SrcReg); + else { + Regs.push_back(SrcReg); + VMap.getOffsets(U)->push_back(0); + } + return true; + } + return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); +} + +bool IRTranslator::translateCast(unsigned Opcode, const User &U, + MachineIRBuilder &MIRBuilder) { + Register Op = getOrCreateVReg(*U.getOperand(0)); + Register Res = getOrCreateVReg(U); + MIRBuilder.buildInstr(Opcode, {Res}, {Op}); + return true; +} + +bool IRTranslator::translateGetElementPtr(const User &U, + MachineIRBuilder &MIRBuilder) { + // FIXME: support vector GEPs. + if (U.getType()->isVectorTy()) + return false; + + Value &Op0 = *U.getOperand(0); + Register BaseReg = getOrCreateVReg(Op0); + Type *PtrIRTy = Op0.getType(); + LLT PtrTy = getLLTForType(*PtrIRTy, *DL); + Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); + LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); + + int64_t Offset = 0; + for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); + GTI != E; ++GTI) { + const Value *Idx = GTI.getOperand(); + if (StructType *StTy = GTI.getStructTypeOrNull()) { + unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); + Offset += DL->getStructLayout(StTy)->getElementOffset(Field); + continue; + } else { + uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); + + // If this is a scalar constant or a splat vector of constants, + // handle it quickly. + if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { + Offset += ElementSize * CI->getSExtValue(); + continue; + } + + if (Offset != 0) { + LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); + auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); + BaseReg = + MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0); + Offset = 0; + } + + Register IdxReg = getOrCreateVReg(*Idx); + if (MRI->getType(IdxReg) != OffsetTy) + IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); + + // N = N + Idx * ElementSize; + // Avoid doing it for ElementSize of 1. 
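+      // e.g. indexing an i32 array by a dynamic %idx builds (sketch):
+      //   %four = G_CONSTANT 4
+      //   %off  = G_MUL %four, %idx
+      //   %ptr  = G_GEP %base, %off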
+ Register GepOffsetReg; + if (ElementSize != 1) { + auto ElementSizeMIB = MIRBuilder.buildConstant( + getLLTForType(*OffsetIRTy, *DL), ElementSize); + GepOffsetReg = + MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0); + } else + GepOffsetReg = IdxReg; + + BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0); + } + } + + if (Offset != 0) { + auto OffsetMIB = + MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset); + MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); + return true; + } + + MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); + return true; +} + +bool IRTranslator::translateMemFunc(const CallInst &CI, + MachineIRBuilder &MIRBuilder, + Intrinsic::ID ID) { + + // If the source is undef, then just emit a nop. + if (isa<UndefValue>(CI.getArgOperand(1))) + return true; + + ArrayRef<Register> Res; + auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true); + for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) + ICall.addUse(getOrCreateVReg(**AI)); + + unsigned DstAlign = 0, SrcAlign = 0; + unsigned IsVol = + cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) + ->getZExtValue(); + + if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { + DstAlign = std::max<unsigned>(MCI->getDestAlignment(), 1); + SrcAlign = std::max<unsigned>(MCI->getSourceAlignment(), 1); + } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { + DstAlign = std::max<unsigned>(MMI->getDestAlignment(), 1); + SrcAlign = std::max<unsigned>(MMI->getSourceAlignment(), 1); + } else { + auto *MSI = cast<MemSetInst>(&CI); + DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1); + } + + // We need to propagate the tail call flag from the IR inst as an argument. + // Otherwise, we have to pessimize and assume later that we cannot tail call + // any memory intrinsics. + ICall.addImm(CI.isTailCall() ? 1 : 0); + + // Create mem operands to store the alignment and volatile info. + auto VolFlag = IsVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; + ICall.addMemOperand(MF->getMachineMemOperand( + MachinePointerInfo(CI.getArgOperand(0)), + MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); + if (ID != Intrinsic::memset) + ICall.addMemOperand(MF->getMachineMemOperand( + MachinePointerInfo(CI.getArgOperand(1)), + MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); + + return true; +} + +void IRTranslator::getStackGuard(Register DstReg, + MachineIRBuilder &MIRBuilder) { + const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); + MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); + auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD); + MIB.addDef(DstReg); + + auto &TLI = *MF->getSubtarget().getTargetLowering(); + Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); + if (!Global) + return; + + MachinePointerInfo MPInfo(Global); + auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | + MachineMemOperand::MODereferenceable; + MachineMemOperand *MemRef = + MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, + DL->getPointerABIAlignment(0).value()); + MIB.setMemRefs({MemRef}); +} + +bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, + MachineIRBuilder &MIRBuilder) { + ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); + MIRBuilder.buildInstr(Op) + .addDef(ResRegs[0]) + .addDef(ResRegs[1]) + .addUse(getOrCreateVReg(*CI.getOperand(0))) + .addUse(getOrCreateVReg(*CI.getOperand(1))); + + return true; +} + +unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { + switch (ID) { + default: + break; + case Intrinsic::bswap: + return TargetOpcode::G_BSWAP; + case Intrinsic::bitreverse: + return TargetOpcode::G_BITREVERSE; + case Intrinsic::ceil: + return TargetOpcode::G_FCEIL; + case Intrinsic::cos: + return TargetOpcode::G_FCOS; + case Intrinsic::ctpop: + return TargetOpcode::G_CTPOP; + case Intrinsic::exp: + return TargetOpcode::G_FEXP; + case Intrinsic::exp2: + return TargetOpcode::G_FEXP2; + case Intrinsic::fabs: + return TargetOpcode::G_FABS; + case Intrinsic::copysign: + return TargetOpcode::G_FCOPYSIGN; + case Intrinsic::minnum: + return TargetOpcode::G_FMINNUM; + case Intrinsic::maxnum: + return TargetOpcode::G_FMAXNUM; + case Intrinsic::minimum: + return TargetOpcode::G_FMINIMUM; + case Intrinsic::maximum: + return TargetOpcode::G_FMAXIMUM; + case Intrinsic::canonicalize: + return TargetOpcode::G_FCANONICALIZE; + case Intrinsic::floor: + return TargetOpcode::G_FFLOOR; + case Intrinsic::fma: + return TargetOpcode::G_FMA; + case Intrinsic::log: + return TargetOpcode::G_FLOG; + case Intrinsic::log2: + return TargetOpcode::G_FLOG2; + case Intrinsic::log10: + return TargetOpcode::G_FLOG10; + case Intrinsic::nearbyint: + return TargetOpcode::G_FNEARBYINT; + case Intrinsic::pow: + return TargetOpcode::G_FPOW; + case Intrinsic::rint: + return TargetOpcode::G_FRINT; + case Intrinsic::round: + return TargetOpcode::G_INTRINSIC_ROUND; + case Intrinsic::sin: + return TargetOpcode::G_FSIN; + case Intrinsic::sqrt: + return TargetOpcode::G_FSQRT; + case Intrinsic::trunc: + return TargetOpcode::G_INTRINSIC_TRUNC; + } + return Intrinsic::not_intrinsic; +} + +bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI, + Intrinsic::ID ID, + MachineIRBuilder &MIRBuilder) { + + unsigned Op = getSimpleIntrinsicOpcode(ID); + + // Is this a simple intrinsic? + if (Op == Intrinsic::not_intrinsic) + return false; + + // Yes. Let's translate it. 
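+  // e.g. a call to llvm.sqrt.f32(%x) becomes %res:_(s32) = G_FSQRT %x,
+  // with the call's fast-math flags copied over (illustrative).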
+  SmallVector<llvm::SrcOp, 4> VRegs;
+  for (auto &Arg : CI.arg_operands())
+    VRegs.push_back(getOrCreateVReg(*Arg));
+
+  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
+                        MachineInstr::copyFlagsFromInstruction(CI));
+  return true;
+}
+
+bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
+                                           MachineIRBuilder &MIRBuilder) {
+
+  // If this is a simple intrinsic (that is, we just need to add a def of
+  // a vreg, and uses for each arg operand), then translate it.
+  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
+    return true;
+
+  switch (ID) {
+  default:
+    break;
+  case Intrinsic::lifetime_start:
+  case Intrinsic::lifetime_end: {
+    // No stack colouring in O0, discard region information.
+    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
+      return true;
+
+    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
+                                                  : TargetOpcode::LIFETIME_END;
+
+    // Get the underlying objects for the location passed on the lifetime
+    // marker.
+    SmallVector<const Value *, 4> Allocas;
+    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
+
+    // Iterate over each underlying object, creating lifetime markers for each
+    // static alloca. Quit if we find a non-static alloca.
+    for (const Value *V : Allocas) {
+      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
+      if (!AI)
+        continue;
+
+      if (!AI->isStaticAlloca())
+        return true;
+
+      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
+    }
+    return true;
+  }
+  case Intrinsic::dbg_declare: {
+    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
+    assert(DI.getVariable() && "Missing variable");
+
+    const Value *Address = DI.getAddress();
+    if (!Address || isa<UndefValue>(Address)) {
+      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
+      return true;
+    }
+
+    assert(DI.getVariable()->isValidLocationForIntrinsic(
+               MIRBuilder.getDebugLoc()) &&
+           "Expected inlined-at fields to agree");
+    auto AI = dyn_cast<AllocaInst>(Address);
+    if (AI && AI->isStaticAlloca()) {
+      // Static allocas are tracked at the MF level, no need for DBG_VALUE
+      // instructions (in fact, they get ignored if they *do* exist).
+      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
+                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
+    } else {
+      // A dbg.declare describes the address of a source variable, so lower it
+      // into an indirect DBG_VALUE.
+      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
+                                       DI.getVariable(), DI.getExpression());
+    }
+    return true;
+  }
+  case Intrinsic::dbg_label: {
+    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
+    assert(DI.getLabel() && "Missing label");
+
+    assert(DI.getLabel()->isValidLocationForIntrinsic(
+               MIRBuilder.getDebugLoc()) &&
+           "Expected inlined-at fields to agree");
+
+    MIRBuilder.buildDbgLabel(DI.getLabel());
+    return true;
+  }
+  case Intrinsic::vaend:
+    // No target I know of cares about va_end. Certainly no in-tree target
+    // does. Simplest intrinsic ever!
+    return true;
+  case Intrinsic::vastart: {
+    auto &TLI = *MF->getSubtarget().getTargetLowering();
+    Value *Ptr = CI.getArgOperand(0);
+    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
+
+    // FIXME: Get alignment
+    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
+        .addUse(getOrCreateVReg(*Ptr))
+        .addMemOperand(MF->getMachineMemOperand(
+            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
+    return true;
+  }
+  case Intrinsic::dbg_value: {
+    // This form of DBG_VALUE is target-independent.
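+    // e.g. llvm.dbg.value(metadata i32 %x, !var, !DIExpression()) becomes a
+    // DBG_VALUE of %x's vreg; constants and missing values take the
+    // buildConstDbgValue/undef forms below (illustrative).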
+ const DbgValueInst &DI = cast<DbgValueInst>(CI); + const Value *V = DI.getValue(); + assert(DI.getVariable()->isValidLocationForIntrinsic( + MIRBuilder.getDebugLoc()) && + "Expected inlined-at fields to agree"); + if (!V) { + // Currently the optimizer can produce this; insert an undef to + // help debugging. Probably the optimizer should not do this. + MIRBuilder.buildDirectDbgValue(0, DI.getVariable(), DI.getExpression()); + } else if (const auto *CI = dyn_cast<Constant>(V)) { + MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); + } else { + for (Register Reg : getOrCreateVRegs(*V)) { + // FIXME: This does not handle register-indirect values at offset 0. The + // direct/indirect thing shouldn't really be handled by something as + // implicit as reg+noreg vs reg+imm in the first place, but it seems + // pretty baked in right now. + MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); + } + } + return true; + } + case Intrinsic::uadd_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); + case Intrinsic::sadd_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); + case Intrinsic::usub_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); + case Intrinsic::ssub_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); + case Intrinsic::umul_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); + case Intrinsic::smul_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); + case Intrinsic::fmuladd: { + const TargetMachine &TM = MF->getTarget(); + const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); + Register Dst = getOrCreateVReg(CI); + Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); + Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); + Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); + if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && + TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) { + // TODO: Revisit this to see if we should move this part of the + // lowering to the combiner. 
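+      // Fused form (sketch):   %d = G_FMA %a, %b, %c
+      // Unfused form below:    %t = G_FMUL %a, %b; %d = G_FADD %t, %c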
+ MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2}, + MachineInstr::copyFlagsFromInstruction(CI)); + } else { + LLT Ty = getLLTForType(*CI.getType(), *DL); + auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1}, + MachineInstr::copyFlagsFromInstruction(CI)); + MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2}, + MachineInstr::copyFlagsFromInstruction(CI)); + } + return true; + } + case Intrinsic::memcpy: + case Intrinsic::memmove: + case Intrinsic::memset: + return translateMemFunc(CI, MIRBuilder, ID); + case Intrinsic::eh_typeid_for: { + GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); + Register Reg = getOrCreateVReg(CI); + unsigned TypeID = MF->getTypeIDFor(GV); + MIRBuilder.buildConstant(Reg, TypeID); + return true; + } + case Intrinsic::objectsize: + llvm_unreachable("llvm.objectsize.* should have been lowered already"); + + case Intrinsic::is_constant: + llvm_unreachable("llvm.is.constant.* should have been lowered already"); + + case Intrinsic::stackguard: + getStackGuard(getOrCreateVReg(CI), MIRBuilder); + return true; + case Intrinsic::stackprotector: { + LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); + Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); + getStackGuard(GuardVal, MIRBuilder); + + AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); + int FI = getOrCreateFrameIndex(*Slot); + MF->getFrameInfo().setStackProtectorIndex(FI); + + MIRBuilder.buildStore( + GuardVal, getOrCreateVReg(*Slot), + *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), + MachineMemOperand::MOStore | + MachineMemOperand::MOVolatile, + PtrTy.getSizeInBits() / 8, 8)); + return true; + } + case Intrinsic::stacksave: { + // Save the stack pointer to the location provided by the intrinsic. + Register Reg = getOrCreateVReg(CI); + Register StackPtr = MF->getSubtarget() + .getTargetLowering() + ->getStackPointerRegisterToSaveRestore(); + + // If the target doesn't specify a stack pointer, then fall back. + if (!StackPtr) + return false; + + MIRBuilder.buildCopy(Reg, StackPtr); + return true; + } + case Intrinsic::stackrestore: { + // Restore the stack pointer from the location provided by the intrinsic. + Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); + Register StackPtr = MF->getSubtarget() + .getTargetLowering() + ->getStackPointerRegisterToSaveRestore(); + + // If the target doesn't specify a stack pointer, then fall back. + if (!StackPtr) + return false; + + MIRBuilder.buildCopy(StackPtr, Reg); + return true; + } + case Intrinsic::cttz: + case Intrinsic::ctlz: { + ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); + bool isTrailing = ID == Intrinsic::cttz; + unsigned Opcode = isTrailing + ? Cst->isZero() ? TargetOpcode::G_CTTZ + : TargetOpcode::G_CTTZ_ZERO_UNDEF + : Cst->isZero() ? TargetOpcode::G_CTLZ + : TargetOpcode::G_CTLZ_ZERO_UNDEF; + MIRBuilder.buildInstr(Opcode) + .addDef(getOrCreateVReg(CI)) + .addUse(getOrCreateVReg(*CI.getArgOperand(0))); + return true; + } + case Intrinsic::invariant_start: { + LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); + Register Undef = MRI->createGenericVirtualRegister(PtrTy); + MIRBuilder.buildUndef(Undef); + return true; + } + case Intrinsic::invariant_end: + return true; + case Intrinsic::assume: + case Intrinsic::var_annotation: + case Intrinsic::sideeffect: + // Discard annotate attributes, assumptions, and artificial side-effects. 
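+    // e.g. llvm.assume(i1 %cond) lowers to no machine instructions at all.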
+ return true; + } + return false; +} + +bool IRTranslator::translateInlineAsm(const CallInst &CI, + MachineIRBuilder &MIRBuilder) { + const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue()); + if (!IA.getConstraintString().empty()) + return false; + + unsigned ExtraInfo = 0; + if (IA.hasSideEffects()) + ExtraInfo |= InlineAsm::Extra_HasSideEffects; + if (IA.getDialect() == InlineAsm::AD_Intel) + ExtraInfo |= InlineAsm::Extra_AsmDialect; + + MIRBuilder.buildInstr(TargetOpcode::INLINEASM) + .addExternalSymbol(IA.getAsmString().c_str()) + .addImm(ExtraInfo); + + return true; +} + +bool IRTranslator::translateCallSite(const ImmutableCallSite &CS, + MachineIRBuilder &MIRBuilder) { + const Instruction &I = *CS.getInstruction(); + ArrayRef<Register> Res = getOrCreateVRegs(I); + + SmallVector<ArrayRef<Register>, 8> Args; + Register SwiftInVReg = 0; + Register SwiftErrorVReg = 0; + for (auto &Arg : CS.args()) { + if (CLI->supportSwiftError() && isSwiftError(Arg)) { + assert(SwiftInVReg == 0 && "Expected only one swift error argument"); + LLT Ty = getLLTForType(*Arg->getType(), *DL); + SwiftInVReg = MRI->createGenericVirtualRegister(Ty); + MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( + &I, &MIRBuilder.getMBB(), Arg)); + Args.emplace_back(makeArrayRef(SwiftInVReg)); + SwiftErrorVReg = + SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg); + continue; + } + Args.push_back(getOrCreateVRegs(*Arg)); + } + + // We don't set HasCalls on MFI here yet because call lowering may decide to + // optimize into tail calls. Instead, we defer that to selection where a final + // scan is done to check if any instructions are calls. + bool Success = + CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg, + [&]() { return getOrCreateVReg(*CS.getCalledValue()); }); + + // Check if we just inserted a tail call. + if (Success) { + assert(!HasTailCall && "Can't tail call return twice from block?"); + const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); + HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); + } + + return Success; +} + +bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { + const CallInst &CI = cast<CallInst>(U); + auto TII = MF->getTarget().getIntrinsicInfo(); + const Function *F = CI.getCalledFunction(); + + // FIXME: support Windows dllimport function calls. + if (F && F->hasDLLImportStorageClass()) + return false; + + if (CI.isInlineAsm()) + return translateInlineAsm(CI, MIRBuilder); + + Intrinsic::ID ID = Intrinsic::not_intrinsic; + if (F && F->isIntrinsic()) { + ID = F->getIntrinsicID(); + if (TII && ID == Intrinsic::not_intrinsic) + ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); + } + + if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) + return translateCallSite(&CI, MIRBuilder); + + assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); + + if (translateKnownIntrinsic(CI, ID, MIRBuilder)) + return true; + + ArrayRef<Register> ResultRegs; + if (!CI.getType()->isVoidTy()) + ResultRegs = getOrCreateVRegs(CI); + + // Ignore the callsite attributes. Backend code is most likely not expecting + // an intrinsic to sometimes have side effects and sometimes not. + MachineInstrBuilder MIB = + MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); + if (isa<FPMathOperator>(CI)) + MIB->copyIRFlags(CI); + + for (auto &Arg : enumerate(CI.arg_operands())) { + // Some intrinsics take metadata parameters. Reject them. 
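+    // (e.g. the metadata type-id operand of llvm.type.test; returning false
+    // here makes the translator bail out on such calls.)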
+ if (isa<MetadataAsValue>(Arg.value())) + return false; + + // If this is required to be an immediate, don't materialize it in a + // register. + if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { + if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { + // imm arguments are more convenient than cimm (and realistically + // probably sufficient), so use them. + assert(CI->getBitWidth() <= 64 && + "large intrinsic immediates not handled"); + MIB.addImm(CI->getSExtValue()); + } else { + MIB.addFPImm(cast<ConstantFP>(Arg.value())); + } + } else { + ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); + if (VRegs.size() > 1) + return false; + MIB.addUse(VRegs[0]); + } + } + + // Add a MachineMemOperand if it is a target mem intrinsic. + const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); + TargetLowering::IntrinsicInfo Info; + // TODO: Add a GlobalISel version of getTgtMemIntrinsic. + if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { + MaybeAlign Align = Info.align; + if (!Align) + Align = MaybeAlign( + DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()))); + + uint64_t Size = Info.memVT.getStoreSize(); + MIB.addMemOperand(MF->getMachineMemOperand( + MachinePointerInfo(Info.ptrVal), Info.flags, Size, Align->value())); + } + + return true; +} + +bool IRTranslator::translateInvoke(const User &U, + MachineIRBuilder &MIRBuilder) { + const InvokeInst &I = cast<InvokeInst>(U); + MCContext &Context = MF->getContext(); + + const BasicBlock *ReturnBB = I.getSuccessor(0); + const BasicBlock *EHPadBB = I.getSuccessor(1); + + const Value *Callee = I.getCalledValue(); + const Function *Fn = dyn_cast<Function>(Callee); + if (isa<InlineAsm>(Callee)) + return false; + + // FIXME: support invoking patchpoint and statepoint intrinsics. + if (Fn && Fn->isIntrinsic()) + return false; + + // FIXME: support whatever these are. + if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) + return false; + + // FIXME: support Windows exception handling. + if (!isa<LandingPadInst>(EHPadBB->front())) + return false; + + // Emit the actual call, bracketed by EH_LABELs so that the MF knows about + // the region covered by the try. + MCSymbol *BeginSymbol = Context.createTempSymbol(); + MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); + + if (!translateCallSite(&I, MIRBuilder)) + return false; + + MCSymbol *EndSymbol = Context.createTempSymbol(); + MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); + + // FIXME: track probabilities. + MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), + &ReturnMBB = getMBB(*ReturnBB); + MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); + MIRBuilder.getMBB().addSuccessor(&ReturnMBB); + MIRBuilder.getMBB().addSuccessor(&EHPadMBB); + MIRBuilder.buildBr(ReturnMBB); + + return true; +} + +bool IRTranslator::translateCallBr(const User &U, + MachineIRBuilder &MIRBuilder) { + // FIXME: Implement this. + return false; +} + +bool IRTranslator::translateLandingPad(const User &U, + MachineIRBuilder &MIRBuilder) { + const LandingPadInst &LP = cast<LandingPadInst>(U); + + MachineBasicBlock &MBB = MIRBuilder.getMBB(); + + MBB.setIsEHPad(); + + // If there aren't registers to copy the values into (e.g., during SjLj + // exceptions), then don't bother. 
+  auto &TLI = *MF->getSubtarget().getTargetLowering();
+  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
+  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
+      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
+    return true;
+
+  // If landingpad's return type is token type, we don't create DAG nodes
+  // for its exception pointer and selector value. The extraction of exception
+  // pointer or selector value from token type landingpads is not currently
+  // supported.
+  if (LP.getType()->isTokenTy())
+    return true;
+
+  // Add a label to mark the beginning of the landing pad. Deletion of the
+  // landing pad can thus be detected via the MachineModuleInfo.
+  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
+      .addSym(MF->addLandingPad(&MBB));
+
+  LLT Ty = getLLTForType(*LP.getType(), *DL);
+  Register Undef = MRI->createGenericVirtualRegister(Ty);
+  MIRBuilder.buildUndef(Undef);
+
+  SmallVector<LLT, 2> Tys;
+  for (Type *Ty : cast<StructType>(LP.getType())->elements())
+    Tys.push_back(getLLTForType(*Ty, *DL));
+  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
+
+  // Mark exception register as live in.
+  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
+  if (!ExceptionReg)
+    return false;
+
+  MBB.addLiveIn(ExceptionReg);
+  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
+  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
+
+  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
+  if (!SelectorReg)
+    return false;
+
+  MBB.addLiveIn(SelectorReg);
+  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
+  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
+  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
+
+  return true;
+}
+
+bool IRTranslator::translateAlloca(const User &U,
+                                   MachineIRBuilder &MIRBuilder) {
+  auto &AI = cast<AllocaInst>(U);
+
+  if (AI.isSwiftError())
+    return true;
+
+  if (AI.isStaticAlloca()) {
+    Register Res = getOrCreateVReg(AI);
+    int FI = getOrCreateFrameIndex(AI);
+    MIRBuilder.buildFrameIndex(Res, FI);
+    return true;
+  }
+
+  // FIXME: support stack probing for Windows.
+  if (MF->getTarget().getTargetTriple().isOSWindows())
+    return false;
+
+  // Now we're in the harder dynamic case.
+  Type *Ty = AI.getAllocatedType();
+  unsigned Align =
+      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
+
+  Register NumElts = getOrCreateVReg(*AI.getArraySize());
+
+  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
+  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
+  if (MRI->getType(NumElts) != IntPtrTy) {
+    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
+    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
+    NumElts = ExtElts;
+  }
+
+  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
+  Register TySize =
+      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
+  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
+
+  unsigned StackAlign =
+      MF->getSubtarget().getFrameLowering()->getStackAlignment();
+  if (Align <= StackAlign)
+    Align = 0;
+
+  // Round the size of the allocation up to the stack alignment size
+  // by adding SA-1 to the size. This doesn't overflow because we're computing
+  // an address inside an alloca.
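+  // Sketch, assuming a 16-byte stack alignment:
+  //   AlignedAlloc = (AllocSize + 15) & ~15
+  // which is exactly the G_ADD / G_AND sequence built below.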
+ auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign - 1); + auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne, + MachineInstr::NoUWrap); + auto AlignCst = + MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign - 1)); + auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst); + + MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Align); + + MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI); + assert(MF->getFrameInfo().hasVarSizedObjects()); + return true; +} + +bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { + // FIXME: We may need more info about the type. Because of how LLT works, + // we're completely discarding the i64/double distinction here (amongst + // others). Fortunately the ABIs I know of where that matters don't use va_arg + // anyway but that's not guaranteed. + MIRBuilder.buildInstr(TargetOpcode::G_VAARG) + .addDef(getOrCreateVReg(U)) + .addUse(getOrCreateVReg(*U.getOperand(0))) + .addImm(DL->getABITypeAlignment(U.getType())); + return true; +} + +bool IRTranslator::translateInsertElement(const User &U, + MachineIRBuilder &MIRBuilder) { + // If it is a <1 x Ty> vector, use the scalar as it is + // not a legal vector type in LLT. + if (U.getType()->getVectorNumElements() == 1) { + Register Elt = getOrCreateVReg(*U.getOperand(1)); + auto &Regs = *VMap.getVRegs(U); + if (Regs.empty()) { + Regs.push_back(Elt); + VMap.getOffsets(U)->push_back(0); + } else { + MIRBuilder.buildCopy(Regs[0], Elt); + } + return true; + } + + Register Res = getOrCreateVReg(U); + Register Val = getOrCreateVReg(*U.getOperand(0)); + Register Elt = getOrCreateVReg(*U.getOperand(1)); + Register Idx = getOrCreateVReg(*U.getOperand(2)); + MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); + return true; +} + +bool IRTranslator::translateExtractElement(const User &U, + MachineIRBuilder &MIRBuilder) { + // If it is a <1 x Ty> vector, use the scalar as it is + // not a legal vector type in LLT. 
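+  // e.g. extractelement <1 x i32> %v, i32 0 just reuses the scalar vreg
+  // already created for %v (illustrative).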
+ if (U.getOperand(0)->getType()->getVectorNumElements() == 1) { + Register Elt = getOrCreateVReg(*U.getOperand(0)); + auto &Regs = *VMap.getVRegs(U); + if (Regs.empty()) { + Regs.push_back(Elt); + VMap.getOffsets(U)->push_back(0); + } else { + MIRBuilder.buildCopy(Regs[0], Elt); + } + return true; + } + Register Res = getOrCreateVReg(U); + Register Val = getOrCreateVReg(*U.getOperand(0)); + const auto &TLI = *MF->getSubtarget().getTargetLowering(); + unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); + Register Idx; + if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { + if (CI->getBitWidth() != PreferredVecIdxWidth) { + APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); + auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); + Idx = getOrCreateVReg(*NewIdxCI); + } + } + if (!Idx) + Idx = getOrCreateVReg(*U.getOperand(1)); + if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { + const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth); + Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg(); + } + MIRBuilder.buildExtractVectorElement(Res, Val, Idx); + return true; +} + +bool IRTranslator::translateShuffleVector(const User &U, + MachineIRBuilder &MIRBuilder) { + MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR) + .addDef(getOrCreateVReg(U)) + .addUse(getOrCreateVReg(*U.getOperand(0))) + .addUse(getOrCreateVReg(*U.getOperand(1))) + .addShuffleMask(cast<Constant>(U.getOperand(2))); + return true; +} + +bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { + const PHINode &PI = cast<PHINode>(U); + + SmallVector<MachineInstr *, 4> Insts; + for (auto Reg : getOrCreateVRegs(PI)) { + auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); + Insts.push_back(MIB.getInstr()); + } + + PendingPHIs.emplace_back(&PI, std::move(Insts)); + return true; +} + +bool IRTranslator::translateAtomicCmpXchg(const User &U, + MachineIRBuilder &MIRBuilder) { + const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); + + if (I.isWeak()) + return false; + + auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile + : MachineMemOperand::MONone; + Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; + + Type *ResType = I.getType(); + Type *ValType = ResType->Type::getStructElementType(0); + + auto Res = getOrCreateVRegs(I); + Register OldValRes = Res[0]; + Register SuccessRes = Res[1]; + Register Addr = getOrCreateVReg(*I.getPointerOperand()); + Register Cmp = getOrCreateVReg(*I.getCompareOperand()); + Register NewVal = getOrCreateVReg(*I.getNewValOperand()); + + MIRBuilder.buildAtomicCmpXchgWithSuccess( + OldValRes, SuccessRes, Addr, Cmp, NewVal, + *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), + Flags, DL->getTypeStoreSize(ValType), + getMemOpAlignment(I), AAMDNodes(), nullptr, + I.getSyncScopeID(), I.getSuccessOrdering(), + I.getFailureOrdering())); + return true; +} + +bool IRTranslator::translateAtomicRMW(const User &U, + MachineIRBuilder &MIRBuilder) { + const AtomicRMWInst &I = cast<AtomicRMWInst>(U); + + auto Flags = I.isVolatile() ? 
MachineMemOperand::MOVolatile + : MachineMemOperand::MONone; + Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; + + Type *ResType = I.getType(); + + Register Res = getOrCreateVReg(I); + Register Addr = getOrCreateVReg(*I.getPointerOperand()); + Register Val = getOrCreateVReg(*I.getValOperand()); + + unsigned Opcode = 0; + switch (I.getOperation()) { + default: + return false; + case AtomicRMWInst::Xchg: + Opcode = TargetOpcode::G_ATOMICRMW_XCHG; + break; + case AtomicRMWInst::Add: + Opcode = TargetOpcode::G_ATOMICRMW_ADD; + break; + case AtomicRMWInst::Sub: + Opcode = TargetOpcode::G_ATOMICRMW_SUB; + break; + case AtomicRMWInst::And: + Opcode = TargetOpcode::G_ATOMICRMW_AND; + break; + case AtomicRMWInst::Nand: + Opcode = TargetOpcode::G_ATOMICRMW_NAND; + break; + case AtomicRMWInst::Or: + Opcode = TargetOpcode::G_ATOMICRMW_OR; + break; + case AtomicRMWInst::Xor: + Opcode = TargetOpcode::G_ATOMICRMW_XOR; + break; + case AtomicRMWInst::Max: + Opcode = TargetOpcode::G_ATOMICRMW_MAX; + break; + case AtomicRMWInst::Min: + Opcode = TargetOpcode::G_ATOMICRMW_MIN; + break; + case AtomicRMWInst::UMax: + Opcode = TargetOpcode::G_ATOMICRMW_UMAX; + break; + case AtomicRMWInst::UMin: + Opcode = TargetOpcode::G_ATOMICRMW_UMIN; + break; + case AtomicRMWInst::FAdd: + Opcode = TargetOpcode::G_ATOMICRMW_FADD; + break; + case AtomicRMWInst::FSub: + Opcode = TargetOpcode::G_ATOMICRMW_FSUB; + break; + } + + MIRBuilder.buildAtomicRMW( + Opcode, Res, Addr, Val, + *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), + Flags, DL->getTypeStoreSize(ResType), + getMemOpAlignment(I), AAMDNodes(), nullptr, + I.getSyncScopeID(), I.getOrdering())); + return true; +} + +bool IRTranslator::translateFence(const User &U, + MachineIRBuilder &MIRBuilder) { + const FenceInst &Fence = cast<FenceInst>(U); + MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), + Fence.getSyncScopeID()); + return true; +} + +void IRTranslator::finishPendingPhis() { +#ifndef NDEBUG + DILocationVerifier Verifier; + GISelObserverWrapper WrapperObserver(&Verifier); + RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); +#endif // ifndef NDEBUG + for (auto &Phi : PendingPHIs) { + const PHINode *PI = Phi.first; + ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; + MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); + EntryBuilder->setDebugLoc(PI->getDebugLoc()); +#ifndef NDEBUG + Verifier.setCurrentInst(PI); +#endif // ifndef NDEBUG + + SmallSet<const MachineBasicBlock *, 16> SeenPreds; + for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { + auto IRPred = PI->getIncomingBlock(i); + ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); + for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { + if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) + continue; + SeenPreds.insert(Pred); + for (unsigned j = 0; j < ValRegs.size(); ++j) { + MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); + MIB.addUse(ValRegs[j]); + MIB.addMBB(Pred); + } + } + } + } +} + +bool IRTranslator::valueIsSplit(const Value &V, + SmallVectorImpl<uint64_t> *Offsets) { + SmallVector<LLT, 4> SplitTys; + if (Offsets && !Offsets->empty()) + Offsets->clear(); + computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); + return SplitTys.size() > 1; +} + +bool IRTranslator::translate(const Instruction &Inst) { + CurBuilder->setDebugLoc(Inst.getDebugLoc()); + // We only emit constants into the entry block from here. To prevent jumpy + // debug behaviour set the line to 0. 
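+  // For illustration: for IR such as
+  //   %x = add i32 %y, 7, !dbg !10   ; !10 = line 42
+  // the G_CONSTANT for 7 is emitted into the entry block, so it gets line 0
+  // (DWARF's "no source line") rather than line 42, and single-stepping does
+  // not bounce into the entry block.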
+ if (const DebugLoc &DL = Inst.getDebugLoc()) + EntryBuilder->setDebugLoc( + DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt())); + else + EntryBuilder->setDebugLoc(DebugLoc()); + + switch (Inst.getOpcode()) { +#define HANDLE_INST(NUM, OPCODE, CLASS) \ + case Instruction::OPCODE: \ + return translate##OPCODE(Inst, *CurBuilder.get()); +#include "llvm/IR/Instruction.def" + default: + return false; + } +} + +bool IRTranslator::translate(const Constant &C, Register Reg) { + if (auto CI = dyn_cast<ConstantInt>(&C)) + EntryBuilder->buildConstant(Reg, *CI); + else if (auto CF = dyn_cast<ConstantFP>(&C)) + EntryBuilder->buildFConstant(Reg, *CF); + else if (isa<UndefValue>(C)) + EntryBuilder->buildUndef(Reg); + else if (isa<ConstantPointerNull>(C)) { + // As we are trying to build a constant val of 0 into a pointer, + // insert a cast to make them correct with respect to types. + unsigned NullSize = DL->getTypeSizeInBits(C.getType()); + auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize); + auto *ZeroVal = ConstantInt::get(ZeroTy, 0); + Register ZeroReg = getOrCreateVReg(*ZeroVal); + EntryBuilder->buildCast(Reg, ZeroReg); + } else if (auto GV = dyn_cast<GlobalValue>(&C)) + EntryBuilder->buildGlobalValue(Reg, GV); + else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { + if (!CAZ->getType()->isVectorTy()) + return false; + // Return the scalar if it is a <1 x Ty> vector. + if (CAZ->getNumElements() == 1) + return translate(*CAZ->getElementValue(0u), Reg); + SmallVector<Register, 4> Ops; + for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { + Constant &Elt = *CAZ->getElementValue(i); + Ops.push_back(getOrCreateVReg(Elt)); + } + EntryBuilder->buildBuildVector(Reg, Ops); + } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { + // Return the scalar if it is a <1 x Ty> vector. + if (CV->getNumElements() == 1) + return translate(*CV->getElementAsConstant(0), Reg); + SmallVector<Register, 4> Ops; + for (unsigned i = 0; i < CV->getNumElements(); ++i) { + Constant &Elt = *CV->getElementAsConstant(i); + Ops.push_back(getOrCreateVReg(Elt)); + } + EntryBuilder->buildBuildVector(Reg, Ops); + } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { + switch(CE->getOpcode()) { +#define HANDLE_INST(NUM, OPCODE, CLASS) \ + case Instruction::OPCODE: \ + return translate##OPCODE(*CE, *EntryBuilder.get()); +#include "llvm/IR/Instruction.def" + default: + return false; + } + } else if (auto CV = dyn_cast<ConstantVector>(&C)) { + if (CV->getNumOperands() == 1) + return translate(*CV->getOperand(0), Reg); + SmallVector<Register, 4> Ops; + for (unsigned i = 0; i < CV->getNumOperands(); ++i) { + Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); + } + EntryBuilder->buildBuildVector(Reg, Ops); + } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { + EntryBuilder->buildBlockAddress(Reg, BA); + } else + return false; + + return true; +} + +void IRTranslator::finalizeBasicBlock() { + for (auto &JTCase : SL->JTCases) { + // Emit header first, if it wasn't already emitted. + if (!JTCase.first.Emitted) + emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB); + + emitJumpTable(JTCase.second, JTCase.second.MBB); + } + SL->JTCases.clear(); +} + +void IRTranslator::finalizeFunction() { + // Release the memory used by the different maps we + // needed during the translation. + PendingPHIs.clear(); + VMap.reset(); + FrameIndices.clear(); + MachinePreds.clear(); + // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. 
Clear it + // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid + // destroying it twice (in ~IRTranslator() and ~LLVMContext()) + EntryBuilder.reset(); + CurBuilder.reset(); + FuncInfo.clear(); +} + +/// Returns true if a BasicBlock \p BB within a variadic function contains a +/// variadic musttail call. +static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) { + if (!IsVarArg) + return false; + + // Walk the block backwards, because tail calls usually only appear at the end + // of a block. + return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) { + const auto *CI = dyn_cast<CallInst>(&I); + return CI && CI->isMustTailCall(); + }); +} + +bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { + MF = &CurMF; + const Function &F = MF->getFunction(); + if (F.empty()) + return false; + GISelCSEAnalysisWrapper &Wrapper = + getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); + // Set the CSEConfig and run the analysis. + GISelCSEInfo *CSEInfo = nullptr; + TPC = &getAnalysis<TargetPassConfig>(); + bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences() + ? EnableCSEInIRTranslator + : TPC->isGISelCSEEnabled(); + + if (EnableCSE) { + EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF); + CSEInfo = &Wrapper.get(TPC->getCSEConfig()); + EntryBuilder->setCSEInfo(CSEInfo); + CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF); + CurBuilder->setCSEInfo(CSEInfo); + } else { + EntryBuilder = std::make_unique<MachineIRBuilder>(); + CurBuilder = std::make_unique<MachineIRBuilder>(); + } + CLI = MF->getSubtarget().getCallLowering(); + CurBuilder->setMF(*MF); + EntryBuilder->setMF(*MF); + MRI = &MF->getRegInfo(); + DL = &F.getParent()->getDataLayout(); + ORE = std::make_unique<OptimizationRemarkEmitter>(&F); + FuncInfo.MF = MF; + FuncInfo.BPI = nullptr; + const auto &TLI = *MF->getSubtarget().getTargetLowering(); + const TargetMachine &TM = MF->getTarget(); + SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo); + SL->init(TLI, TM, *DL); + + EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F); + + assert(PendingPHIs.empty() && "stale PHIs"); + + if (!DL->isLittleEndian()) { + // Currently we don't properly handle big endian code. + OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", + F.getSubprogram(), &F.getEntryBlock()); + R << "unable to translate in big endian mode"; + reportTranslationError(*MF, *TPC, *ORE, R); + } + + // Release the per-function state when we return, whether we succeeded or not. + auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); }); + + // Setup a separate basic-block for the arguments and constants + MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock(); + MF->push_back(EntryBB); + EntryBuilder->setMBB(*EntryBB); + + DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc(); + SwiftError.setFunction(CurMF); + SwiftError.createEntriesInEntryBlock(DbgLoc); + + bool IsVarArg = F.isVarArg(); + bool HasMustTailInVarArgFn = false; + + // Create all blocks, in IR order, to preserve the layout. + for (const BasicBlock &BB: F) { + auto *&MBB = BBToMBB[&BB]; + + MBB = MF->CreateMachineBasicBlock(&BB); + MF->push_back(MBB); + + if (BB.hasAddressTaken()) + MBB->setHasAddressTaken(); + + if (!HasMustTailInVarArgFn) + HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB); + } + + MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn); + + // Make our arguments/constants entry block fallthrough to the IR entry block. 
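+  // Illustrative shape of the CFG at this point:
+  //   EntryBB (formal args + hoisted constants) --> MBB of F.front() --> ...
+  // EntryBB is merged back into its single successor once translation is
+  // done, near the end of this function.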
+  EntryBB->addSuccessor(&getMBB(F.front()));
+
+  // Lower the actual args into this basic block.
+  SmallVector<ArrayRef<Register>, 8> VRegArgs;
+  for (const Argument &Arg: F.args()) {
+    if (DL->getTypeStoreSize(Arg.getType()) == 0)
+      continue; // Don't handle zero-sized types.
+    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
+    VRegArgs.push_back(VRegs);
+
+    if (Arg.hasSwiftErrorAttr()) {
+      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
+      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
+    }
+  }
+
+  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
+    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
+                               F.getSubprogram(), &F.getEntryBlock());
+    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
+    reportTranslationError(*MF, *TPC, *ORE, R);
+    return false;
+  }
+
+  // Need to visit defs before uses when translating instructions.
+  GISelObserverWrapper WrapperObserver;
+  if (EnableCSE && CSEInfo)
+    WrapperObserver.addObserver(CSEInfo);
+  {
+    ReversePostOrderTraversal<const Function *> RPOT(&F);
+#ifndef NDEBUG
+    DILocationVerifier Verifier;
+    WrapperObserver.addObserver(&Verifier);
+#endif // ifndef NDEBUG
+    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
+    for (const BasicBlock *BB : RPOT) {
+      MachineBasicBlock &MBB = getMBB(*BB);
+      // Set the insertion point of all the following translations to
+      // the end of this basic block.
+      CurBuilder->setMBB(MBB);
+      HasTailCall = false;
+      for (const Instruction &Inst : *BB) {
+        // If we translated a tail call in the last step, then we know
+        // everything after the call is either a return, or something that is
+        // handled by the call itself. (E.g. a lifetime marker or assume
+        // intrinsic.) In this case, we should stop translating the block and
+        // move on.
+        if (HasTailCall)
+          break;
+#ifndef NDEBUG
+        Verifier.setCurrentInst(&Inst);
+#endif // ifndef NDEBUG
+        if (translate(Inst))
+          continue;
+
+        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
+                                   Inst.getDebugLoc(), BB);
+        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
+
+        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
+          std::string InstStrStorage;
+          raw_string_ostream InstStr(InstStrStorage);
+          InstStr << Inst;
+
+          R << ": '" << InstStr.str() << "'";
+        }
+
+        reportTranslationError(*MF, *TPC, *ORE, R);
+        return false;
+      }
+
+      finalizeBasicBlock();
+    }
+#ifndef NDEBUG
+    WrapperObserver.removeObserver(&Verifier);
+#endif
+  }
+
+  finishPendingPhis();
+
+  SwiftError.propagateVRegs();
+
+  // Merge the argument lowering and constants block with its single
+  // successor, the LLVM-IR entry block. We want the basic block to
+  // be maximal.
+  assert(EntryBB->succ_size() == 1 &&
+         "Custom BB used for lowering should have only one successor");
+  // Get the successor of the current entry block.
+  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
+  assert(NewEntryBB.pred_size() == 1 &&
+         "LLVM-IR entry block has a predecessor!?");
+  // Move all the instructions from the current entry block to the
+  // new entry block.
+  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
+                    EntryBB->end());
+
+  // Update the live-in information for the new entry block.
+  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
+    NewEntryBB.addLiveIn(LiveIn);
+  NewEntryBB.sortUniqueLiveIns();
+
+  // Get rid of the now empty basic block.
+  EntryBB->removeSuccessor(&NewEntryBB);
+  MF->remove(EntryBB);
+  MF->DeleteMachineBasicBlock(EntryBB);
+
+  assert(&MF->front() == &NewEntryBB &&
+         "New entry wasn't next in the list of basic blocks!");
+
+  // Initialize stack protector information.
+  StackProtector &SP = getAnalysis<StackProtector>();
+  SP.copyToMachineFrameInfo(MF->getFrameInfo());
+
+  return false;
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
new file mode 100644
index 000000000000..7c4fd2d140d3
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -0,0 +1,264 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelect.cpp - InstructionSelect ---==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the InstructionSelect class.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/config.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define DEBUG_TYPE "instruction-select"
+
+using namespace llvm;
+
+#ifdef LLVM_GISEL_COV_PREFIX
+static cl::opt<std::string>
+    CoveragePrefix("gisel-coverage-prefix", cl::init(LLVM_GISEL_COV_PREFIX),
+                   cl::desc("Record GlobalISel rule coverage files of this "
+                            "prefix if instrumentation was generated"));
+#else
+static const std::string CoveragePrefix = "";
+#endif
+
+char InstructionSelect::ID = 0;
+INITIALIZE_PASS_BEGIN(InstructionSelect, DEBUG_TYPE,
+                      "Select target instructions out of generic instructions",
+                      false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_END(InstructionSelect, DEBUG_TYPE,
+                    "Select target instructions out of generic instructions",
+                    false, false)
+
+InstructionSelect::InstructionSelect() : MachineFunctionPass(ID) { }
+
+void InstructionSelect::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addRequired<TargetPassConfig>();
+  AU.addRequired<GISelKnownBitsAnalysis>();
+  AU.addPreserved<GISelKnownBitsAnalysis>();
+  getSelectionDAGFallbackAnalysisUsage(AU);
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
+  // If the ISel pipeline failed, do not bother running that pass.
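+  // (FailedISel is set via reportGISelFailure when an earlier GlobalISel
+  //  pass bails out without aborting, so the SelectionDAG fallback can take
+  //  over instruction selection for this function.)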
+  if (MF.getProperties().hasProperty(
+          MachineFunctionProperties::Property::FailedISel))
+    return false;
+
+  LLVM_DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n');
+  GISelKnownBits &KB = getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+
+  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
+  InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector();
+  CodeGenCoverage CoverageInfo;
+  assert(ISel && "Cannot work without InstructionSelector");
+  ISel->setupMF(MF, KB, CoverageInfo);
+
+  // An optimization remark emitter. Used to report failures.
+  MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
+
+  // FIXME: There are many other MF/MFI fields we need to initialize.
+
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+#ifndef NDEBUG
+  // Check that our input is fully legal: we require the function to have the
+  // Legalized property, so it should be.
+  // FIXME: This should be in the MachineVerifier, as the RegBankSelected
+  // property check already is.
+  if (!DisableGISelLegalityCheck)
+    if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) {
+      reportGISelFailure(MF, TPC, MORE, "gisel-select",
+                         "instruction is not legal", *MI);
+      return false;
+    }
+  // FIXME: We could introduce new blocks and will need to fix the outer loop.
+  // Until then, keep track of the number of blocks to assert that we don't.
+  const size_t NumBlocks = MF.size();
+#endif
+
+  for (MachineBasicBlock *MBB : post_order(&MF)) {
+    if (MBB->empty())
+      continue;
+
+    // Select instructions in reverse block order. We permit erasing so we
+    // have to resort to manually iterating and recognizing the begin (rend)
+    // case.
+    bool ReachedBegin = false;
+    for (auto MII = std::prev(MBB->end()), Begin = MBB->begin();
+         !ReachedBegin;) {
+#ifndef NDEBUG
+      // Keep track of the insertion range for debug printing.
+      const auto AfterIt = std::next(MII);
+#endif
+      // Select this instruction.
+      MachineInstr &MI = *MII;
+
+      // And have our iterator point to the next instruction, if there is one.
+      if (MII == Begin)
+        ReachedBegin = true;
+      else
+        --MII;
+
+      LLVM_DEBUG(dbgs() << "Selecting: \n  " << MI);
+
+      // We could have folded this instruction away already, making it dead.
+      // If so, erase it.
+      if (isTriviallyDead(MI, MRI)) {
+        LLVM_DEBUG(dbgs() << "Is dead; erasing.\n");
+        MI.eraseFromParentAndMarkDBGValuesForRemoval();
+        continue;
+      }
+
+      if (!ISel->select(MI)) {
+        // FIXME: It would be nice to dump all inserted instructions. It's
+        // not obvious how, esp. considering select() can insert after MI.
+        reportGISelFailure(MF, TPC, MORE, "gisel-select", "cannot select", MI);
+        return false;
+      }
+
+      // Dump the range of instructions that MI expanded into.
+      LLVM_DEBUG({
+        auto InsertedBegin = ReachedBegin ? MBB->begin() : std::next(MII);
+        dbgs() << "Into:\n";
+        for (auto &InsertedMI : make_range(InsertedBegin, AfterIt))
+          dbgs() << "  " << InsertedMI;
+        dbgs() << '\n';
+      });
+    }
+  }
+
+  for (MachineBasicBlock &MBB : MF) {
+    if (MBB.empty())
+      continue;
+
+    // Try to find redundant copies between vregs of the same register class.
+    bool ReachedBegin = false;
+    for (auto MII = std::prev(MBB.end()), Begin = MBB.begin(); !ReachedBegin;) {
+      // Select this instruction.
+      MachineInstr &MI = *MII;
+
+      // And have our iterator point to the next instruction, if there is one.
+ if (MII == Begin) + ReachedBegin = true; + else + --MII; + if (MI.getOpcode() != TargetOpcode::COPY) + continue; + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + if (Register::isVirtualRegister(SrcReg) && + Register::isVirtualRegister(DstReg)) { + auto SrcRC = MRI.getRegClass(SrcReg); + auto DstRC = MRI.getRegClass(DstReg); + if (SrcRC == DstRC) { + MRI.replaceRegWith(DstReg, SrcReg); + MI.eraseFromParentAndMarkDBGValuesForRemoval(); + } + } + } + } + +#ifndef NDEBUG + const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); + // Now that selection is complete, there are no more generic vregs. Verify + // that the size of the now-constrained vreg is unchanged and that it has a + // register class. + for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) { + unsigned VReg = Register::index2VirtReg(I); + + MachineInstr *MI = nullptr; + if (!MRI.def_empty(VReg)) + MI = &*MRI.def_instr_begin(VReg); + else if (!MRI.use_empty(VReg)) + MI = &*MRI.use_instr_begin(VReg); + if (!MI) + continue; + + const TargetRegisterClass *RC = MRI.getRegClassOrNull(VReg); + if (!RC) { + reportGISelFailure(MF, TPC, MORE, "gisel-select", + "VReg has no regclass after selection", *MI); + return false; + } + + const LLT Ty = MRI.getType(VReg); + if (Ty.isValid() && Ty.getSizeInBits() > TRI.getRegSizeInBits(*RC)) { + reportGISelFailure( + MF, TPC, MORE, "gisel-select", + "VReg's low-level type and register class have different sizes", *MI); + return false; + } + } + + if (MF.size() != NumBlocks) { + MachineOptimizationRemarkMissed R("gisel-select", "GISelFailure", + MF.getFunction().getSubprogram(), + /*MBB=*/nullptr); + R << "inserting blocks is not supported yet"; + reportGISelFailure(MF, TPC, MORE, R); + return false; + } +#endif + auto &TLI = *MF.getSubtarget().getTargetLowering(); + TLI.finalizeLowering(MF); + + // Determine if there are any calls in this machine function. Ported from + // SelectionDAG. + MachineFrameInfo &MFI = MF.getFrameInfo(); + for (const auto &MBB : MF) { + if (MFI.hasCalls() && MF.hasInlineAsm()) + break; + + for (const auto &MI : MBB) { + if ((MI.isCall() && !MI.isReturn()) || MI.isStackAligningInlineAsm()) + MFI.setHasCalls(true); + if (MI.isInlineAsm()) + MF.setHasInlineAsm(true); + } + } + + + LLVM_DEBUG({ + dbgs() << "Rules covered by selecting function: " << MF.getName() << ":"; + for (auto RuleID : CoverageInfo.covered()) + dbgs() << " id" << RuleID; + dbgs() << "\n\n"; + }); + CoverageInfo.emit(CoveragePrefix, + MF.getSubtarget() + .getTargetLowering() + ->getTargetMachine() + .getTarget() + .getBackendName()); + + // If we successfully selected the function nothing is going to use the vreg + // types after us (otherwise MIRPrinter would need them). Make sure the types + // disappear. + MRI.clearVirtRegTypes(); + + // FIXME: Should we accurately track changes? + return true; +} diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp new file mode 100644 index 000000000000..28143b30d4e8 --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp @@ -0,0 +1,83 @@ +//===- llvm/CodeGen/GlobalISel/InstructionSelector.cpp --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This file implements the InstructionSelector class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/InstructionSelector.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include "llvm/MC/MCInstrDesc.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include <cassert> + +#define DEBUG_TYPE "instructionselector" + +using namespace llvm; + +InstructionSelector::MatcherState::MatcherState(unsigned MaxRenderers) + : Renderers(MaxRenderers), MIs() {} + +InstructionSelector::InstructionSelector() = default; + +bool InstructionSelector::constrainOperandRegToRegClass( + MachineInstr &I, unsigned OpIdx, const TargetRegisterClass &RC, + const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, + const RegisterBankInfo &RBI) const { + MachineBasicBlock &MBB = *I.getParent(); + MachineFunction &MF = *MBB.getParent(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + + return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, + I.getOperand(OpIdx), OpIdx); +} + +bool InstructionSelector::isOperandImmEqual( + const MachineOperand &MO, int64_t Value, + const MachineRegisterInfo &MRI) const { + if (MO.isReg() && MO.getReg()) + if (auto VRegVal = getConstantVRegValWithLookThrough(MO.getReg(), MRI)) + return VRegVal->Value == Value; + return false; +} + +bool InstructionSelector::isBaseWithConstantOffset( + const MachineOperand &Root, const MachineRegisterInfo &MRI) const { + if (!Root.isReg()) + return false; + + MachineInstr *RootI = MRI.getVRegDef(Root.getReg()); + if (RootI->getOpcode() != TargetOpcode::G_GEP) + return false; + + MachineOperand &RHS = RootI->getOperand(2); + MachineInstr *RHSI = MRI.getVRegDef(RHS.getReg()); + if (RHSI->getOpcode() != TargetOpcode::G_CONSTANT) + return false; + + return true; +} + +bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI, + MachineInstr &IntoMI) const { + // Immediate neighbours are already folded. + if (MI.getParent() == IntoMI.getParent() && + std::next(MI.getIterator()) == IntoMI.getIterator()) + return true; + + return !MI.mayLoadOrStore() && !MI.mayRaiseFPException() && + !MI.hasUnmodeledSideEffects() && MI.implicit_operands().empty(); +} diff --git a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp new file mode 100644 index 000000000000..601d50e9806f --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp @@ -0,0 +1,155 @@ +//===- lib/CodeGen/GlobalISel/LegalizerPredicates.cpp - Predicates --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// A library of predicate factories to use for LegalityPredicate. 
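+//
+// For illustration (hypothetical target code, with s32/s64 the usual
+// LLT::scalar shorthands): these factories compose into legalization rules
+// such as
+//   getActionDefinitionsBuilder(TargetOpcode::G_ADD)
+//       .legalIf(typeInSet(0, {s32, s64}))
+//       .widenScalarIf(narrowerThan(0, 32),
+//                      LegalizeMutations::widenScalarOrEltToNextPow2(0, 32));
+// assuming a 'using namespace LegalityPredicates;' in the ruleset.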
+// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" + +using namespace llvm; + +LegalityPredicate LegalityPredicates::typeIs(unsigned TypeIdx, LLT Type) { + return + [=](const LegalityQuery &Query) { return Query.Types[TypeIdx] == Type; }; +} + +LegalityPredicate +LegalityPredicates::typeInSet(unsigned TypeIdx, + std::initializer_list<LLT> TypesInit) { + SmallVector<LLT, 4> Types = TypesInit; + return [=](const LegalityQuery &Query) { + return std::find(Types.begin(), Types.end(), Query.Types[TypeIdx]) != Types.end(); + }; +} + +LegalityPredicate LegalityPredicates::typePairInSet( + unsigned TypeIdx0, unsigned TypeIdx1, + std::initializer_list<std::pair<LLT, LLT>> TypesInit) { + SmallVector<std::pair<LLT, LLT>, 4> Types = TypesInit; + return [=](const LegalityQuery &Query) { + std::pair<LLT, LLT> Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1]}; + return std::find(Types.begin(), Types.end(), Match) != Types.end(); + }; +} + +LegalityPredicate LegalityPredicates::typePairAndMemDescInSet( + unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx, + std::initializer_list<TypePairAndMemDesc> TypesAndMemDescInit) { + SmallVector<TypePairAndMemDesc, 4> TypesAndMemDesc = TypesAndMemDescInit; + return [=](const LegalityQuery &Query) { + TypePairAndMemDesc Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1], + Query.MMODescrs[MMOIdx].SizeInBits, + Query.MMODescrs[MMOIdx].AlignInBits}; + return std::find_if( + TypesAndMemDesc.begin(), TypesAndMemDesc.end(), + [=](const TypePairAndMemDesc &Entry) ->bool { + return Match.isCompatible(Entry); + }) != TypesAndMemDesc.end(); + }; +} + +LegalityPredicate LegalityPredicates::isScalar(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx].isScalar(); + }; +} + +LegalityPredicate LegalityPredicates::isVector(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx].isVector(); + }; +} + +LegalityPredicate LegalityPredicates::isPointer(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx].isPointer(); + }; +} + +LegalityPredicate LegalityPredicates::isPointer(unsigned TypeIdx, + unsigned AddrSpace) { + return [=](const LegalityQuery &Query) { + LLT Ty = Query.Types[TypeIdx]; + return Ty.isPointer() && Ty.getAddressSpace() == AddrSpace; + }; +} + +LegalityPredicate LegalityPredicates::narrowerThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isScalar() && QueryTy.getSizeInBits() < Size; + }; +} + +LegalityPredicate LegalityPredicates::widerThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isScalar() && QueryTy.getSizeInBits() > Size; + }; +} + +LegalityPredicate LegalityPredicates::scalarOrEltNarrowerThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.getScalarSizeInBits() < Size; + }; +} + +LegalityPredicate LegalityPredicates::scalarOrEltWiderThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.getScalarSizeInBits() > Size; + }; +} + +LegalityPredicate LegalityPredicates::scalarOrEltSizeNotPow2(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = 
Query.Types[TypeIdx]; + return !isPowerOf2_32(QueryTy.getScalarSizeInBits()); + }; +} + +LegalityPredicate LegalityPredicates::sizeNotPow2(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isScalar() && !isPowerOf2_32(QueryTy.getSizeInBits()); + }; +} + +LegalityPredicate LegalityPredicates::sameSize(unsigned TypeIdx0, + unsigned TypeIdx1) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx0].getSizeInBits() == + Query.Types[TypeIdx1].getSizeInBits(); + }; +} + +LegalityPredicate LegalityPredicates::memSizeInBytesNotPow2(unsigned MMOIdx) { + return [=](const LegalityQuery &Query) { + return !isPowerOf2_32(Query.MMODescrs[MMOIdx].SizeInBits / 8); + }; +} + +LegalityPredicate LegalityPredicates::numElementsNotPow2(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isVector() && !isPowerOf2_32(QueryTy.getNumElements()); + }; +} + +LegalityPredicate LegalityPredicates::atomicOrderingAtLeastOrStrongerThan( + unsigned MMOIdx, AtomicOrdering Ordering) { + return [=](const LegalityQuery &Query) { + return isAtLeastOrStrongerThan(Query.MMODescrs[MMOIdx].Ordering, Ordering); + }; +} diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp new file mode 100644 index 000000000000..fcbecf90a845 --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp @@ -0,0 +1,71 @@ +//===- lib/CodeGen/GlobalISel/LegalizerMutations.cpp - Mutations ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// A library of mutation factories to use for LegalityMutation. 
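+//
+// For illustration (hypothetical target code): a mutation is paired with a
+// predicate when building rules, e.g.
+//   .moreElementsIf(LegalityPredicates::numElementsNotPow2(0),
+//                   LegalizeMutations::moreElementsToNextPow2(0, 2));
+// which pads a non-power-of-2 vector out to the next power-of-2 element
+// count.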
+// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" + +using namespace llvm; + +LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx, LLT Ty) { + return + [=](const LegalityQuery &Query) { return std::make_pair(TypeIdx, Ty); }; +} + +LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx, + unsigned FromTypeIdx) { + return [=](const LegalityQuery &Query) { + return std::make_pair(TypeIdx, Query.Types[FromTypeIdx]); + }; +} + +LegalizeMutation LegalizeMutations::changeElementTo(unsigned TypeIdx, + unsigned FromTypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT OldTy = Query.Types[TypeIdx]; + const LLT NewTy = Query.Types[FromTypeIdx]; + return std::make_pair(TypeIdx, OldTy.changeElementType(NewTy)); + }; +} + +LegalizeMutation LegalizeMutations::changeElementTo(unsigned TypeIdx, + LLT NewEltTy) { + return [=](const LegalityQuery &Query) { + const LLT OldTy = Query.Types[TypeIdx]; + return std::make_pair(TypeIdx, OldTy.changeElementType(NewEltTy)); + }; +} + +LegalizeMutation LegalizeMutations::widenScalarOrEltToNextPow2(unsigned TypeIdx, + unsigned Min) { + return [=](const LegalityQuery &Query) { + const LLT Ty = Query.Types[TypeIdx]; + unsigned NewEltSizeInBits = + std::max(1u << Log2_32_Ceil(Ty.getScalarSizeInBits()), Min); + return std::make_pair(TypeIdx, Ty.changeElementSize(NewEltSizeInBits)); + }; +} + +LegalizeMutation LegalizeMutations::moreElementsToNextPow2(unsigned TypeIdx, + unsigned Min) { + return [=](const LegalityQuery &Query) { + const LLT VecTy = Query.Types[TypeIdx]; + unsigned NewNumElements = + std::max(1u << Log2_32_Ceil(VecTy.getNumElements()), Min); + return std::make_pair(TypeIdx, + LLT::vector(NewNumElements, VecTy.getElementType())); + }; +} + +LegalizeMutation LegalizeMutations::scalarize(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + return std::make_pair(TypeIdx, Query.Types[TypeIdx].getElementType()); + }; +} diff --git a/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp new file mode 100644 index 000000000000..1593e21fe07e --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp @@ -0,0 +1,299 @@ +//===-- llvm/CodeGen/GlobalISel/Legalizer.cpp -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file This file implements the LegalizerHelper class to legalize individual +/// instructions and the LegalizePass wrapper pass for the primary +/// legalization. 
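+///
+/// (Illustrative: the pass is registered as "legalizer", so it can be run in
+/// isolation with 'llc -global-isel -stop-after=legalizer'; CSE during
+/// legalization is toggled by -enable-cse-in-legalizer below.)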
+// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/Legalizer.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/CodeGen/GlobalISel/CSEInfo.h" +#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/GISelWorkList.h" +#include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h" +#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Target/TargetMachine.h" + +#include <iterator> + +#define DEBUG_TYPE "legalizer" + +using namespace llvm; + +static cl::opt<bool> + EnableCSEInLegalizer("enable-cse-in-legalizer", + cl::desc("Should enable CSE in Legalizer"), + cl::Optional, cl::init(false)); + +char Legalizer::ID = 0; +INITIALIZE_PASS_BEGIN(Legalizer, DEBUG_TYPE, + "Legalize the Machine IR a function's Machine IR", false, + false) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) +INITIALIZE_PASS_END(Legalizer, DEBUG_TYPE, + "Legalize the Machine IR a function's Machine IR", false, + false) + +Legalizer::Legalizer() : MachineFunctionPass(ID) { } + +void Legalizer::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<TargetPassConfig>(); + AU.addRequired<GISelCSEAnalysisWrapperPass>(); + AU.addPreserved<GISelCSEAnalysisWrapperPass>(); + getSelectionDAGFallbackAnalysisUsage(AU); + MachineFunctionPass::getAnalysisUsage(AU); +} + +void Legalizer::init(MachineFunction &MF) { +} + +static bool isArtifact(const MachineInstr &MI) { + switch (MI.getOpcode()) { + default: + return false; + case TargetOpcode::G_TRUNC: + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_ANYEXT: + case TargetOpcode::G_SEXT: + case TargetOpcode::G_MERGE_VALUES: + case TargetOpcode::G_UNMERGE_VALUES: + case TargetOpcode::G_CONCAT_VECTORS: + case TargetOpcode::G_BUILD_VECTOR: + case TargetOpcode::G_EXTRACT: + return true; + } +} +using InstListTy = GISelWorkList<256>; +using ArtifactListTy = GISelWorkList<128>; + +namespace { +class LegalizerWorkListManager : public GISelChangeObserver { + InstListTy &InstList; + ArtifactListTy &ArtifactList; +#ifndef NDEBUG + SmallVector<MachineInstr *, 4> NewMIs; +#endif + +public: + LegalizerWorkListManager(InstListTy &Insts, ArtifactListTy &Arts) + : InstList(Insts), ArtifactList(Arts) {} + + void createdOrChangedInstr(MachineInstr &MI) { + // Only legalize pre-isel generic instructions. + // Legalization process could generate Target specific pseudo + // instructions with generic types. Don't record them + if (isPreISelGenericOpcode(MI.getOpcode())) { + if (isArtifact(MI)) + ArtifactList.insert(&MI); + else + InstList.insert(&MI); + } + } + + void createdInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << ".. .. New MI: " << MI); + LLVM_DEBUG(NewMIs.push_back(&MI)); + createdOrChangedInstr(MI); + } + + void printNewInstrs() { + LLVM_DEBUG({ + for (const auto *MI : NewMIs) + dbgs() << ".. .. New MI: " << *MI; + NewMIs.clear(); + }); + } + + void erasingInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << ".. .. 
Erasing: " << MI); + InstList.remove(&MI); + ArtifactList.remove(&MI); + } + + void changingInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << ".. .. Changing MI: " << MI); + } + + void changedInstr(MachineInstr &MI) override { + // When insts change, we want to revisit them to legalize them again. + // We'll consider them the same as created. + LLVM_DEBUG(dbgs() << ".. .. Changed MI: " << MI); + createdOrChangedInstr(MI); + } +}; +} // namespace + +bool Legalizer::runOnMachineFunction(MachineFunction &MF) { + // If the ISel pipeline failed, do not bother running that pass. + if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + LLVM_DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n'); + init(MF); + const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>(); + GISelCSEAnalysisWrapper &Wrapper = + getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); + MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); + + const size_t NumBlocks = MF.size(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + + // Populate Insts + InstListTy InstList; + ArtifactListTy ArtifactList; + ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); + // Perform legalization bottom up so we can DCE as we legalize. + // Traverse BB in RPOT and within each basic block, add insts top down, + // so when we pop_back_val in the legalization process, we traverse bottom-up. + for (auto *MBB : RPOT) { + if (MBB->empty()) + continue; + for (MachineInstr &MI : *MBB) { + // Only legalize pre-isel generic instructions: others don't have types + // and are assumed to be legal. + if (!isPreISelGenericOpcode(MI.getOpcode())) + continue; + if (isArtifact(MI)) + ArtifactList.deferred_insert(&MI); + else + InstList.deferred_insert(&MI); + } + } + ArtifactList.finalize(); + InstList.finalize(); + std::unique_ptr<MachineIRBuilder> MIRBuilder; + GISelCSEInfo *CSEInfo = nullptr; + bool EnableCSE = EnableCSEInLegalizer.getNumOccurrences() + ? EnableCSEInLegalizer + : TPC.isGISelCSEEnabled(); + + if (EnableCSE) { + MIRBuilder = std::make_unique<CSEMIRBuilder>(); + CSEInfo = &Wrapper.get(TPC.getCSEConfig()); + MIRBuilder->setCSEInfo(CSEInfo); + } else + MIRBuilder = std::make_unique<MachineIRBuilder>(); + // This observer keeps the worklist updated. + LegalizerWorkListManager WorkListObserver(InstList, ArtifactList); + // We want both WorkListObserver as well as CSEInfo to observe all changes. + // Use the wrapper observer. + GISelObserverWrapper WrapperObserver(&WorkListObserver); + if (EnableCSE && CSEInfo) + WrapperObserver.addObserver(CSEInfo); + // Now install the observer as the delegate to MF. + // This will keep all the observers notified about new insertions/deletions. 
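+  // Rough shape of the wiring, for illustration:
+  //   MachineFunction delegate -> WrapperObserver -> { WorkListObserver,
+  //                                                    CSEInfo (if enabled) }
+  // so each createdInstr/erasingInstr/changingInstr/changedInstr callback
+  // fans out to the worklists and, when CSE is on, to the CSE maps too.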
+  RAIIDelegateInstaller DelInstall(MF, &WrapperObserver);
+  LegalizerHelper Helper(MF, WrapperObserver, *MIRBuilder.get());
+  const LegalizerInfo &LInfo(Helper.getLegalizerInfo());
+  LegalizationArtifactCombiner ArtCombiner(*MIRBuilder.get(), MF.getRegInfo(),
+                                           LInfo);
+  auto RemoveDeadInstFromLists = [&WrapperObserver](MachineInstr *DeadMI) {
+    WrapperObserver.erasingInstr(*DeadMI);
+  };
+  auto stopLegalizing = [&](MachineInstr &MI) {
+    Helper.MIRBuilder.stopObservingChanges();
+    reportGISelFailure(MF, TPC, MORE, "gisel-legalize",
+                       "unable to legalize instruction", MI);
+  };
+  bool Changed = false;
+  SmallVector<MachineInstr *, 128> RetryList;
+  do {
+    assert(RetryList.empty() && "Expected no instructions in RetryList");
+    unsigned NumArtifacts = ArtifactList.size();
+    while (!InstList.empty()) {
+      MachineInstr &MI = *InstList.pop_back_val();
+      assert(isPreISelGenericOpcode(MI.getOpcode()) &&
+             "Expecting generic opcode");
+      if (isTriviallyDead(MI, MRI)) {
+        LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
+        MI.eraseFromParentAndMarkDBGValuesForRemoval();
+        continue;
+      }
+
+      // Do the legalization for this instruction.
+      auto Res = Helper.legalizeInstrStep(MI);
+      // Error out if we couldn't legalize this instruction. We may want to
+      // fall back to DAG ISel instead in the future.
+      if (Res == LegalizerHelper::UnableToLegalize) {
+        // Move illegal artifacts to RetryList instead of aborting because
+        // legalizing InstList may generate artifacts that allow
+        // ArtifactCombiner to combine them away.
+        if (isArtifact(MI)) {
+          RetryList.push_back(&MI);
+          continue;
+        }
+        stopLegalizing(MI);
+        return false;
+      }
+      WorkListObserver.printNewInstrs();
+      Changed |= Res == LegalizerHelper::Legalized;
+    }
+    // Try to combine the instructions in RetryList again if there
+    // are new artifacts. If not, stop legalizing.
+    if (!RetryList.empty()) {
+      if (ArtifactList.size() > NumArtifacts) {
+        while (!RetryList.empty())
+          ArtifactList.insert(RetryList.pop_back_val());
+      } else {
+        MachineInstr *MI = *RetryList.begin();
+        stopLegalizing(*MI);
+        return false;
+      }
+    }
+    while (!ArtifactList.empty()) {
+      MachineInstr &MI = *ArtifactList.pop_back_val();
+      assert(isPreISelGenericOpcode(MI.getOpcode()) &&
+             "Expecting generic opcode");
+      if (isTriviallyDead(MI, MRI)) {
+        LLVM_DEBUG(dbgs() << MI << "Is dead\n");
+        RemoveDeadInstFromLists(&MI);
+        MI.eraseFromParentAndMarkDBGValuesForRemoval();
+        continue;
+      }
+      SmallVector<MachineInstr *, 4> DeadInstructions;
+      if (ArtCombiner.tryCombineInstruction(MI, DeadInstructions,
+                                            WrapperObserver)) {
+        WorkListObserver.printNewInstrs();
+        for (auto *DeadMI : DeadInstructions) {
+          LLVM_DEBUG(dbgs() << *DeadMI << "Is dead\n");
+          RemoveDeadInstFromLists(DeadMI);
+          DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
+        }
+        Changed = true;
+        continue;
+      }
+      // If this was not an artifact (that could be combined away), this might
+      // need special handling. Add it to InstList, so when it's processed
+      // there, it has to be legal or specially handled.
+      else
+        InstList.insert(&MI);
+    }
+  } while (!InstList.empty());
+
+  // For now we don't support inserting new blocks; we would need to fix the
+  // outer loop for that.
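+  // (For example, a custom legalization that expanded an instruction into
+  //  control flow with a fresh basic block would change MF.size() and trip
+  //  the check below.)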
+ if (MF.size() != NumBlocks) { + MachineOptimizationRemarkMissed R("gisel-legalize", "GISelFailure", + MF.getFunction().getSubprogram(), + /*MBB=*/nullptr); + R << "inserting blocks is not supported yet"; + reportGISelFailure(MF, TPC, MORE, R); + return false; + } + + return Changed; +} diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp new file mode 100644 index 000000000000..21512e543878 --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -0,0 +1,4277 @@ +//===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file This file implements the LegalizerHelper class to legalize +/// individual instructions and the LegalizeMachineIR wrapper pass for the +/// primary legalization. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" +#include "llvm/CodeGen/GlobalISel/CallLowering.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetFrameLowering.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" + +#define DEBUG_TYPE "legalizer" + +using namespace llvm; +using namespace LegalizeActions; + +/// Try to break down \p OrigTy into \p NarrowTy sized pieces. +/// +/// Returns the number of \p NarrowTy elements needed to reconstruct \p OrigTy, +/// with any leftover piece as type \p LeftoverTy +/// +/// Returns -1 in the first element of the pair if the breakdown is not +/// satisfiable. 
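+///
+/// For example (illustrative): breaking v5s16 (80 bits) into v2s16 pieces
+/// yields {2, 1} with LeftoverTy = s16: two v2s16 parts cover 64 bits and
+/// one s16 covers the remaining 16.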
+static std::pair<int, int> +getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) { + assert(!LeftoverTy.isValid() && "this is an out argument"); + + unsigned Size = OrigTy.getSizeInBits(); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + unsigned NumParts = Size / NarrowSize; + unsigned LeftoverSize = Size - NumParts * NarrowSize; + assert(Size > NarrowSize); + + if (LeftoverSize == 0) + return {NumParts, 0}; + + if (NarrowTy.isVector()) { + unsigned EltSize = OrigTy.getScalarSizeInBits(); + if (LeftoverSize % EltSize != 0) + return {-1, -1}; + LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); + } else { + LeftoverTy = LLT::scalar(LeftoverSize); + } + + int NumLeftover = LeftoverSize / LeftoverTy.getSizeInBits(); + return std::make_pair(NumParts, NumLeftover); +} + +LegalizerHelper::LegalizerHelper(MachineFunction &MF, + GISelChangeObserver &Observer, + MachineIRBuilder &Builder) + : MIRBuilder(Builder), MRI(MF.getRegInfo()), + LI(*MF.getSubtarget().getLegalizerInfo()), Observer(Observer) { + MIRBuilder.setMF(MF); + MIRBuilder.setChangeObserver(Observer); +} + +LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI, + GISelChangeObserver &Observer, + MachineIRBuilder &B) + : MIRBuilder(B), MRI(MF.getRegInfo()), LI(LI), Observer(Observer) { + MIRBuilder.setMF(MF); + MIRBuilder.setChangeObserver(Observer); +} +LegalizerHelper::LegalizeResult +LegalizerHelper::legalizeInstrStep(MachineInstr &MI) { + LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs())); + + if (MI.getOpcode() == TargetOpcode::G_INTRINSIC || + MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) + return LI.legalizeIntrinsic(MI, MRI, MIRBuilder) ? Legalized + : UnableToLegalize; + auto Step = LI.getAction(MI, MRI); + switch (Step.Action) { + case Legal: + LLVM_DEBUG(dbgs() << ".. Already legal\n"); + return AlreadyLegal; + case Libcall: + LLVM_DEBUG(dbgs() << ".. Convert to libcall\n"); + return libcall(MI); + case NarrowScalar: + LLVM_DEBUG(dbgs() << ".. Narrow scalar\n"); + return narrowScalar(MI, Step.TypeIdx, Step.NewType); + case WidenScalar: + LLVM_DEBUG(dbgs() << ".. Widen scalar\n"); + return widenScalar(MI, Step.TypeIdx, Step.NewType); + case Lower: + LLVM_DEBUG(dbgs() << ".. Lower\n"); + return lower(MI, Step.TypeIdx, Step.NewType); + case FewerElements: + LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n"); + return fewerElementsVector(MI, Step.TypeIdx, Step.NewType); + case MoreElements: + LLVM_DEBUG(dbgs() << ".. Increase number of elements\n"); + return moreElementsVector(MI, Step.TypeIdx, Step.NewType); + case Custom: + LLVM_DEBUG(dbgs() << ".. Custom legalization\n"); + return LI.legalizeCustom(MI, MRI, MIRBuilder, Observer) ? Legalized + : UnableToLegalize; + default: + LLVM_DEBUG(dbgs() << ".. 
Unable to legalize\n"); + return UnableToLegalize; + } +} + +void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts, + SmallVectorImpl<Register> &VRegs) { + for (int i = 0; i < NumParts; ++i) + VRegs.push_back(MRI.createGenericVirtualRegister(Ty)); + MIRBuilder.buildUnmerge(VRegs, Reg); +} + +bool LegalizerHelper::extractParts(Register Reg, LLT RegTy, + LLT MainTy, LLT &LeftoverTy, + SmallVectorImpl<Register> &VRegs, + SmallVectorImpl<Register> &LeftoverRegs) { + assert(!LeftoverTy.isValid() && "this is an out argument"); + + unsigned RegSize = RegTy.getSizeInBits(); + unsigned MainSize = MainTy.getSizeInBits(); + unsigned NumParts = RegSize / MainSize; + unsigned LeftoverSize = RegSize - NumParts * MainSize; + + // Use an unmerge when possible. + if (LeftoverSize == 0) { + for (unsigned I = 0; I < NumParts; ++I) + VRegs.push_back(MRI.createGenericVirtualRegister(MainTy)); + MIRBuilder.buildUnmerge(VRegs, Reg); + return true; + } + + if (MainTy.isVector()) { + unsigned EltSize = MainTy.getScalarSizeInBits(); + if (LeftoverSize % EltSize != 0) + return false; + LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); + } else { + LeftoverTy = LLT::scalar(LeftoverSize); + } + + // For irregular sizes, extract the individual parts. + for (unsigned I = 0; I != NumParts; ++I) { + Register NewReg = MRI.createGenericVirtualRegister(MainTy); + VRegs.push_back(NewReg); + MIRBuilder.buildExtract(NewReg, Reg, MainSize * I); + } + + for (unsigned Offset = MainSize * NumParts; Offset < RegSize; + Offset += LeftoverSize) { + Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy); + LeftoverRegs.push_back(NewReg); + MIRBuilder.buildExtract(NewReg, Reg, Offset); + } + + return true; +} + +static LLT getGCDType(LLT OrigTy, LLT TargetTy) { + if (OrigTy.isVector() && TargetTy.isVector()) { + assert(OrigTy.getElementType() == TargetTy.getElementType()); + int GCD = greatestCommonDivisor(OrigTy.getNumElements(), + TargetTy.getNumElements()); + return LLT::scalarOrVector(GCD, OrigTy.getElementType()); + } + + if (OrigTy.isVector() && !TargetTy.isVector()) { + assert(OrigTy.getElementType() == TargetTy); + return TargetTy; + } + + assert(!OrigTy.isVector() && !TargetTy.isVector()); + + int GCD = greatestCommonDivisor(OrigTy.getSizeInBits(), + TargetTy.getSizeInBits()); + return LLT::scalar(GCD); +} + +void LegalizerHelper::insertParts(Register DstReg, + LLT ResultTy, LLT PartTy, + ArrayRef<Register> PartRegs, + LLT LeftoverTy, + ArrayRef<Register> LeftoverRegs) { + if (!LeftoverTy.isValid()) { + assert(LeftoverRegs.empty()); + + if (!ResultTy.isVector()) { + MIRBuilder.buildMerge(DstReg, PartRegs); + return; + } + + if (PartTy.isVector()) + MIRBuilder.buildConcatVectors(DstReg, PartRegs); + else + MIRBuilder.buildBuildVector(DstReg, PartRegs); + return; + } + + unsigned PartSize = PartTy.getSizeInBits(); + unsigned LeftoverPartSize = LeftoverTy.getSizeInBits(); + + Register CurResultReg = MRI.createGenericVirtualRegister(ResultTy); + MIRBuilder.buildUndef(CurResultReg); + + unsigned Offset = 0; + for (Register PartReg : PartRegs) { + Register NewResultReg = MRI.createGenericVirtualRegister(ResultTy); + MIRBuilder.buildInsert(NewResultReg, CurResultReg, PartReg, Offset); + CurResultReg = NewResultReg; + Offset += PartSize; + } + + for (unsigned I = 0, E = LeftoverRegs.size(); I != E; ++I) { + // Use the original output register for the final insert to avoid a copy. + Register NewResultReg = (I + 1 == E) ? 
+ DstReg : MRI.createGenericVirtualRegister(ResultTy); + + MIRBuilder.buildInsert(NewResultReg, CurResultReg, LeftoverRegs[I], Offset); + CurResultReg = NewResultReg; + Offset += LeftoverPartSize; + } +} + +static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) { + switch (Opcode) { + case TargetOpcode::G_SDIV: + assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); + switch (Size) { + case 32: + return RTLIB::SDIV_I32; + case 64: + return RTLIB::SDIV_I64; + case 128: + return RTLIB::SDIV_I128; + default: + llvm_unreachable("unexpected size"); + } + case TargetOpcode::G_UDIV: + assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); + switch (Size) { + case 32: + return RTLIB::UDIV_I32; + case 64: + return RTLIB::UDIV_I64; + case 128: + return RTLIB::UDIV_I128; + default: + llvm_unreachable("unexpected size"); + } + case TargetOpcode::G_SREM: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32; + case TargetOpcode::G_UREM: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::UREM_I64 : RTLIB::UREM_I32; + case TargetOpcode::G_CTLZ_ZERO_UNDEF: + assert(Size == 32 && "Unsupported size"); + return RTLIB::CTLZ_I32; + case TargetOpcode::G_FADD: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32; + case TargetOpcode::G_FSUB: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32; + case TargetOpcode::G_FMUL: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32; + case TargetOpcode::G_FDIV: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32; + case TargetOpcode::G_FEXP: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::EXP_F64 : RTLIB::EXP_F32; + case TargetOpcode::G_FEXP2: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::EXP2_F64 : RTLIB::EXP2_F32; + case TargetOpcode::G_FREM: + return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32; + case TargetOpcode::G_FPOW: + return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32; + case TargetOpcode::G_FMA: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32; + case TargetOpcode::G_FSIN: + assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); + return Size == 128 ? RTLIB::SIN_F128 + : Size == 64 ? RTLIB::SIN_F64 : RTLIB::SIN_F32; + case TargetOpcode::G_FCOS: + assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); + return Size == 128 ? RTLIB::COS_F128 + : Size == 64 ? RTLIB::COS_F64 : RTLIB::COS_F32; + case TargetOpcode::G_FLOG10: + assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); + return Size == 128 ? RTLIB::LOG10_F128 + : Size == 64 ? RTLIB::LOG10_F64 : RTLIB::LOG10_F32; + case TargetOpcode::G_FLOG: + assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); + return Size == 128 ? RTLIB::LOG_F128 + : Size == 64 ? RTLIB::LOG_F64 : RTLIB::LOG_F32; + case TargetOpcode::G_FLOG2: + assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); + return Size == 128 ? RTLIB::LOG2_F128 + : Size == 64 ? RTLIB::LOG2_F64 : RTLIB::LOG2_F32; + case TargetOpcode::G_FCEIL: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? 
RTLIB::CEIL_F64 : RTLIB::CEIL_F32; + case TargetOpcode::G_FFLOOR: + assert((Size == 32 || Size == 64) && "Unsupported size"); + return Size == 64 ? RTLIB::FLOOR_F64 : RTLIB::FLOOR_F32; + } + llvm_unreachable("Unknown libcall function"); +} + +/// True if an instruction is in tail position in its caller. Intended for +/// legalizing libcalls as tail calls when possible. +static bool isLibCallInTailPosition(MachineInstr &MI) { + const Function &F = MI.getParent()->getParent()->getFunction(); + + // Conservatively require the attributes of the call to match those of + // the return. Ignore NoAlias and NonNull because they don't affect the + // call sequence. + AttributeList CallerAttrs = F.getAttributes(); + if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex) + .removeAttribute(Attribute::NoAlias) + .removeAttribute(Attribute::NonNull) + .hasAttributes()) + return false; + + // It's not safe to eliminate the sign / zero extension of the return value. + if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) || + CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt)) + return false; + + // Only tail call if the following instruction is a standard return. + auto &TII = *MI.getMF()->getSubtarget().getInstrInfo(); + MachineInstr *Next = MI.getNextNode(); + if (!Next || TII.isTailCall(*Next) || !Next->isReturn()) + return false; + + return true; +} + +LegalizerHelper::LegalizeResult +llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall, + const CallLowering::ArgInfo &Result, + ArrayRef<CallLowering::ArgInfo> Args) { + auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering(); + auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); + const char *Name = TLI.getLibcallName(Libcall); + + CallLowering::CallLoweringInfo Info; + Info.CallConv = TLI.getLibcallCallingConv(Libcall); + Info.Callee = MachineOperand::CreateES(Name); + Info.OrigRet = Result; + std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs)); + if (!CLI.lowerCall(MIRBuilder, Info)) + return LegalizerHelper::UnableToLegalize; + + return LegalizerHelper::Legalized; +} + +// Useful for libcalls where all operands have the same type. +static LegalizerHelper::LegalizeResult +simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size, + Type *OpType) { + auto Libcall = getRTLibDesc(MI.getOpcode(), Size); + + SmallVector<CallLowering::ArgInfo, 3> Args; + for (unsigned i = 1; i < MI.getNumOperands(); i++) + Args.push_back({MI.getOperand(i).getReg(), OpType}); + return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType}, + Args); +} + +LegalizerHelper::LegalizeResult +llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, + MachineInstr &MI) { + assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS); + auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); + + SmallVector<CallLowering::ArgInfo, 3> Args; + // Add all the args, except for the last which is an imm denoting 'tail'. + for (unsigned i = 1; i < MI.getNumOperands() - 1; i++) { + Register Reg = MI.getOperand(i).getReg(); + + // We need to derive an IR type for call lowering.
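+ // E.g. a p0 pointer operand is passed as i8* (in its address space) and an + // s64 operand as i64 when building the ArgInfo list below.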
+ LLT OpLLT = MRI.getType(Reg); + Type *OpTy = nullptr; + if (OpLLT.isPointer()) + OpTy = Type::getInt8PtrTy(Ctx, OpLLT.getAddressSpace()); + else + OpTy = IntegerType::get(Ctx, OpLLT.getSizeInBits()); + Args.push_back({Reg, OpTy}); + } + + auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering(); + auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); + Intrinsic::ID ID = MI.getOperand(0).getIntrinsicID(); + RTLIB::Libcall RTLibcall; + switch (ID) { + case Intrinsic::memcpy: + RTLibcall = RTLIB::MEMCPY; + break; + case Intrinsic::memset: + RTLibcall = RTLIB::MEMSET; + break; + case Intrinsic::memmove: + RTLibcall = RTLIB::MEMMOVE; + break; + default: + return LegalizerHelper::UnableToLegalize; + } + const char *Name = TLI.getLibcallName(RTLibcall); + + MIRBuilder.setInstr(MI); + + CallLowering::CallLoweringInfo Info; + Info.CallConv = TLI.getLibcallCallingConv(RTLibcall); + Info.Callee = MachineOperand::CreateES(Name); + Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx)); + Info.IsTailCall = MI.getOperand(MI.getNumOperands() - 1).getImm() == 1 && + isLibCallInTailPosition(MI); + + std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs)); + if (!CLI.lowerCall(MIRBuilder, Info)) + return LegalizerHelper::UnableToLegalize; + + if (Info.LoweredTailCall) { + assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?"); + // We must have a return following the call to get past + // isLibCallInTailPosition. + assert(MI.getNextNode() && MI.getNextNode()->isReturn() && + "Expected instr following MI to be a return?"); + + // We lowered a tail call, so the call is now the return from the block. + // Delete the old return. + MI.getNextNode()->eraseFromParent(); + } + + return LegalizerHelper::Legalized; +} + +static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType, + Type *FromType) { + auto ToMVT = MVT::getVT(ToType); + auto FromMVT = MVT::getVT(FromType); + + switch (Opcode) { + case TargetOpcode::G_FPEXT: + return RTLIB::getFPEXT(FromMVT, ToMVT); + case TargetOpcode::G_FPTRUNC: + return RTLIB::getFPROUND(FromMVT, ToMVT); + case TargetOpcode::G_FPTOSI: + return RTLIB::getFPTOSINT(FromMVT, ToMVT); + case TargetOpcode::G_FPTOUI: + return RTLIB::getFPTOUINT(FromMVT, ToMVT); + case TargetOpcode::G_SITOFP: + return RTLIB::getSINTTOFP(FromMVT, ToMVT); + case TargetOpcode::G_UITOFP: + return RTLIB::getUINTTOFP(FromMVT, ToMVT); + } + llvm_unreachable("Unsupported libcall function"); +} + +static LegalizerHelper::LegalizeResult +conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType, + Type *FromType) { + RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType); + return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType}, + {{MI.getOperand(1).getReg(), FromType}}); +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::libcall(MachineInstr &MI) { + LLT LLTy = MRI.getType(MI.getOperand(0).getReg()); + unsigned Size = LLTy.getSizeInBits(); + auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); + + MIRBuilder.setInstr(MI); + + switch (MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SREM: + case TargetOpcode::G_UREM: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: { + Type *HLTy = IntegerType::get(Ctx, Size); + auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_FADD: + case TargetOpcode::G_FSUB: + case 
TargetOpcode::G_FMUL: + case TargetOpcode::G_FDIV: + case TargetOpcode::G_FMA: + case TargetOpcode::G_FPOW: + case TargetOpcode::G_FREM: + case TargetOpcode::G_FCOS: + case TargetOpcode::G_FSIN: + case TargetOpcode::G_FLOG10: + case TargetOpcode::G_FLOG: + case TargetOpcode::G_FLOG2: + case TargetOpcode::G_FEXP: + case TargetOpcode::G_FEXP2: + case TargetOpcode::G_FCEIL: + case TargetOpcode::G_FFLOOR: { + if (Size > 64) { + LLVM_DEBUG(dbgs() << "Size " << Size << " too large to legalize.\n"); + return UnableToLegalize; + } + Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx); + auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_FPEXT: { + // FIXME: Support other floating point types (half, fp128 etc) + unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + if (ToSize != 64 || FromSize != 32) + return UnableToLegalize; + LegalizeResult Status = conversionLibcall( + MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx)); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_FPTRUNC: { + // FIXME: Support other floating point types (half, fp128 etc) + unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + if (ToSize != 32 || FromSize != 64) + return UnableToLegalize; + LegalizeResult Status = conversionLibcall( + MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx)); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_FPTOSI: + case TargetOpcode::G_FPTOUI: { + // FIXME: Support other types + unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + if ((ToSize != 32 && ToSize != 64) || (FromSize != 32 && FromSize != 64)) + return UnableToLegalize; + LegalizeResult Status = conversionLibcall( + MI, MIRBuilder, + ToSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx), + FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx)); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_SITOFP: + case TargetOpcode::G_UITOFP: { + // FIXME: Support other types + unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + if ((FromSize != 32 && FromSize != 64) || (ToSize != 32 && ToSize != 64)) + return UnableToLegalize; + LegalizeResult Status = conversionLibcall( + MI, MIRBuilder, + ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx), + FromSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx)); + if (Status != Legalized) + return Status; + break; + } + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI, + unsigned TypeIdx, + LLT NarrowTy) { + MIRBuilder.setInstr(MI); + + uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + uint64_t NarrowSize = NarrowTy.getSizeInBits(); + + switch (MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_IMPLICIT_DEF: { + // FIXME: add support for when SizeOp0 isn't an exact multiple of + // NarrowSize. 
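+ // E.g. with NarrowTy = s32, an s64 G_IMPLICIT_DEF becomes roughly: + // %lo:_(s32) = G_IMPLICIT_DEF + // %hi:_(s32) = G_IMPLICIT_DEF + // %dst:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)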
+ if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + int NumParts = SizeOp0 / NarrowSize; + + SmallVector<Register, 2> DstRegs; + for (int i = 0; i < NumParts; ++i) + DstRegs.push_back( + MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg()); + + Register DstReg = MI.getOperand(0).getReg(); + if (MRI.getType(DstReg).isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_CONSTANT: { + LLT Ty = MRI.getType(MI.getOperand(0).getReg()); + const APInt &Val = MI.getOperand(1).getCImm()->getValue(); + unsigned TotalSize = Ty.getSizeInBits(); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + int NumParts = TotalSize / NarrowSize; + + SmallVector<Register, 4> PartRegs; + for (int I = 0; I != NumParts; ++I) { + unsigned Offset = I * NarrowSize; + auto K = MIRBuilder.buildConstant(NarrowTy, + Val.lshr(Offset).trunc(NarrowSize)); + PartRegs.push_back(K.getReg(0)); + } + + LLT LeftoverTy; + unsigned LeftoverBits = TotalSize - NumParts * NarrowSize; + SmallVector<Register, 1> LeftoverRegs; + if (LeftoverBits != 0) { + LeftoverTy = LLT::scalar(LeftoverBits); + auto K = MIRBuilder.buildConstant( + LeftoverTy, + Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits)); + LeftoverRegs.push_back(K.getReg(0)); + } + + insertParts(MI.getOperand(0).getReg(), + Ty, NarrowTy, PartRegs, LeftoverTy, LeftoverRegs); + + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SEXT: { + if (TypeIdx != 0) + return UnableToLegalize; + + Register SrcReg = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + + // FIXME: support the general case where the requested NarrowTy may not be + // the same as the source type. E.g. s128 = sext(s32) + if ((SrcTy.getSizeInBits() != SizeOp0 / 2) || + SrcTy.getSizeInBits() != NarrowTy.getSizeInBits()) { + LLVM_DEBUG(dbgs() << "Can't narrow sext to type " << NarrowTy << "\n"); + return UnableToLegalize; + } + + // Shift the sign bit of the low register through the high register. + auto ShiftAmt = + MIRBuilder.buildConstant(LLT::scalar(64), NarrowTy.getSizeInBits() - 1); + auto Shift = MIRBuilder.buildAShr(NarrowTy, SrcReg, ShiftAmt); + MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {SrcReg, Shift.getReg(0)}); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_ZEXT: { + if (TypeIdx != 0) + return UnableToLegalize; + + LLT SrcTy = MRI.getType(MI.getOperand(1).getReg()); + uint64_t SizeOp1 = SrcTy.getSizeInBits(); + if (SizeOp0 % SizeOp1 != 0) + return UnableToLegalize; + + // Generate a merge where the bottom bits are taken from the source, and + // zero everything else.
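+ // E.g. an s32 -> s128 G_ZEXT becomes roughly: + // %zero:_(s32) = G_CONSTANT i32 0 + // %dst:_(s128) = G_MERGE_VALUES %src:_(s32), %zero:_(s32), %zero:_(s32), %zero:_(s32)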
+ Register ZeroReg = MIRBuilder.buildConstant(SrcTy, 0).getReg(0); + unsigned NumParts = SizeOp0 / SizeOp1; + SmallVector<Register, 4> Srcs = {MI.getOperand(1).getReg()}; + for (unsigned Part = 1; Part < NumParts; ++Part) + Srcs.push_back(ZeroReg); + MIRBuilder.buildMerge(MI.getOperand(0).getReg(), Srcs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_TRUNC: { + if (TypeIdx != 1) + return UnableToLegalize; + + uint64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + if (NarrowTy.getSizeInBits() * 2 != SizeOp1) { + LLVM_DEBUG(dbgs() << "Can't narrow trunc to type " << NarrowTy << "\n"); + return UnableToLegalize; + } + + auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1).getReg()); + MIRBuilder.buildCopy(MI.getOperand(0).getReg(), Unmerge.getReg(0)); + MI.eraseFromParent(); + return Legalized; + } + + case TargetOpcode::G_ADD: { + // FIXME: add support for when SizeOp0 isn't an exact multiple of + // NarrowSize. + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + // Expand in terms of carry-setting/consuming G_UADDO/G_UADDE instructions. + int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); + + SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); + extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); + + Register CarryIn; + for (int i = 0; i < NumParts; ++i) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); + Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); + + if (i == 0) + MIRBuilder.buildUAddo(DstReg, CarryOut, Src1Regs[i], Src2Regs[i]); + else { + MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i], + Src2Regs[i], CarryIn); + } + + DstRegs.push_back(DstReg); + CarryIn = CarryOut; + } + Register DstReg = MI.getOperand(0).getReg(); + if (MRI.getType(DstReg).isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SUB: { + // FIXME: add support for when SizeOp0 isn't an exact multiple of + // NarrowSize.
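+ // Mirrors the G_ADD expansion above, with a borrow chain instead of a + // carry chain. E.g. an s64 G_SUB with NarrowTy = s32 becomes roughly: + // %d0:_(s32), %b0:_(s1) = G_USUBO %l0, %r0 + // %d1:_(s32), %b1:_(s1) = G_USUBE %l1, %r1, %b0 + // %dst:_(s64) = G_MERGE_VALUES %d0:_(s32), %d1:_(s32)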
+ if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + + int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); + + SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); + extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); + + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); + Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); + MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut}, + {Src1Regs[0], Src2Regs[0]}); + DstRegs.push_back(DstReg); + Register BorrowIn = BorrowOut; + for (int i = 1; i < NumParts; ++i) { + DstReg = MRI.createGenericVirtualRegister(NarrowTy); + BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); + + MIRBuilder.buildInstr(TargetOpcode::G_USUBE, {DstReg, BorrowOut}, + {Src1Regs[i], Src2Regs[i], BorrowIn}); + + DstRegs.push_back(DstReg); + BorrowIn = BorrowOut; + } + MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_MUL: + case TargetOpcode::G_UMULH: + return narrowScalarMul(MI, NarrowTy); + case TargetOpcode::G_EXTRACT: + return narrowScalarExtract(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_INSERT: + return narrowScalarInsert(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_LOAD: { + const auto &MMO = **MI.memoperands_begin(); + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + if (DstTy.isVector()) + return UnableToLegalize; + + if (8 * MMO.getSize() != DstTy.getSizeInBits()) { + Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + auto &MMO = **MI.memoperands_begin(); + MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO); + MIRBuilder.buildAnyExt(DstReg, TmpReg); + MI.eraseFromParent(); + return Legalized; + } + + return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); + } + case TargetOpcode::G_ZEXTLOAD: + case TargetOpcode::G_SEXTLOAD: { + bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD; + Register DstReg = MI.getOperand(0).getReg(); + Register PtrReg = MI.getOperand(1).getReg(); + + Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + auto &MMO = **MI.memoperands_begin(); + if (MMO.getSizeInBits() == NarrowSize) { + MIRBuilder.buildLoad(TmpReg, PtrReg, MMO); + } else { + unsigned ExtLoad = ZExt ? 
TargetOpcode::G_ZEXTLOAD + : TargetOpcode::G_SEXTLOAD; + MIRBuilder.buildInstr(ExtLoad) + .addDef(TmpReg) + .addUse(PtrReg) + .addMemOperand(&MMO); + } + + if (ZExt) + MIRBuilder.buildZExt(DstReg, TmpReg); + else + MIRBuilder.buildSExt(DstReg, TmpReg); + + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_STORE: { + const auto &MMO = **MI.memoperands_begin(); + + Register SrcReg = MI.getOperand(0).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + if (SrcTy.isVector()) + return UnableToLegalize; + + int NumParts = SizeOp0 / NarrowSize; + unsigned HandledSize = NumParts * NarrowTy.getSizeInBits(); + unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize; + if (SrcTy.isVector() && LeftoverBits != 0) + return UnableToLegalize; + + if (8 * MMO.getSize() != SrcTy.getSizeInBits()) { + Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + auto &MMO = **MI.memoperands_begin(); + MIRBuilder.buildTrunc(TmpReg, SrcReg); + MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO); + MI.eraseFromParent(); + return Legalized; + } + + return reduceLoadStoreWidth(MI, 0, NarrowTy); + } + case TargetOpcode::G_SELECT: + return narrowScalarSelect(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_AND: + case TargetOpcode::G_OR: + case TargetOpcode::G_XOR: { + // Legalize bitwise operation: + // A = BinOp<Ty> B, C + // into: + // B1, ..., BN = G_UNMERGE_VALUES B + // C1, ..., CN = G_UNMERGE_VALUES C + // A1 = BinOp<Ty/N> B1, C1 + // ... + // AN = BinOp<Ty/N> BN, CN + // A = G_MERGE_VALUES A1, ..., AN + return narrowScalarBasic(MI, TypeIdx, NarrowTy); + } + case TargetOpcode::G_SHL: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_ASHR: + return narrowScalarShift(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_CTLZ: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: + case TargetOpcode::G_CTTZ: + case TargetOpcode::G_CTTZ_ZERO_UNDEF: + case TargetOpcode::G_CTPOP: + if (TypeIdx != 0) + return UnableToLegalize; // TODO + + Observer.changingInstr(MI); + narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_INTTOPTR: + if (TypeIdx != 1) + return UnableToLegalize; + + Observer.changingInstr(MI); + narrowScalarSrc(MI, NarrowTy, 1); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_PTRTOINT: + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_PHI: { + unsigned NumParts = SizeOp0 / NarrowSize; + SmallVector<Register, 2> DstRegs; + SmallVector<SmallVector<Register, 2>, 2> SrcRegs; + DstRegs.resize(NumParts); + SrcRegs.resize(MI.getNumOperands() / 2); + Observer.changingInstr(MI); + for (unsigned i = 1; i < MI.getNumOperands(); i += 2) { + MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB(); + MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); + extractParts(MI.getOperand(i).getReg(), NarrowTy, NumParts, + SrcRegs[i / 2]); + } + MachineBasicBlock &MBB = *MI.getParent(); + MIRBuilder.setInsertPt(MBB, MI); + for (unsigned i = 0; i < NumParts; ++i) { + DstRegs[i] = MRI.createGenericVirtualRegister(NarrowTy); + MachineInstrBuilder MIB = + MIRBuilder.buildInstr(TargetOpcode::G_PHI).addDef(DstRegs[i]); + for (unsigned j = 1; j < MI.getNumOperands(); j += 2) + MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1)); + } + MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI()); + MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs); +
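+ // All the narrow G_PHIs are now in place and their results have been + // re-merged into the original destination right after the PHI section.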
Observer.changedInstr(MI); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_EXTRACT_VECTOR_ELT: + case TargetOpcode::G_INSERT_VECTOR_ELT: { + if (TypeIdx != 2) + return UnableToLegalize; + + int OpIdx = MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3; + Observer.changingInstr(MI); + narrowScalarSrc(MI, NarrowTy, OpIdx); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_ICMP: { + uint64_t SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); + if (NarrowSize * 2 != SrcSize) + return UnableToLegalize; + + Observer.changingInstr(MI); + Register LHSL = MRI.createGenericVirtualRegister(NarrowTy); + Register LHSH = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2).getReg()); + + Register RHSL = MRI.createGenericVirtualRegister(NarrowTy); + Register RHSH = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3).getReg()); + + CmpInst::Predicate Pred = + static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); + LLT ResTy = MRI.getType(MI.getOperand(0).getReg()); + + if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) { + MachineInstrBuilder XorL = MIRBuilder.buildXor(NarrowTy, LHSL, RHSL); + MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH); + MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH); + MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0); + MIRBuilder.buildICmp(Pred, MI.getOperand(0).getReg(), Or, Zero); + } else { + MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, ResTy, LHSH, RHSH); + MachineInstrBuilder CmpHEQ = + MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy, LHSH, RHSH); + MachineInstrBuilder CmpLU = MIRBuilder.buildICmp( + ICmpInst::getUnsignedPredicate(Pred), ResTy, LHSL, RHSL); + MIRBuilder.buildSelect(MI.getOperand(0).getReg(), CmpHEQ, CmpLU, CmpH); + } + Observer.changedInstr(MI); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SEXT_INREG: { + if (TypeIdx != 0) + return UnableToLegalize; + + if (!MI.getOperand(2).isImm()) + return UnableToLegalize; + int64_t SizeInBits = MI.getOperand(2).getImm(); + + // So long as the new type has more bits than the bits we're extending we + // don't need to break it apart. + if (NarrowTy.getScalarSizeInBits() >= SizeInBits) { + Observer.changingInstr(MI); + // We don't lose any non-extension bits by truncating the src and + // sign-extending the dst. + MachineOperand &MO1 = MI.getOperand(1); + auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1.getReg()); + MO1.setReg(TruncMIB->getOperand(0).getReg()); + + MachineOperand &MO2 = MI.getOperand(0); + Register DstExt = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildInstr(TargetOpcode::G_SEXT, {MO2.getReg()}, {DstExt}); + MO2.setReg(DstExt); + Observer.changedInstr(MI); + return Legalized; + } + + // Break it apart. Components below the extension point are unmodified. The + // component containing the extension point becomes a narrower SEXT_INREG. + // Components above it are ashr'd from the component containing the + // extension point. + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + int NumParts = SizeOp0 / NarrowSize; + + // List the registers where the destination will be scattered. + SmallVector<Register, 2> DstRegs; + // List the registers where the source will be split. 
+ SmallVector<Register, 2> SrcRegs; + + // Create all the temporary registers. + for (int i = 0; i < NumParts; ++i) { + Register SrcReg = MRI.createGenericVirtualRegister(NarrowTy); + + SrcRegs.push_back(SrcReg); + } + + // Explode the big arguments into smaller chunks. + MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1).getReg()); + + Register AshrCstReg = + MIRBuilder.buildConstant(NarrowTy, NarrowTy.getScalarSizeInBits() - 1) + ->getOperand(0) + .getReg(); + Register FullExtensionReg = 0; + Register PartialExtensionReg = 0; + + // Do the operation on each small part. + for (int i = 0; i < NumParts; ++i) { + if ((i + 1) * NarrowTy.getScalarSizeInBits() < SizeInBits) + DstRegs.push_back(SrcRegs[i]); + else if (i * NarrowTy.getScalarSizeInBits() > SizeInBits) { + assert(PartialExtensionReg && + "Expected to visit partial extension before full"); + if (FullExtensionReg) { + DstRegs.push_back(FullExtensionReg); + continue; + } + DstRegs.push_back(MIRBuilder + .buildInstr(TargetOpcode::G_ASHR, {NarrowTy}, + {PartialExtensionReg, AshrCstReg}) + ->getOperand(0) + .getReg()); + FullExtensionReg = DstRegs.back(); + } else { + DstRegs.push_back( + MIRBuilder + .buildInstr( + TargetOpcode::G_SEXT_INREG, {NarrowTy}, + {SrcRegs[i], SizeInBits % NarrowTy.getScalarSizeInBits()}) + ->getOperand(0) + .getReg()); + PartialExtensionReg = DstRegs.back(); + } + } + + // Gather the destination registers into the final destination. + Register DstReg = MI.getOperand(0).getReg(); + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; + } + } +} + +void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy, + unsigned OpIdx, unsigned ExtOpcode) { + MachineOperand &MO = MI.getOperand(OpIdx); + auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO.getReg()}); + MO.setReg(ExtB->getOperand(0).getReg()); +} + +void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, + unsigned OpIdx) { + MachineOperand &MO = MI.getOperand(OpIdx); + auto ExtB = MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {NarrowTy}, + {MO.getReg()}); + MO.setReg(ExtB->getOperand(0).getReg()); +} + +void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy, + unsigned OpIdx, unsigned TruncOpcode) { + MachineOperand &MO = MI.getOperand(OpIdx); + Register DstExt = MRI.createGenericVirtualRegister(WideTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt}); + MO.setReg(DstExt); +} + +void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy, + unsigned OpIdx, unsigned ExtOpcode) { + MachineOperand &MO = MI.getOperand(OpIdx); + Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc}); + MO.setReg(DstTrunc); +} + +void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy, + unsigned OpIdx) { + MachineOperand &MO = MI.getOperand(OpIdx); + Register DstExt = MRI.createGenericVirtualRegister(WideTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildExtract(MO.getReg(), DstExt, 0); + MO.setReg(DstExt); +} + +void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, + unsigned OpIdx) { + MachineOperand &MO = MI.getOperand(OpIdx); + + LLT OldTy = MRI.getType(MO.getReg()); + unsigned OldElts = OldTy.getNumElements(); + unsigned NewElts = MoreTy.getNumElements(); + + unsigned NumParts = 
NewElts / OldElts; + + // Use concat_vectors if the result is a multiple of the number of elements. + if (NumParts * OldElts == NewElts) { + SmallVector<Register, 8> Parts; + Parts.push_back(MO.getReg()); + + Register ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0); + for (unsigned I = 1; I != NumParts; ++I) + Parts.push_back(ImpDef); + + auto Concat = MIRBuilder.buildConcatVectors(MoreTy, Parts); + MO.setReg(Concat.getReg(0)); + return; + } + + Register MoreReg = MRI.createGenericVirtualRegister(MoreTy); + Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0); + MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0); + MO.setReg(MoreReg); +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + if (DstTy.isVector()) + return UnableToLegalize; + + Register Src1 = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(Src1); + const int DstSize = DstTy.getSizeInBits(); + const int SrcSize = SrcTy.getSizeInBits(); + const int WideSize = WideTy.getSizeInBits(); + const int NumMerge = (DstSize + WideSize - 1) / WideSize; + + unsigned NumOps = MI.getNumOperands(); + unsigned NumSrc = MI.getNumOperands() - 1; + unsigned PartSize = DstTy.getSizeInBits() / NumSrc; + + if (WideSize >= DstSize) { + // Directly pack the bits in the target type. + Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1).getReg(0); + + for (unsigned I = 2; I != NumOps; ++I) { + const unsigned Offset = (I - 1) * PartSize; + + Register SrcReg = MI.getOperand(I).getReg(); + assert(MRI.getType(SrcReg) == LLT::scalar(PartSize)); + + auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg); + + Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg : + MRI.createGenericVirtualRegister(WideTy); + + auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset); + auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt); + MIRBuilder.buildOr(NextResult, ResultReg, Shl); + ResultReg = NextResult; + } + + if (WideSize > DstSize) + MIRBuilder.buildTrunc(DstReg, ResultReg); + else if (DstTy.isPointer()) + MIRBuilder.buildIntToPtr(DstReg, ResultReg); + + MI.eraseFromParent(); + return Legalized; + } + + // Unmerge the original values to the GCD type, and recombine to the next + // multiple greater than the original type. + // + // %3:_(s12) = G_MERGE_VALUES %0:_(s4), %1:_(s4), %2:_(s4) -> s6 + // %4:_(s2), %5:_(s2) = G_UNMERGE_VALUES %0 + // %6:_(s2), %7:_(s2) = G_UNMERGE_VALUES %1 + // %8:_(s2), %9:_(s2) = G_UNMERGE_VALUES %2 + // %10:_(s6) = G_MERGE_VALUES %4, %5, %6 + // %11:_(s6) = G_MERGE_VALUES %7, %8, %9 + // %12:_(s12) = G_MERGE_VALUES %10, %11 + // + // Padding with undef if necessary: + // + // %2:_(s8) = G_MERGE_VALUES %0:_(s4), %1:_(s4) -> s6 + // %3:_(s2), %4:_(s2) = G_UNMERGE_VALUES %0 + // %5:_(s2), %6:_(s2) = G_UNMERGE_VALUES %1 + // %7:_(s2) = G_IMPLICIT_DEF + // %8:_(s6) = G_MERGE_VALUES %3, %4, %5 + // %9:_(s6) = G_MERGE_VALUES %6, %7, %7 + // %10:_(s12) = G_MERGE_VALUES %8, %9 + + const int GCD = greatestCommonDivisor(SrcSize, WideSize); + LLT GCDTy = LLT::scalar(GCD); + + SmallVector<Register, 8> Parts; + SmallVector<Register, 8> NewMergeRegs; + SmallVector<Register, 8> Unmerges; + LLT WideDstTy = LLT::scalar(NumMerge * WideSize); + + // Decompose the original operands if they don't evenly divide. 
+ for (int I = 1, E = MI.getNumOperands(); I != E; ++I) { + Register SrcReg = MI.getOperand(I).getReg(); + if (GCD == SrcSize) { + Unmerges.push_back(SrcReg); + } else { + auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); + for (int J = 0, JE = Unmerge->getNumOperands() - 1; J != JE; ++J) + Unmerges.push_back(Unmerge.getReg(J)); + } + } + + // Pad with undef to the next size that is a multiple of the requested size. + if (static_cast<int>(Unmerges.size()) != NumMerge * WideSize) { + Register UndefReg = MIRBuilder.buildUndef(GCDTy).getReg(0); + for (int I = Unmerges.size(); I != NumMerge * WideSize; ++I) + Unmerges.push_back(UndefReg); + } + + const int PartsPerGCD = WideSize / GCD; + + // Build merges of each piece. + ArrayRef<Register> Slicer(Unmerges); + for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) { + auto Merge = MIRBuilder.buildMerge(WideTy, Slicer.take_front(PartsPerGCD)); + NewMergeRegs.push_back(Merge.getReg(0)); + } + + // A truncate may be necessary if the requested type doesn't evenly divide the + // original result type. + if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) { + MIRBuilder.buildMerge(DstReg, NewMergeRegs); + } else { + auto FinalMerge = MIRBuilder.buildMerge(WideDstTy, NewMergeRegs); + MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0)); + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { + if (TypeIdx != 0) + return UnableToLegalize; + + unsigned NumDst = MI.getNumOperands() - 1; + Register SrcReg = MI.getOperand(NumDst).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + if (!SrcTy.isScalar()) + return UnableToLegalize; + + Register Dst0Reg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(Dst0Reg); + if (!DstTy.isScalar()) + return UnableToLegalize; + + unsigned NewSrcSize = NumDst * WideTy.getSizeInBits(); + LLT NewSrcTy = LLT::scalar(NewSrcSize); + unsigned SizeDiff = WideTy.getSizeInBits() - DstTy.getSizeInBits(); + + auto WideSrc = MIRBuilder.buildZExt(NewSrcTy, SrcReg); + + for (unsigned I = 1; I != NumDst; ++I) { + auto ShiftAmt = MIRBuilder.buildConstant(NewSrcTy, SizeDiff * I); + auto Shl = MIRBuilder.buildShl(NewSrcTy, WideSrc, ShiftAmt); + WideSrc = MIRBuilder.buildOr(NewSrcTy, WideSrc, Shl); + } + + Observer.changingInstr(MI); + + MI.getOperand(NumDst).setReg(WideSrc->getOperand(0).getReg()); + for (unsigned I = 0; I != NumDst; ++I) + widenScalarDst(MI, WideTy, I); + + Observer.changedInstr(MI); + + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + + LLT DstTy = MRI.getType(DstReg); + unsigned Offset = MI.getOperand(2).getImm(); + + if (TypeIdx == 0) { + if (SrcTy.isVector() || DstTy.isVector()) + return UnableToLegalize; + + SrcOp Src(SrcReg); + if (SrcTy.isPointer()) { + // Extracts from pointers can be handled only if they are really just + // simple integers. + const DataLayout &DL = MIRBuilder.getDataLayout(); + if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) + return UnableToLegalize; + + LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits()); + Src = MIRBuilder.buildPtrToInt(SrcAsIntTy, Src); + SrcTy = SrcAsIntTy; + } + + if (DstTy.isPointer()) + return UnableToLegalize; + + if (Offset == 0) { + // Avoid a shift in the degenerate case. 
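+ // E.g. extracting s16 at offset 0 from an s32 source with WideTy = s64 is + // just: + // %ext:_(s64) = G_ANYEXT %src:_(s32) + // %dst:_(s16) = G_TRUNC %ext:_(s64)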
+ MIRBuilder.buildTrunc(DstReg, + MIRBuilder.buildAnyExtOrTrunc(WideTy, Src)); + MI.eraseFromParent(); + return Legalized; + } + + // Do a shift in the source type. + LLT ShiftTy = SrcTy; + if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) { + Src = MIRBuilder.buildAnyExt(WideTy, Src); + ShiftTy = WideTy; + } + + auto LShr = MIRBuilder.buildLShr( + ShiftTy, Src, MIRBuilder.buildConstant(ShiftTy, Offset)); + MIRBuilder.buildTrunc(DstReg, LShr); + MI.eraseFromParent(); + return Legalized; + } + + if (SrcTy.isScalar()) { + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + Observer.changedInstr(MI); + return Legalized; + } + + if (!SrcTy.isVector()) + return UnableToLegalize; + + if (DstTy != SrcTy.getElementType()) + return UnableToLegalize; + + if (Offset % SrcTy.getScalarSizeInBits() != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + + MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) * + Offset); + widenScalarDst(MI, WideTy.getScalarType(), 0); + Observer.changedInstr(MI); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { + if (TypeIdx != 0) + return UnableToLegalize; + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { + MIRBuilder.setInstr(MI); + + switch (MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_EXTRACT: + return widenScalarExtract(MI, TypeIdx, WideTy); + case TargetOpcode::G_INSERT: + return widenScalarInsert(MI, TypeIdx, WideTy); + case TargetOpcode::G_MERGE_VALUES: + return widenScalarMergeValues(MI, TypeIdx, WideTy); + case TargetOpcode::G_UNMERGE_VALUES: + return widenScalarUnmergeValues(MI, TypeIdx, WideTy); + case TargetOpcode::G_UADDO: + case TargetOpcode::G_USUBO: { + if (TypeIdx == 1) + return UnableToLegalize; // TODO + auto LHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy}, + {MI.getOperand(2).getReg()}); + auto RHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy}, + {MI.getOperand(3).getReg()}); + unsigned Opcode = MI.getOpcode() == TargetOpcode::G_UADDO + ? TargetOpcode::G_ADD + : TargetOpcode::G_SUB; + // Do the arithmetic in the larger type. + auto NewOp = MIRBuilder.buildInstr(Opcode, {WideTy}, {LHSZext, RHSZext}); + LLT OrigTy = MRI.getType(MI.getOperand(0).getReg()); + APInt Mask = APInt::getAllOnesValue(OrigTy.getSizeInBits()); + auto AndOp = MIRBuilder.buildInstr( + TargetOpcode::G_AND, {WideTy}, + {NewOp, MIRBuilder.buildConstant(WideTy, Mask.getZExtValue())}); + // There is no overflow if the AndOp is the same as NewOp. + MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1).getReg(), NewOp, + AndOp); + // Now trunc the NewOp to the original result.
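+ // The low bits of NewOp hold the wrapping narrow result for both the add + // and the sub case, so a plain truncate recovers the value.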
+ MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), NewOp); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_CTTZ: + case TargetOpcode::G_CTTZ_ZERO_UNDEF: + case TargetOpcode::G_CTLZ: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: + case TargetOpcode::G_CTPOP: { + if (TypeIdx == 0) { + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + + Register SrcReg = MI.getOperand(1).getReg(); + + // First ZEXT the input. + auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg); + LLT CurTy = MRI.getType(SrcReg); + if (MI.getOpcode() == TargetOpcode::G_CTTZ) { + // The count is the same in the larger type except if the original + // value was zero. This can be handled by setting the bit just off + // the top of the original type. + auto TopBit = + APInt::getOneBitSet(WideTy.getSizeInBits(), CurTy.getSizeInBits()); + MIBSrc = MIRBuilder.buildOr( + WideTy, MIBSrc, MIRBuilder.buildConstant(WideTy, TopBit)); + } + + // Perform the operation at the larger size. + auto MIBNewOp = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, {MIBSrc}); + // This is already the correct result for CTPOP and CTTZ. + if (MI.getOpcode() == TargetOpcode::G_CTLZ || + MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) { + // The correct result is NewOp - (difference in size between WideTy and CurTy). + unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits(); + MIBNewOp = MIRBuilder.buildInstr( + TargetOpcode::G_SUB, {WideTy}, + {MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff)}); + } + + MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_BSWAP: { + Observer.changingInstr(MI); + Register DstReg = MI.getOperand(0).getReg(); + + Register ShrReg = MRI.createGenericVirtualRegister(WideTy); + Register DstExt = MRI.createGenericVirtualRegister(WideTy); + Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + + MI.getOperand(0).setReg(DstExt); + + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + + LLT Ty = MRI.getType(DstReg); + unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits(); + MIRBuilder.buildConstant(ShiftAmtReg, DiffBits); + MIRBuilder.buildInstr(TargetOpcode::G_LSHR) + .addDef(ShrReg) + .addUse(DstExt) + .addUse(ShiftAmtReg); + + MIRBuilder.buildTrunc(DstReg, ShrReg); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_BITREVERSE: { + Observer.changingInstr(MI); + + Register DstReg = MI.getOperand(0).getReg(); + LLT Ty = MRI.getType(DstReg); + unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits(); + + Register DstExt = MRI.createGenericVirtualRegister(WideTy); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + MI.getOperand(0).setReg(DstExt); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + + auto ShiftAmt = MIRBuilder.buildConstant(WideTy, DiffBits); + auto Shift = MIRBuilder.buildLShr(WideTy, DstExt, ShiftAmt); + MIRBuilder.buildTrunc(DstReg, Shift); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_ADD: + case TargetOpcode::G_AND: + case TargetOpcode::G_MUL: + case TargetOpcode::G_OR: + case TargetOpcode::G_XOR: + case TargetOpcode::G_SUB: + // Perform operation at larger width (any extension is fine here, high bits + // don't affect the result) and then truncate the result back to the + // original type.
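+ // E.g. widening an s8 G_ADD to s32 produces roughly: + // %a32:_(s32) = G_ANYEXT %a:_(s8) + // %b32:_(s32) = G_ANYEXT %b:_(s8) + // %sum32:_(s32) = G_ADD %a32, %b32 + // %sum:_(s8) = G_TRUNC %sum32:_(s32)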
+ Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_SHL: + Observer.changingInstr(MI); + + if (TypeIdx == 0) { + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy); + } else { + assert(TypeIdx == 1); + // The "number of bits to shift" operand must preserve its value as an + // unsigned integer: + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); + } + + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_SDIV: + case TargetOpcode::G_SREM: + case TargetOpcode::G_SMIN: + case TargetOpcode::G_SMAX: + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: + Observer.changingInstr(MI); + + if (TypeIdx == 0) { + unsigned CvtOp = MI.getOpcode() == TargetOpcode::G_ASHR ? + TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; + + widenScalarSrc(MI, WideTy, 1, CvtOp); + widenScalarDst(MI, WideTy); + } else { + assert(TypeIdx == 1); + // The "number of bits to shift" operand must preserve its value as an + // unsigned integer: + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); + } + + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_UDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_UMIN: + case TargetOpcode::G_UMAX: + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_SELECT: + Observer.changingInstr(MI); + if (TypeIdx == 0) { + // Perform operation at larger width (any extension is fine here, high + // bits don't affect the result) and then truncate the result back to the + // original type. + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); + widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy); + } else { + bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector(); + // Explicit extension is required here since high bits affect the result. 
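+ // getBoolExtOp picks the extension matching the target's boolean contents; + // for vector conditions this is typically a sign-extend so that each + // selected lane sees an all-ones or all-zeros mask.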
+ widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false)); + } + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_FPTOSI: + case TargetOpcode::G_FPTOUI: + Observer.changingInstr(MI); + + if (TypeIdx == 0) + widenScalarDst(MI, WideTy); + else + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT); + + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_SITOFP: + if (TypeIdx != 1) + return UnableToLegalize; + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_UITOFP: + if (TypeIdx != 1) + return UnableToLegalize; + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_LOAD: + case TargetOpcode::G_SEXTLOAD: + case TargetOpcode::G_ZEXTLOAD: + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_STORE: { + if (TypeIdx != 0) + return UnableToLegalize; + + LLT Ty = MRI.getType(MI.getOperand(0).getReg()); + if (!isPowerOf2_32(Ty.getSizeInBits())) + return UnableToLegalize; + + Observer.changingInstr(MI); + + unsigned ExtType = Ty.getScalarSizeInBits() == 1 ? + TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT; + widenScalarSrc(MI, WideTy, 0, ExtType); + + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_CONSTANT: { + MachineOperand &SrcMO = MI.getOperand(1); + LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); + const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits()); + Observer.changingInstr(MI); + SrcMO.setCImm(ConstantInt::get(Ctx, Val)); + + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_FCONSTANT: { + MachineOperand &SrcMO = MI.getOperand(1); + LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); + APFloat Val = SrcMO.getFPImm()->getValueAPF(); + bool LosesInfo; + switch (WideTy.getSizeInBits()) { + case 32: + Val.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, + &LosesInfo); + break; + case 64: + Val.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, + &LosesInfo); + break; + default: + return UnableToLegalize; + } + + assert(!LosesInfo && "extend should always be lossless"); + + Observer.changingInstr(MI); + SrcMO.setFPImm(ConstantFP::get(Ctx, Val)); + + widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_IMPLICIT_DEF: { + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_BRCOND: + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 0, MIRBuilder.getBoolExtOp(false, false)); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_FCMP: + Observer.changingInstr(MI); + if (TypeIdx == 0) + widenScalarDst(MI, WideTy); + else { + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT); + widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT); + } + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_ICMP: + Observer.changingInstr(MI); + if (TypeIdx == 0) + widenScalarDst(MI, WideTy); + else { + unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>( + MI.getOperand(1).getPredicate())) + ? 
TargetOpcode::G_SEXT + : TargetOpcode::G_ZEXT; + widenScalarSrc(MI, WideTy, 2, ExtOpcode); + widenScalarSrc(MI, WideTy, 3, ExtOpcode); + } + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_GEP: + assert(TypeIdx == 1 && "unable to legalize pointer of GEP"); + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_PHI: { + assert(TypeIdx == 0 && "Expecting only Idx 0"); + + Observer.changingInstr(MI); + for (unsigned I = 1; I < MI.getNumOperands(); I += 2) { + MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); + MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); + widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT); + } + + MachineBasicBlock &MBB = *MI.getParent(); + MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_EXTRACT_VECTOR_ELT: { + if (TypeIdx == 0) { + Register VecReg = MI.getOperand(1).getReg(); + LLT VecTy = MRI.getType(VecReg); + Observer.changingInstr(MI); + + widenScalarSrc(MI, LLT::vector(VecTy.getNumElements(), + WideTy.getSizeInBits()), + 1, TargetOpcode::G_SEXT); + + widenScalarDst(MI, WideTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + + if (TypeIdx != 2) + return UnableToLegalize; + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_FADD: + case TargetOpcode::G_FMUL: + case TargetOpcode::G_FSUB: + case TargetOpcode::G_FMA: + case TargetOpcode::G_FMAD: + case TargetOpcode::G_FNEG: + case TargetOpcode::G_FABS: + case TargetOpcode::G_FCANONICALIZE: + case TargetOpcode::G_FMINNUM: + case TargetOpcode::G_FMAXNUM: + case TargetOpcode::G_FMINNUM_IEEE: + case TargetOpcode::G_FMAXNUM_IEEE: + case TargetOpcode::G_FMINIMUM: + case TargetOpcode::G_FMAXIMUM: + case TargetOpcode::G_FDIV: + case TargetOpcode::G_FREM: + case TargetOpcode::G_FCEIL: + case TargetOpcode::G_FFLOOR: + case TargetOpcode::G_FCOS: + case TargetOpcode::G_FSIN: + case TargetOpcode::G_FLOG10: + case TargetOpcode::G_FLOG: + case TargetOpcode::G_FLOG2: + case TargetOpcode::G_FRINT: + case TargetOpcode::G_FNEARBYINT: + case TargetOpcode::G_FSQRT: + case TargetOpcode::G_FEXP: + case TargetOpcode::G_FEXP2: + case TargetOpcode::G_FPOW: + case TargetOpcode::G_INTRINSIC_TRUNC: + case TargetOpcode::G_INTRINSIC_ROUND: + assert(TypeIdx == 0); + Observer.changingInstr(MI); + + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) + widenScalarSrc(MI, WideTy, I, TargetOpcode::G_FPEXT); + + widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_INTTOPTR: + if (TypeIdx != 1) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_PTRTOINT: + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_BUILD_VECTOR: { + Observer.changingInstr(MI); + + const LLT WideEltTy = TypeIdx == 1 ? WideTy : WideTy.getElementType(); + for (int I = 1, E = MI.getNumOperands(); I != E; ++I) + widenScalarSrc(MI, WideEltTy, I, TargetOpcode::G_ANYEXT); + + // Avoid changing the result vector type if the source element type was + // requested. 
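+ // E.g. with TypeIdx == 1 and WideTy = s32, a G_BUILD_VECTOR of s16 + // elements producing <4 x s16> becomes a G_BUILD_VECTOR_TRUNC of s32 + // elements with the same <4 x s16> result type.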
+ if (TypeIdx == 1) { + auto &TII = *MI.getMF()->getSubtarget().getInstrInfo(); + MI.setDesc(TII.get(TargetOpcode::G_BUILD_VECTOR_TRUNC)); + } else { + widenScalarDst(MI, WideTy, 0); + } + + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_SEXT_INREG: + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy, 0, TargetOpcode::G_TRUNC); + Observer.changedInstr(MI); + return Legalized; + } +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) { + using namespace TargetOpcode; + MIRBuilder.setInstr(MI); + + switch(MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_SREM: + case TargetOpcode::G_UREM: { + Register QuotReg = MRI.createGenericVirtualRegister(Ty); + MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV) + .addDef(QuotReg) + .addUse(MI.getOperand(1).getReg()) + .addUse(MI.getOperand(2).getReg()); + + Register ProdReg = MRI.createGenericVirtualRegister(Ty); + MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg()); + MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), + ProdReg); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SADDO: + case TargetOpcode::G_SSUBO: + return lowerSADDO_SSUBO(MI); + case TargetOpcode::G_SMULO: + case TargetOpcode::G_UMULO: { + // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the + // result. + Register Res = MI.getOperand(0).getReg(); + Register Overflow = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); + + MIRBuilder.buildMul(Res, LHS, RHS); + + unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO + ? TargetOpcode::G_SMULH + : TargetOpcode::G_UMULH; + + Register HiPart = MRI.createGenericVirtualRegister(Ty); + MIRBuilder.buildInstr(Opcode) + .addDef(HiPart) + .addUse(LHS) + .addUse(RHS); + + Register Zero = MRI.createGenericVirtualRegister(Ty); + MIRBuilder.buildConstant(Zero, 0); + + // For *signed* multiply, overflow is detected by checking: + // (hi != (lo >> bitwidth-1)) + if (Opcode == TargetOpcode::G_SMULH) { + Register Shifted = MRI.createGenericVirtualRegister(Ty); + Register ShiftAmt = MRI.createGenericVirtualRegister(Ty); + MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1); + MIRBuilder.buildInstr(TargetOpcode::G_ASHR) + .addDef(Shifted) + .addUse(Res) + .addUse(ShiftAmt); + MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted); + } else { + MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero); + } + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_FNEG: { + // TODO: Handle vector types once we are able to + // represent them. 
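+ // Lowered as a subtraction from negative zero, e.g. for s32 roughly: + // %negzero:_(s32) = G_FCONSTANT float -0.0 + // %dst:_(s32) = G_FSUB %negzero, %src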
+ if (Ty.isVector()) + return UnableToLegalize; + Register Res = MI.getOperand(0).getReg(); + Type *ZeroTy; + LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); + switch (Ty.getSizeInBits()) { + case 16: + ZeroTy = Type::getHalfTy(Ctx); + break; + case 32: + ZeroTy = Type::getFloatTy(Ctx); + break; + case 64: + ZeroTy = Type::getDoubleTy(Ctx); + break; + case 128: + ZeroTy = Type::getFP128Ty(Ctx); + break; + default: + llvm_unreachable("unexpected floating-point type"); + } + ConstantFP &ZeroForNegation = + *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy)); + auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation); + Register SubByReg = MI.getOperand(1).getReg(); + Register ZeroReg = Zero->getOperand(0).getReg(); + MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg}, + MI.getFlags()); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_FSUB: { + // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)). + // First, check if G_FNEG is marked as Lower. If so, we may + // end up with an infinite loop as G_FSUB is used to legalize G_FNEG. + if (LI.getAction({G_FNEG, {Ty}}).Action == Lower) + return UnableToLegalize; + Register Res = MI.getOperand(0).getReg(); + Register LHS = MI.getOperand(1).getReg(); + Register RHS = MI.getOperand(2).getReg(); + Register Neg = MRI.createGenericVirtualRegister(Ty); + MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS); + MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags()); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_FMAD: + return lowerFMad(MI); + case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: { + Register OldValRes = MI.getOperand(0).getReg(); + Register SuccessRes = MI.getOperand(1).getReg(); + Register Addr = MI.getOperand(2).getReg(); + Register CmpVal = MI.getOperand(3).getReg(); + Register NewVal = MI.getOperand(4).getReg(); + MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal, + **MI.memoperands_begin()); + MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_LOAD: + case TargetOpcode::G_SEXTLOAD: + case TargetOpcode::G_ZEXTLOAD: { + // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT + Register DstReg = MI.getOperand(0).getReg(); + Register PtrReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + auto &MMO = **MI.memoperands_begin(); + + if (DstTy.getSizeInBits() == MMO.getSizeInBits()) { + if (MI.getOpcode() == TargetOpcode::G_LOAD) { + // This load needs splitting into power of 2 sized loads. + if (DstTy.isVector()) + return UnableToLegalize; + if (isPowerOf2_32(DstTy.getSizeInBits())) + return UnableToLegalize; // Don't know what we're being asked to do. + + // Our strategy here is to generate anyextending loads for the smaller + // types up to next power-2 result type, and then combine the two larger + // result values together, before truncating back down to the non-pow-2 + // type. + // E.g. v1 = i24 load => + // v2 = i32 load (2 byte) + // v3 = i32 load (1 byte) + // v4 = i32 shl v3, 16 + // v5 = i32 or v4, v2 + // v1 = i24 trunc v5 + // By doing this we generate the correct truncate which should get + // combined away as an artifact with a matching extend. 
+ uint64_t LargeSplitSize = PowerOf2Floor(DstTy.getSizeInBits()); + uint64_t SmallSplitSize = DstTy.getSizeInBits() - LargeSplitSize; + + MachineFunction &MF = MIRBuilder.getMF(); + MachineMemOperand *LargeMMO = + MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8); + MachineMemOperand *SmallMMO = MF.getMachineMemOperand( + &MMO, LargeSplitSize / 8, SmallSplitSize / 8); + + LLT PtrTy = MRI.getType(PtrReg); + unsigned AnyExtSize = NextPowerOf2(DstTy.getSizeInBits()); + LLT AnyExtTy = LLT::scalar(AnyExtSize); + Register LargeLdReg = MRI.createGenericVirtualRegister(AnyExtTy); + Register SmallLdReg = MRI.createGenericVirtualRegister(AnyExtTy); + auto LargeLoad = + MIRBuilder.buildLoad(LargeLdReg, PtrReg, *LargeMMO); + + auto OffsetCst = + MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8); + Register GEPReg = MRI.createGenericVirtualRegister(PtrTy); + auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0)); + auto SmallLoad = MIRBuilder.buildLoad(SmallLdReg, SmallPtr.getReg(0), + *SmallMMO); + + auto ShiftAmt = MIRBuilder.buildConstant(AnyExtTy, LargeSplitSize); + auto Shift = MIRBuilder.buildShl(AnyExtTy, SmallLoad, ShiftAmt); + auto Or = MIRBuilder.buildOr(AnyExtTy, Shift, LargeLoad); + MIRBuilder.buildTrunc(DstReg, {Or.getReg(0)}); + MI.eraseFromParent(); + return Legalized; + } + MIRBuilder.buildLoad(DstReg, PtrReg, MMO); + MI.eraseFromParent(); + return Legalized; + } + + if (DstTy.isScalar()) { + Register TmpReg = + MRI.createGenericVirtualRegister(LLT::scalar(MMO.getSizeInBits())); + MIRBuilder.buildLoad(TmpReg, PtrReg, MMO); + switch (MI.getOpcode()) { + default: + llvm_unreachable("Unexpected opcode"); + case TargetOpcode::G_LOAD: + MIRBuilder.buildAnyExt(DstReg, TmpReg); + break; + case TargetOpcode::G_SEXTLOAD: + MIRBuilder.buildSExt(DstReg, TmpReg); + break; + case TargetOpcode::G_ZEXTLOAD: + MIRBuilder.buildZExt(DstReg, TmpReg); + break; + } + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; + } + case TargetOpcode::G_STORE: { + // Lower a non-power of 2 store into multiple pow-2 stores. + // E.g. split an i24 store into an i16 store + i8 store. + // We do this by first extending the stored value to the next largest power + // of 2 type, and then using truncating stores to store the components. + // By doing this, likewise with G_LOAD, generate an extend that can be + // artifact-combined away instead of leaving behind extracts. + Register SrcReg = MI.getOperand(0).getReg(); + Register PtrReg = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + MachineMemOperand &MMO = **MI.memoperands_begin(); + if (SrcTy.getSizeInBits() != MMO.getSizeInBits()) + return UnableToLegalize; + if (SrcTy.isVector()) + return UnableToLegalize; + if (isPowerOf2_32(SrcTy.getSizeInBits())) + return UnableToLegalize; // Don't know what we're being asked to do. + + // Extend to the next pow-2. + const LLT ExtendTy = LLT::scalar(NextPowerOf2(SrcTy.getSizeInBits())); + auto ExtVal = MIRBuilder.buildAnyExt(ExtendTy, SrcReg); + + // Obtain the smaller value by shifting away the larger value. + uint64_t LargeSplitSize = PowerOf2Floor(SrcTy.getSizeInBits()); + uint64_t SmallSplitSize = SrcTy.getSizeInBits() - LargeSplitSize; + auto ShiftAmt = MIRBuilder.buildConstant(ExtendTy, LargeSplitSize); + auto SmallVal = MIRBuilder.buildLShr(ExtendTy, ExtVal, ShiftAmt); + + // Generate the GEP and truncating stores. 
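+    // E.g. for the i24 case: the value was extended to s32, the low 16 bits
+    // are stored with a 2 byte truncating store at the original pointer, and
+    // SmallVal (the extended value shifted right by 16) is stored with a
+    // 1 byte truncating store at offset 2.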
+    LLT PtrTy = MRI.getType(PtrReg);
+    auto OffsetCst =
+        MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
+    Register GEPReg = MRI.createGenericVirtualRegister(PtrTy);
+    auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0));
+
+    MachineFunction &MF = MIRBuilder.getMF();
+    MachineMemOperand *LargeMMO =
+        MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
+    MachineMemOperand *SmallMMO =
+        MF.getMachineMemOperand(&MMO, LargeSplitSize / 8, SmallSplitSize / 8);
+    MIRBuilder.buildStore(ExtVal.getReg(0), PtrReg, *LargeMMO);
+    MIRBuilder.buildStore(SmallVal.getReg(0), SmallPtr.getReg(0), *SmallMMO);
+    MI.eraseFromParent();
+    return Legalized;
+  }
+  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
+  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
+  case TargetOpcode::G_CTLZ:
+  case TargetOpcode::G_CTTZ:
+  case TargetOpcode::G_CTPOP:
+    return lowerBitCount(MI, TypeIdx, Ty);
+  case G_UADDO: {
+    Register Res = MI.getOperand(0).getReg();
+    Register CarryOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+
+    MIRBuilder.buildAdd(Res, LHS, RHS);
+    MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS);
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
+  case G_UADDE: {
+    Register Res = MI.getOperand(0).getReg();
+    Register CarryOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+    Register CarryIn = MI.getOperand(4).getReg();
+
+    Register TmpRes = MRI.createGenericVirtualRegister(Ty);
+    Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
+    Register RES_EQ_LHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register RES_ULT_LHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+
+    MIRBuilder.buildAdd(TmpRes, LHS, RHS);
+    MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
+    MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn);
+    // Res == LHS happens either when nothing was added (RHS and CarryIn both
+    // zero) or when the addition wrapped all the way around (RHS == -1 and
+    // CarryIn == 1), so in that case CarryIn is the carry-out; checking
+    // Res < LHS alone would miss the wrap-around case.
+    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, RES_EQ_LHS, Res, LHS);
+    MIRBuilder.buildICmp(CmpInst::ICMP_ULT, RES_ULT_LHS, Res, LHS);
+    MIRBuilder.buildSelect(CarryOut, RES_EQ_LHS, CarryIn, RES_ULT_LHS);
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
+  case G_USUBO: {
+    Register Res = MI.getOperand(0).getReg();
+    Register BorrowOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+
+    MIRBuilder.buildSub(Res, LHS, RHS);
+    MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS);
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
+  case G_USUBE: {
+    Register Res = MI.getOperand(0).getReg();
+    Register BorrowOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+    Register BorrowIn = MI.getOperand(4).getReg();
+
+    Register TmpRes = MRI.createGenericVirtualRegister(Ty);
+    Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
+    Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+
+    MIRBuilder.buildSub(TmpRes, LHS, RHS);
+    MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn);
+    MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn);
+    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LHS_EQ_RHS, LHS, RHS);
+    MIRBuilder.buildICmp(CmpInst::ICMP_ULT, LHS_ULT_RHS, LHS, RHS);
+    MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS);
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
+  case G_UITOFP:
+    return lowerUITOFP(MI, TypeIdx, Ty);
+  case G_SITOFP:
+    return lowerSITOFP(MI, TypeIdx, Ty);
+  case G_FPTOUI:
+    return lowerFPTOUI(MI, TypeIdx, Ty);
+  case G_SMIN:
+  case G_SMAX:
+  case G_UMIN:
+  case G_UMAX:
+    return lowerMinMax(MI, TypeIdx, Ty);
+  case G_FCOPYSIGN:
+    return lowerFCopySign(MI, TypeIdx, Ty);
+  case G_FMINNUM:
+  case G_FMAXNUM:
+    return lowerFMinNumMaxNum(MI);
+  case G_UNMERGE_VALUES:
+ return lowerUnmergeValues(MI); + case TargetOpcode::G_SEXT_INREG: { + assert(MI.getOperand(2).isImm() && "Expected immediate"); + int64_t SizeInBits = MI.getOperand(2).getImm(); + + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + Register TmpRes = MRI.createGenericVirtualRegister(DstTy); + + auto MIBSz = MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - SizeInBits); + MIRBuilder.buildInstr(TargetOpcode::G_SHL, {TmpRes}, {SrcReg, MIBSz->getOperand(0).getReg()}); + MIRBuilder.buildInstr(TargetOpcode::G_ASHR, {DstReg}, {TmpRes, MIBSz->getOperand(0).getReg()}); + MI.eraseFromParent(); + return Legalized; + } + case G_SHUFFLE_VECTOR: + return lowerShuffleVector(MI); + case G_DYN_STACKALLOC: + return lowerDynStackAlloc(MI); + case G_EXTRACT: + return lowerExtract(MI); + case G_INSERT: + return lowerInsert(MI); + } +} + +LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef( + MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { + SmallVector<Register, 2> DstRegs; + + unsigned NarrowSize = NarrowTy.getSizeInBits(); + Register DstReg = MI.getOperand(0).getReg(); + unsigned Size = MRI.getType(DstReg).getSizeInBits(); + int NumParts = Size / NarrowSize; + // FIXME: Don't know how to handle the situation where the small vectors + // aren't all the same size yet. + if (Size % NarrowSize != 0) + return UnableToLegalize; + + for (int i = 0; i < NumParts; ++i) { + Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildUndef(TmpReg); + DstRegs.push_back(TmpReg); + } + + if (NarrowTy.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + const unsigned Opc = MI.getOpcode(); + const unsigned NumOps = MI.getNumOperands() - 1; + const unsigned NarrowSize = NarrowTy.getSizeInBits(); + const Register DstReg = MI.getOperand(0).getReg(); + const unsigned Flags = MI.getFlags(); + const LLT DstTy = MRI.getType(DstReg); + const unsigned Size = DstTy.getSizeInBits(); + const int NumParts = Size / NarrowSize; + const LLT EltTy = DstTy.getElementType(); + const unsigned EltSize = EltTy.getSizeInBits(); + const unsigned BitsForNumParts = NarrowSize * NumParts; + + // Check if we have any leftovers. If we do, then only handle the case where + // the leftover is one element. + if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size) + return UnableToLegalize; + + if (BitsForNumParts != Size) { + Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy); + MIRBuilder.buildUndef(AccumDstReg); + + // Handle the pieces which evenly divide into the requested type with + // extract/op/insert sequence. 
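+    // E.g. a <3 x s32> G_ADD with NarrowTy = <2 x s32> has NumParts = 1 and
+    // one leftover element: bits [0, 64) of each operand are extracted and
+    // added as <2 x s32>, then the remaining s32 element is handled below at
+    // offset 64.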
+ for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) { + SmallVector<SrcOp, 4> SrcOps; + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { + Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset); + SrcOps.push_back(PartOpReg); + } + + Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags); + + Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy); + MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset); + AccumDstReg = PartInsertReg; + } + + // Handle the remaining element sized leftover piece. + SmallVector<SrcOp, 4> SrcOps; + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { + Register PartOpReg = MRI.createGenericVirtualRegister(EltTy); + MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), + BitsForNumParts); + SrcOps.push_back(PartOpReg); + } + + Register PartDstReg = MRI.createGenericVirtualRegister(EltTy); + MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags); + MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts); + MI.eraseFromParent(); + + return Legalized; + } + + SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs; + + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs); + + if (NumOps >= 2) + extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src1Regs); + + if (NumOps >= 3) + extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs); + + for (int i = 0; i < NumParts; ++i) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); + + if (NumOps == 1) + MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags); + else if (NumOps == 2) { + MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i], Src1Regs[i]}, Flags); + } else if (NumOps == 3) { + MIRBuilder.buildInstr(Opc, {DstReg}, + {Src0Regs[i], Src1Regs[i], Src2Regs[i]}, Flags); + } + + DstRegs.push_back(DstReg); + } + + if (NarrowTy.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +// Handle splitting vector operations which need to have the same number of +// elements in each type index, but each type index may have a different element +// type. +// +// e.g. <4 x s64> = G_SHL <4 x s64>, <4 x s32> -> +// <2 x s64> = G_SHL <2 x s64>, <2 x s32> +// <2 x s64> = G_SHL <2 x s64>, <2 x s32> +// +// Also handles some irregular breakdown cases, e.g. +// e.g. <3 x s64> = G_SHL <3 x s64>, <3 x s32> -> +// <2 x s64> = G_SHL <2 x s64>, <2 x s32> +// s64 = G_SHL s64, s32 +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorMultiEltType( + MachineInstr &MI, unsigned TypeIdx, LLT NarrowTyArg) { + if (TypeIdx != 0) + return UnableToLegalize; + + const LLT NarrowTy0 = NarrowTyArg; + const unsigned NewNumElts = + NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1; + + const Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT LeftoverTy0; + + // All of the operands need to have the same number of elements, so if we can + // determine a type breakdown for the result type, we can for all of the + // source types. 
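+  // E.g. for the irregular <3 x s64> shift above, breaking the result down
+  // with NarrowTy0 = <2 x s64> gives NumParts = 1 plus LeftoverTy0 = s64 for
+  // the odd element.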
+ int NumParts = getNarrowTypeBreakDown(DstTy, NarrowTy0, LeftoverTy0).first; + if (NumParts < 0) + return UnableToLegalize; + + SmallVector<MachineInstrBuilder, 4> NewInsts; + + SmallVector<Register, 4> DstRegs, LeftoverDstRegs; + SmallVector<Register, 4> PartRegs, LeftoverRegs; + + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { + LLT LeftoverTy; + Register SrcReg = MI.getOperand(I).getReg(); + LLT SrcTyI = MRI.getType(SrcReg); + LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType()); + LLT LeftoverTyI; + + // Split this operand into the requested typed registers, and any leftover + // required to reproduce the original type. + if (!extractParts(SrcReg, SrcTyI, NarrowTyI, LeftoverTyI, PartRegs, + LeftoverRegs)) + return UnableToLegalize; + + if (I == 1) { + // For the first operand, create an instruction for each part and setup + // the result. + for (Register PartReg : PartRegs) { + Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0); + NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) + .addDef(PartDstReg) + .addUse(PartReg)); + DstRegs.push_back(PartDstReg); + } + + for (Register LeftoverReg : LeftoverRegs) { + Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0); + NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) + .addDef(PartDstReg) + .addUse(LeftoverReg)); + LeftoverDstRegs.push_back(PartDstReg); + } + } else { + assert(NewInsts.size() == PartRegs.size() + LeftoverRegs.size()); + + // Add the newly created operand splits to the existing instructions. The + // odd-sized pieces are ordered after the requested NarrowTyArg sized + // pieces. + unsigned InstCount = 0; + for (unsigned J = 0, JE = PartRegs.size(); J != JE; ++J) + NewInsts[InstCount++].addUse(PartRegs[J]); + for (unsigned J = 0, JE = LeftoverRegs.size(); J != JE; ++J) + NewInsts[InstCount++].addUse(LeftoverRegs[J]); + } + + PartRegs.clear(); + LeftoverRegs.clear(); + } + + // Insert the newly built operations and rebuild the result register. + for (auto &MIB : NewInsts) + MIRBuilder.insertInstr(MIB); + + insertParts(DstReg, DstTy, NarrowTy0, DstRegs, LeftoverTy0, LeftoverDstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 0) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(SrcReg); + + LLT NarrowTy0 = NarrowTy; + LLT NarrowTy1; + unsigned NumParts; + + if (NarrowTy.isVector()) { + // Uneven breakdown not handled. 
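+    // E.g. <4 x s16> = G_FPTRUNC <4 x s32> with NarrowTy = <2 x s16> splits
+    // into two <2 x s16> = G_FPTRUNC <2 x s32> pieces (NumParts = 2), which
+    // are concatenated back together below.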
+ NumParts = DstTy.getNumElements() / NarrowTy.getNumElements(); + if (NumParts * NarrowTy.getNumElements() != DstTy.getNumElements()) + return UnableToLegalize; + + NarrowTy1 = LLT::vector(NumParts, SrcTy.getElementType().getSizeInBits()); + } else { + NumParts = DstTy.getNumElements(); + NarrowTy1 = SrcTy.getElementType(); + } + + SmallVector<Register, 4> SrcRegs, DstRegs; + extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs); + + for (unsigned I = 0; I < NumParts; ++I) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode()) + .addDef(DstReg) + .addUse(SrcRegs[I]); + + NewInst->setFlags(MI.getFlags()); + DstRegs.push_back(DstReg); + } + + if (NarrowTy.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register Src0Reg = MI.getOperand(2).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(Src0Reg); + + unsigned NumParts; + LLT NarrowTy0, NarrowTy1; + + if (TypeIdx == 0) { + unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; + unsigned OldElts = DstTy.getNumElements(); + + NarrowTy0 = NarrowTy; + NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : DstTy.getNumElements(); + NarrowTy1 = NarrowTy.isVector() ? + LLT::vector(NarrowTy.getNumElements(), SrcTy.getScalarSizeInBits()) : + SrcTy.getElementType(); + + } else { + unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; + unsigned OldElts = SrcTy.getNumElements(); + + NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : + NarrowTy.getNumElements(); + NarrowTy0 = LLT::vector(NarrowTy.getNumElements(), + DstTy.getScalarSizeInBits()); + NarrowTy1 = NarrowTy; + } + + // FIXME: Don't know how to handle the situation where the small vectors + // aren't all the same size yet. 
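+  // E.g. <4 x s1> = G_ICMP pred, <4 x s32> with TypeIdx = 0 and
+  // NarrowTy = <2 x s1> gives NumParts = 2 and NarrowTy1 = <2 x s32>: two
+  // <2 x s1> compares whose results are concatenated back together.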
+ if (NarrowTy1.isVector() && + NarrowTy1.getNumElements() * NumParts != DstTy.getNumElements()) + return UnableToLegalize; + + CmpInst::Predicate Pred + = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); + + SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; + extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs); + extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs); + + for (unsigned I = 0; I < NumParts; ++I) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + DstRegs.push_back(DstReg); + + if (MI.getOpcode() == TargetOpcode::G_ICMP) + MIRBuilder.buildICmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); + else { + MachineInstr *NewCmp + = MIRBuilder.buildFCmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); + NewCmp->setFlags(MI.getFlags()); + } + } + + if (NarrowTy1.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register CondReg = MI.getOperand(1).getReg(); + + unsigned NumParts = 0; + LLT NarrowTy0, NarrowTy1; + + LLT DstTy = MRI.getType(DstReg); + LLT CondTy = MRI.getType(CondReg); + unsigned Size = DstTy.getSizeInBits(); + + assert(TypeIdx == 0 || CondTy.isVector()); + + if (TypeIdx == 0) { + NarrowTy0 = NarrowTy; + NarrowTy1 = CondTy; + + unsigned NarrowSize = NarrowTy0.getSizeInBits(); + // FIXME: Don't know how to handle the situation where the small vectors + // aren't all the same size yet. + if (Size % NarrowSize != 0) + return UnableToLegalize; + + NumParts = Size / NarrowSize; + + // Need to break down the condition type + if (CondTy.isVector()) { + if (CondTy.getNumElements() == NumParts) + NarrowTy1 = CondTy.getElementType(); + else + NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts, + CondTy.getScalarSizeInBits()); + } + } else { + NumParts = CondTy.getNumElements(); + if (NarrowTy.isVector()) { + // TODO: Handle uneven breakdown. + if (NumParts * NarrowTy.getNumElements() != CondTy.getNumElements()) + return UnableToLegalize; + + return UnableToLegalize; + } else { + NarrowTy0 = DstTy.getElementType(); + NarrowTy1 = NarrowTy; + } + } + + SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs; + if (CondTy.isVector()) + extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs); + + extractParts(MI.getOperand(2).getReg(), NarrowTy0, NumParts, Src1Regs); + extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs); + + for (unsigned i = 0; i < NumParts; ++i) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg, + Src1Regs[i], Src2Regs[i]); + DstRegs.push_back(DstReg); + } + + if (NarrowTy0.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + const Register DstReg = MI.getOperand(0).getReg(); + LLT PhiTy = MRI.getType(DstReg); + LLT LeftoverTy; + + // All of the operands need to have the same number of elements, so if we can + // determine a type breakdown for the result type, we can for all of the + // source types. 
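+  // E.g. a <3 x s64> G_PHI with NarrowTy = <2 x s64> becomes a <2 x s64>
+  // G_PHI plus an s64 G_PHI; the incoming values are split at the end of
+  // each predecessor block and the pieces are re-merged in this block.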
+  int NumParts, NumLeftover;
+  std::tie(NumParts, NumLeftover)
+    = getNarrowTypeBreakDown(PhiTy, NarrowTy, LeftoverTy);
+  if (NumParts < 0)
+    return UnableToLegalize;
+
+  SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
+  SmallVector<MachineInstrBuilder, 4> NewInsts;
+
+  const int TotalNumParts = NumParts + NumLeftover;
+
+  // Insert the new phis in the result block first.
+  for (int I = 0; I != TotalNumParts; ++I) {
+    LLT Ty = I < NumParts ? NarrowTy : LeftoverTy;
+    Register PartDstReg = MRI.createGenericVirtualRegister(Ty);
+    NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI)
+                       .addDef(PartDstReg));
+    if (I < NumParts)
+      DstRegs.push_back(PartDstReg);
+    else
+      LeftoverDstRegs.push_back(PartDstReg);
+  }
+
+  MachineBasicBlock *MBB = MI.getParent();
+  MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
+  insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs);
+
+  SmallVector<Register, 4> PartRegs, LeftoverRegs;
+
+  // Insert code to extract the incoming values in each predecessor block.
+  for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
+    PartRegs.clear();
+    LeftoverRegs.clear();
+
+    Register SrcReg = MI.getOperand(I).getReg();
+    MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
+    MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
+
+    LLT Unused;
+    if (!extractParts(SrcReg, PhiTy, NarrowTy, Unused, PartRegs,
+                      LeftoverRegs))
+      return UnableToLegalize;
+
+    // Add the newly created operand splits to the existing instructions. The
+    // odd-sized pieces are ordered after the requested NarrowTy sized
+    // pieces.
+    for (int J = 0; J != TotalNumParts; ++J) {
+      MachineInstrBuilder MIB = NewInsts[J];
+      MIB.addUse(J < NumParts ? PartRegs[J] : LeftoverRegs[J - NumParts]);
+      MIB.addMBB(&OpMBB);
+    }
+  }
+
+  MI.eraseFromParent();
+  return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::fewerElementsVectorUnmergeValues(MachineInstr &MI,
+                                                  unsigned TypeIdx,
+                                                  LLT NarrowTy) {
+  if (TypeIdx != 1)
+    return UnableToLegalize;
+
+  const int NumDst = MI.getNumOperands() - 1;
+  const Register SrcReg = MI.getOperand(NumDst).getReg();
+  LLT SrcTy = MRI.getType(SrcReg);
+
+  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+
+  // TODO: Create sequence of extracts.
+  if (DstTy == NarrowTy)
+    return UnableToLegalize;
+
+  LLT GCDTy = getGCDType(SrcTy, NarrowTy);
+  if (DstTy == GCDTy) {
+    // This would just be a copy of the same unmerge.
+    // TODO: Create extracts, pad with undef and create intermediate merges.
+ return UnableToLegalize; + } + + auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); + const int NumUnmerge = Unmerge->getNumOperands() - 1; + const int PartsPerUnmerge = NumDst / NumUnmerge; + + for (int I = 0; I != NumUnmerge; ++I) { + auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES); + + for (int J = 0; J != PartsPerUnmerge; ++J) + MIB.addDef(MI.getOperand(I * PartsPerUnmerge + J).getReg()); + MIB.addUse(Unmerge.getReg(I)); + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorBuildVector(MachineInstr &MI, + unsigned TypeIdx, + LLT NarrowTy) { + assert(TypeIdx == 0 && "not a vector type index"); + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = DstTy.getElementType(); + + int DstNumElts = DstTy.getNumElements(); + int NarrowNumElts = NarrowTy.getNumElements(); + int NumConcat = (DstNumElts + NarrowNumElts - 1) / NarrowNumElts; + LLT WidenedDstTy = LLT::vector(NarrowNumElts * NumConcat, SrcTy); + + SmallVector<Register, 8> ConcatOps; + SmallVector<Register, 8> SubBuildVector; + + Register UndefReg; + if (WidenedDstTy != DstTy) + UndefReg = MIRBuilder.buildUndef(SrcTy).getReg(0); + + // Create a G_CONCAT_VECTORS of NarrowTy pieces, padding with undef as + // necessary. + // + // %3:_(<3 x s16>) = G_BUILD_VECTOR %0, %1, %2 + // -> <2 x s16> + // + // %4:_(s16) = G_IMPLICIT_DEF + // %5:_(<2 x s16>) = G_BUILD_VECTOR %0, %1 + // %6:_(<2 x s16>) = G_BUILD_VECTOR %2, %4 + // %7:_(<4 x s16>) = G_CONCAT_VECTORS %5, %6 + // %3:_(<3 x s16>) = G_EXTRACT %7, 0 + for (int I = 0; I != NumConcat; ++I) { + for (int J = 0; J != NarrowNumElts; ++J) { + int SrcIdx = NarrowNumElts * I + J; + + if (SrcIdx < DstNumElts) { + Register SrcReg = MI.getOperand(SrcIdx + 1).getReg(); + SubBuildVector.push_back(SrcReg); + } else + SubBuildVector.push_back(UndefReg); + } + + auto BuildVec = MIRBuilder.buildBuildVector(NarrowTy, SubBuildVector); + ConcatOps.push_back(BuildVec.getReg(0)); + SubBuildVector.clear(); + } + + if (DstTy == WidenedDstTy) + MIRBuilder.buildConcatVectors(DstReg, ConcatOps); + else { + auto Concat = MIRBuilder.buildConcatVectors(WidenedDstTy, ConcatOps); + MIRBuilder.buildExtract(DstReg, Concat, 0); + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + // FIXME: Don't know how to handle secondary types yet. + if (TypeIdx != 0) + return UnableToLegalize; + + MachineMemOperand *MMO = *MI.memoperands_begin(); + + // This implementation doesn't work for atomics. Give up instead of doing + // something invalid. 
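+  // Splitting one atomic access into several smaller accesses would not be
+  // atomic, so there is no valid lowering here.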
+  if (MMO->getOrdering() != AtomicOrdering::NotAtomic ||
+      MMO->getFailureOrdering() != AtomicOrdering::NotAtomic)
+    return UnableToLegalize;
+
+  bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
+  Register ValReg = MI.getOperand(0).getReg();
+  Register AddrReg = MI.getOperand(1).getReg();
+  LLT ValTy = MRI.getType(ValReg);
+
+  int NumParts = -1;
+  int NumLeftover = -1;
+  LLT LeftoverTy;
+  SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
+  if (IsLoad) {
+    std::tie(NumParts, NumLeftover) =
+        getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
+  } else {
+    if (extractParts(ValReg, ValTy, NarrowTy, LeftoverTy, NarrowRegs,
+                     NarrowLeftoverRegs)) {
+      NumParts = NarrowRegs.size();
+      NumLeftover = NarrowLeftoverRegs.size();
+    }
+  }
+
+  if (NumParts == -1)
+    return UnableToLegalize;
+
+  const LLT OffsetTy = LLT::scalar(MRI.getType(AddrReg).getScalarSizeInBits());
+
+  unsigned TotalSize = ValTy.getSizeInBits();
+
+  // Split the load/store into PartTy sized pieces starting at Offset. If this
+  // is a load, return the new registers in ValRegs. For a store, each element
+  // of ValRegs should be PartTy. Returns the next offset that needs to be
+  // handled.
+  auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
+                             unsigned Offset) -> unsigned {
+    MachineFunction &MF = MIRBuilder.getMF();
+    unsigned PartSize = PartTy.getSizeInBits();
+    for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize;
+         Offset += PartSize, ++Idx) {
+      unsigned ByteSize = PartSize / 8;
+      unsigned ByteOffset = Offset / 8;
+      Register NewAddrReg;
+
+      MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
+
+      MachineMemOperand *NewMMO =
+          MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
+
+      if (IsLoad) {
+        Register Dst = MRI.createGenericVirtualRegister(PartTy);
+        ValRegs.push_back(Dst);
+        MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
+      } else {
+        MIRBuilder.buildStore(ValRegs[Idx], NewAddrReg, *NewMMO);
+      }
+    }
+
+    return Offset;
+  };
+
+  unsigned HandledOffset = splitTypePieces(NarrowTy, NarrowRegs, 0);
+
+  // Handle the rest of the register if this isn't an even type breakdown.
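+  // E.g. narrowing an s96 load with NarrowTy = s64 emits an s64 load at
+  // offset 0 followed by an s32 load at byte offset 8, and the two pieces
+  // are merged back into the s96 result below.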
+ if (LeftoverTy.isValid()) + splitTypePieces(LeftoverTy, NarrowLeftoverRegs, HandledOffset); + + if (IsLoad) { + insertParts(ValReg, ValTy, NarrowTy, NarrowRegs, + LeftoverTy, NarrowLeftoverRegs); + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + using namespace TargetOpcode; + + MIRBuilder.setInstr(MI); + switch (MI.getOpcode()) { + case G_IMPLICIT_DEF: + return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy); + case G_AND: + case G_OR: + case G_XOR: + case G_ADD: + case G_SUB: + case G_MUL: + case G_SMULH: + case G_UMULH: + case G_FADD: + case G_FMUL: + case G_FSUB: + case G_FNEG: + case G_FABS: + case G_FCANONICALIZE: + case G_FDIV: + case G_FREM: + case G_FMA: + case G_FMAD: + case G_FPOW: + case G_FEXP: + case G_FEXP2: + case G_FLOG: + case G_FLOG2: + case G_FLOG10: + case G_FNEARBYINT: + case G_FCEIL: + case G_FFLOOR: + case G_FRINT: + case G_INTRINSIC_ROUND: + case G_INTRINSIC_TRUNC: + case G_FCOS: + case G_FSIN: + case G_FSQRT: + case G_BSWAP: + case G_BITREVERSE: + case G_SDIV: + case G_SMIN: + case G_SMAX: + case G_UMIN: + case G_UMAX: + case G_FMINNUM: + case G_FMAXNUM: + case G_FMINNUM_IEEE: + case G_FMAXNUM_IEEE: + case G_FMINIMUM: + case G_FMAXIMUM: + return fewerElementsVectorBasic(MI, TypeIdx, NarrowTy); + case G_SHL: + case G_LSHR: + case G_ASHR: + case G_CTLZ: + case G_CTLZ_ZERO_UNDEF: + case G_CTTZ: + case G_CTTZ_ZERO_UNDEF: + case G_CTPOP: + case G_FCOPYSIGN: + return fewerElementsVectorMultiEltType(MI, TypeIdx, NarrowTy); + case G_ZEXT: + case G_SEXT: + case G_ANYEXT: + case G_FPEXT: + case G_FPTRUNC: + case G_SITOFP: + case G_UITOFP: + case G_FPTOSI: + case G_FPTOUI: + case G_INTTOPTR: + case G_PTRTOINT: + case G_ADDRSPACE_CAST: + return fewerElementsVectorCasts(MI, TypeIdx, NarrowTy); + case G_ICMP: + case G_FCMP: + return fewerElementsVectorCmp(MI, TypeIdx, NarrowTy); + case G_SELECT: + return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy); + case G_PHI: + return fewerElementsVectorPhi(MI, TypeIdx, NarrowTy); + case G_UNMERGE_VALUES: + return fewerElementsVectorUnmergeValues(MI, TypeIdx, NarrowTy); + case G_BUILD_VECTOR: + return fewerElementsVectorBuildVector(MI, TypeIdx, NarrowTy); + case G_LOAD: + case G_STORE: + return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); + default: + return UnableToLegalize; + } +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt, + const LLT HalfTy, const LLT AmtTy) { + + Register InL = MRI.createGenericVirtualRegister(HalfTy); + Register InH = MRI.createGenericVirtualRegister(HalfTy); + MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg()); + + if (Amt.isNullValue()) { + MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH}); + MI.eraseFromParent(); + return Legalized; + } + + LLT NVT = HalfTy; + unsigned NVTBits = HalfTy.getSizeInBits(); + unsigned VTBits = 2 * NVTBits; + + SrcOp Lo(Register(0)), Hi(Register(0)); + if (MI.getOpcode() == TargetOpcode::G_SHL) { + if (Amt.ugt(VTBits)) { + Lo = Hi = MIRBuilder.buildConstant(NVT, 0); + } else if (Amt.ugt(NVTBits)) { + Lo = MIRBuilder.buildConstant(NVT, 0); + Hi = MIRBuilder.buildShl(NVT, InL, + MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); + } else if (Amt == NVTBits) { + Lo = MIRBuilder.buildConstant(NVT, 0); + Hi = InL; + } else { + Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt)); + auto OrLHS = + MIRBuilder.buildShl(NVT, InH, 
MIRBuilder.buildConstant(AmtTy, Amt)); + auto OrRHS = MIRBuilder.buildLShr( + NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); + Hi = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); + } + } else if (MI.getOpcode() == TargetOpcode::G_LSHR) { + if (Amt.ugt(VTBits)) { + Lo = Hi = MIRBuilder.buildConstant(NVT, 0); + } else if (Amt.ugt(NVTBits)) { + Lo = MIRBuilder.buildLShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); + Hi = MIRBuilder.buildConstant(NVT, 0); + } else if (Amt == NVTBits) { + Lo = InH; + Hi = MIRBuilder.buildConstant(NVT, 0); + } else { + auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); + + auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); + auto OrRHS = MIRBuilder.buildShl( + NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); + + Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); + Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst); + } + } else { + if (Amt.ugt(VTBits)) { + Hi = Lo = MIRBuilder.buildAShr( + NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); + } else if (Amt.ugt(NVTBits)) { + Lo = MIRBuilder.buildAShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); + Hi = MIRBuilder.buildAShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); + } else if (Amt == NVTBits) { + Lo = InH; + Hi = MIRBuilder.buildAShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); + } else { + auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); + + auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); + auto OrRHS = MIRBuilder.buildShl( + NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); + + Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); + Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst); + } + } + + MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()}); + MI.eraseFromParent(); + + return Legalized; +} + +// TODO: Optimize if constant shift amount. +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, + LLT RequestedTy) { + if (TypeIdx == 1) { + Observer.changingInstr(MI); + narrowScalarSrc(MI, RequestedTy, 2); + Observer.changedInstr(MI); + return Legalized; + } + + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + if (DstTy.isVector()) + return UnableToLegalize; + + Register Amt = MI.getOperand(2).getReg(); + LLT ShiftAmtTy = MRI.getType(Amt); + const unsigned DstEltSize = DstTy.getScalarSizeInBits(); + if (DstEltSize % 2 != 0) + return UnableToLegalize; + + // Ignore the input type. We can only go to exactly half the size of the + // input. If that isn't small enough, the resulting pieces will be further + // legalized. + const unsigned NewBitSize = DstEltSize / 2; + const LLT HalfTy = LLT::scalar(NewBitSize); + const LLT CondTy = LLT::scalar(1); + + if (const MachineInstr *KShiftAmt = + getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) { + return narrowScalarShiftByConstant( + MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy); + } + + // TODO: Expand with known bits. + + // Handle the fully general expansion by an unknown amount. 
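+  // In C terms, for a 2*N bit value split into halves (InL, InH), the
+  // expansion below computes, e.g. for G_SHL (a sketch; N = NewBitSize):
+  //   if (Amt == 0)     { Lo = InL;        Hi = InH; }
+  //   else if (Amt < N) { Lo = InL << Amt;
+  //                       Hi = (InH << Amt) | (InL >> (N - Amt)); }
+  //   else              { Lo = 0;          Hi = InL << (Amt - N); }
+  // with G_SELECTs standing in for the branches, since Amt is only known at
+  // runtime.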
+ auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize); + + Register InL = MRI.createGenericVirtualRegister(HalfTy); + Register InH = MRI.createGenericVirtualRegister(HalfTy); + MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg()); + + auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits); + auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt); + + auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0); + auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits); + auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero); + + Register ResultRegs[2]; + switch (MI.getOpcode()) { + case TargetOpcode::G_SHL: { + // Short: ShAmt < NewBitSize + auto LoS = MIRBuilder.buildShl(HalfTy, InL, Amt); + + auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, AmtLack); + auto HiOr = MIRBuilder.buildShl(HalfTy, InH, Amt); + auto HiS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr); + + // Long: ShAmt >= NewBitSize + auto LoL = MIRBuilder.buildConstant(HalfTy, 0); // Lo part is zero. + auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part. + + auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL); + auto Hi = MIRBuilder.buildSelect( + HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL)); + + ResultRegs[0] = Lo.getReg(0); + ResultRegs[1] = Hi.getReg(0); + break; + } + case TargetOpcode::G_LSHR: + case TargetOpcode::G_ASHR: { + // Short: ShAmt < NewBitSize + auto HiS = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, {InH, Amt}); + + auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, Amt); + auto HiOr = MIRBuilder.buildShl(HalfTy, InH, AmtLack); + auto LoS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr); + + // Long: ShAmt >= NewBitSize + MachineInstrBuilder HiL; + if (MI.getOpcode() == TargetOpcode::G_LSHR) { + HiL = MIRBuilder.buildConstant(HalfTy, 0); // Hi part is zero. + } else { + auto ShiftAmt = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1); + HiL = MIRBuilder.buildAShr(HalfTy, InH, ShiftAmt); // Sign of Hi part. + } + auto LoL = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, + {InH, AmtExcess}); // Lo from Hi part. 
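+    // As in the G_SHL case, nested selects pick between the short and long
+    // expansions; only the low half needs the explicit zero-amount select,
+    // since the high half of a zero-amount shift is just InH shifted by 0.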
+ + auto Lo = MIRBuilder.buildSelect( + HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL)); + + auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL); + + ResultRegs[0] = Lo.getReg(0); + ResultRegs[1] = Hi.getReg(0); + break; + } + default: + llvm_unreachable("not a shift"); + } + + MIRBuilder.buildMerge(DstReg, ResultRegs); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, + LLT MoreTy) { + assert(TypeIdx == 0 && "Expecting only Idx 0"); + + Observer.changingInstr(MI); + for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { + MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); + MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); + moreElementsVectorSrc(MI, MoreTy, I); + } + + MachineBasicBlock &MBB = *MI.getParent(); + MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx, + LLT MoreTy) { + MIRBuilder.setInstr(MI); + unsigned Opc = MI.getOpcode(); + switch (Opc) { + case TargetOpcode::G_IMPLICIT_DEF: + case TargetOpcode::G_LOAD: { + if (TypeIdx != 0) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_STORE: + if (TypeIdx != 0) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_AND: + case TargetOpcode::G_OR: + case TargetOpcode::G_XOR: + case TargetOpcode::G_SMIN: + case TargetOpcode::G_SMAX: + case TargetOpcode::G_UMIN: + case TargetOpcode::G_UMAX: { + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 1); + moreElementsVectorSrc(MI, MoreTy, 2); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_EXTRACT: + if (TypeIdx != 1) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 1); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_INSERT: + if (TypeIdx != 0) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 1); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_SELECT: + if (TypeIdx != 0) + return UnableToLegalize; + if (MRI.getType(MI.getOperand(1).getReg()).isVector()) + return UnableToLegalize; + + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 2); + moreElementsVectorSrc(MI, MoreTy, 3); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_UNMERGE_VALUES: { + if (TypeIdx != 1) + return UnableToLegalize; + + LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); + int NumDst = MI.getNumOperands() - 1; + moreElementsVectorSrc(MI, MoreTy, NumDst); + + auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES); + for (int I = 0; I != NumDst; ++I) + MIB.addDef(MI.getOperand(I).getReg()); + + int NewNumDst = MoreTy.getSizeInBits() / DstTy.getSizeInBits(); + for (int I = NumDst; I != NewNumDst; ++I) + MIB.addDef(MRI.createGenericVirtualRegister(DstTy)); + + MIB.addUse(MI.getOperand(NumDst).getReg()); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_PHI: + return 
moreElementsVectorPhi(MI, TypeIdx, MoreTy);
+  default:
+    return UnableToLegalize;
+  }
+}
+
+void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
+                                        ArrayRef<Register> Src1Regs,
+                                        ArrayRef<Register> Src2Regs,
+                                        LLT NarrowTy) {
+  MachineIRBuilder &B = MIRBuilder;
+  unsigned SrcParts = Src1Regs.size();
+  unsigned DstParts = DstRegs.size();
+
+  unsigned DstIdx = 0; // Low bits of the result.
+  Register FactorSum =
+      B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
+  DstRegs[DstIdx] = FactorSum;
+
+  unsigned CarrySumPrevDstIdx;
+  SmallVector<Register, 4> Factors;
+
+  for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
+    // Collect low parts of muls for DstIdx.
+    for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1;
+         i <= std::min(DstIdx, SrcParts - 1); ++i) {
+      MachineInstrBuilder Mul =
+          B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]);
+      Factors.push_back(Mul.getReg(0));
+    }
+    // Collect high parts of muls from previous DstIdx.
+    for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts;
+         i <= std::min(DstIdx - 1, SrcParts - 1); ++i) {
+      MachineInstrBuilder Umulh =
+          B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]);
+      Factors.push_back(Umulh.getReg(0));
+    }
+    // Add CarrySum from the additions calculated for the previous DstIdx.
+    if (DstIdx != 1) {
+      Factors.push_back(CarrySumPrevDstIdx);
+    }
+
+    Register CarrySum;
+    // Add all factors and accumulate all carries into CarrySum.
+    if (DstIdx != DstParts - 1) {
+      MachineInstrBuilder Uaddo =
+          B.buildUAddo(NarrowTy, LLT::scalar(1), Factors[0], Factors[1]);
+      FactorSum = Uaddo.getReg(0);
+      CarrySum = B.buildZExt(NarrowTy, Uaddo.getReg(1)).getReg(0);
+      for (unsigned i = 2; i < Factors.size(); ++i) {
+        MachineInstrBuilder Uaddo =
+            B.buildUAddo(NarrowTy, LLT::scalar(1), FactorSum, Factors[i]);
+        FactorSum = Uaddo.getReg(0);
+        MachineInstrBuilder Carry = B.buildZExt(NarrowTy, Uaddo.getReg(1));
+        CarrySum = B.buildAdd(NarrowTy, CarrySum, Carry).getReg(0);
+      }
+    } else {
+      // Since the value for the next index is not calculated, neither is
+      // CarrySum.
+      FactorSum = B.buildAdd(NarrowTy, Factors[0], Factors[1]).getReg(0);
+      for (unsigned i = 2; i < Factors.size(); ++i)
+        FactorSum = B.buildAdd(NarrowTy, FactorSum, Factors[i]).getReg(0);
+    }
+
+    CarrySumPrevDstIdx = CarrySum;
+    DstRegs[DstIdx] = FactorSum;
+    Factors.clear();
+  }
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
+  Register DstReg = MI.getOperand(0).getReg();
+  Register Src1 = MI.getOperand(1).getReg();
+  Register Src2 = MI.getOperand(2).getReg();
+
+  LLT Ty = MRI.getType(DstReg);
+  if (Ty.isVector())
+    return UnableToLegalize;
+
+  unsigned SrcSize = MRI.getType(Src1).getSizeInBits();
+  unsigned DstSize = Ty.getSizeInBits();
+  unsigned NarrowSize = NarrowTy.getSizeInBits();
+  if (DstSize % NarrowSize != 0 || SrcSize % NarrowSize != 0)
+    return UnableToLegalize;
+
+  unsigned NumDstParts = DstSize / NarrowSize;
+  unsigned NumSrcParts = SrcSize / NarrowSize;
+  bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
+  unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
+
+  SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
+  extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
+  extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
+  DstTmpRegs.resize(DstTmpParts);
+  multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
+
+  // Take only the high half of the registers if this is a high multiply.
+  ArrayRef<Register> DstRegs(
+      IsMulHigh ?
&DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts); + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + uint64_t NarrowSize = NarrowTy.getSizeInBits(); + + int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + // FIXME: add support for when SizeOp1 isn't an exact multiple of + // NarrowSize. + if (SizeOp1 % NarrowSize != 0) + return UnableToLegalize; + int NumParts = SizeOp1 / NarrowSize; + + SmallVector<Register, 2> SrcRegs, DstRegs; + SmallVector<uint64_t, 2> Indexes; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); + + Register OpReg = MI.getOperand(0).getReg(); + uint64_t OpStart = MI.getOperand(2).getImm(); + uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); + for (int i = 0; i < NumParts; ++i) { + unsigned SrcStart = i * NarrowSize; + + if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) { + // No part of the extract uses this subregister, ignore it. + continue; + } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) { + // The entire subregister is extracted, forward the value. + DstRegs.push_back(SrcRegs[i]); + continue; + } + + // OpSegStart is where this destination segment would start in OpReg if it + // extended infinitely in both directions. + int64_t ExtractOffset; + uint64_t SegSize; + if (OpStart < SrcStart) { + ExtractOffset = 0; + SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart); + } else { + ExtractOffset = OpStart - SrcStart; + SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize); + } + + Register SegReg = SrcRegs[i]; + if (ExtractOffset != 0 || SegSize != NarrowSize) { + // A genuine extract is needed. + SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); + MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset); + } + + DstRegs.push_back(SegReg); + } + + Register DstReg = MI.getOperand(0).getReg(); + if(MRI.getType(DstReg).isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + // FIXME: Don't know how to handle secondary types yet. + if (TypeIdx != 0) + return UnableToLegalize; + + uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + uint64_t NarrowSize = NarrowTy.getSizeInBits(); + + // FIXME: add support for when SizeOp0 isn't an exact multiple of + // NarrowSize. + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + + int NumParts = SizeOp0 / NarrowSize; + + SmallVector<Register, 2> SrcRegs, DstRegs; + SmallVector<uint64_t, 2> Indexes; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); + + Register OpReg = MI.getOperand(2).getReg(); + uint64_t OpStart = MI.getOperand(3).getImm(); + uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); + for (int i = 0; i < NumParts; ++i) { + unsigned DstStart = i * NarrowSize; + + if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) { + // No part of the insert affects this subregister, forward the original. + DstRegs.push_back(SrcRegs[i]); + continue; + } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) { + // The entire subregister is defined by this insert, forward the new + // value. 
+ DstRegs.push_back(OpReg); + continue; + } + + // OpSegStart is where this destination segment would start in OpReg if it + // extended infinitely in both directions. + int64_t ExtractOffset, InsertOffset; + uint64_t SegSize; + if (OpStart < DstStart) { + InsertOffset = 0; + ExtractOffset = DstStart - OpStart; + SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart); + } else { + InsertOffset = OpStart - DstStart; + ExtractOffset = 0; + SegSize = + std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart); + } + + Register SegReg = OpReg; + if (ExtractOffset != 0 || SegSize != OpSize) { + // A genuine extract is needed. + SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); + MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset); + } + + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset); + DstRegs.push_back(DstReg); + } + + assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered"); + Register DstReg = MI.getOperand(0).getReg(); + if(MRI.getType(DstReg).isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + + assert(MI.getNumOperands() == 3 && TypeIdx == 0); + + SmallVector<Register, 4> DstRegs, DstLeftoverRegs; + SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs; + SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; + LLT LeftoverTy; + if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy, + Src0Regs, Src0LeftoverRegs)) + return UnableToLegalize; + + LLT Unused; + if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, Unused, + Src1Regs, Src1LeftoverRegs)) + llvm_unreachable("inconsistent extractParts result"); + + for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) { + auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, + {Src0Regs[I], Src1Regs[I]}); + DstRegs.push_back(Inst->getOperand(0).getReg()); + } + + for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) { + auto Inst = MIRBuilder.buildInstr( + MI.getOpcode(), + {LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]}); + DstLeftoverRegs.push_back(Inst->getOperand(0).getReg()); + } + + insertParts(DstReg, DstTy, NarrowTy, DstRegs, + LeftoverTy, DstLeftoverRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 0) + return UnableToLegalize; + + Register CondReg = MI.getOperand(1).getReg(); + LLT CondTy = MRI.getType(CondReg); + if (CondTy.isVector()) // TODO: Handle vselect + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + + SmallVector<Register, 4> DstRegs, DstLeftoverRegs; + SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; + SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs; + LLT LeftoverTy; + if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy, + Src1Regs, Src1LeftoverRegs)) + return UnableToLegalize; + + LLT Unused; + if (!extractParts(MI.getOperand(3).getReg(), DstTy, NarrowTy, Unused, + Src2Regs, Src2LeftoverRegs)) + llvm_unreachable("inconsistent extractParts result"); + + for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) 
{
+    auto Select = MIRBuilder.buildSelect(NarrowTy,
+                                         CondReg, Src1Regs[I], Src2Regs[I]);
+    DstRegs.push_back(Select->getOperand(0).getReg());
+  }
+
+  for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) {
+    auto Select = MIRBuilder.buildSelect(
+        LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]);
+    DstLeftoverRegs.push_back(Select->getOperand(0).getReg());
+  }
+
+  insertParts(DstReg, DstTy, NarrowTy, DstRegs,
+              LeftoverTy, DstLeftoverRegs);
+
+  MI.eraseFromParent();
+  return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
+  unsigned Opc = MI.getOpcode();
+  auto &TII = *MI.getMF()->getSubtarget().getInstrInfo();
+  auto isSupported = [this](const LegalityQuery &Q) {
+    auto QAction = LI.getAction(Q).Action;
+    return QAction == Legal || QAction == Libcall || QAction == Custom;
+  };
+  switch (Opc) {
+  default:
+    return UnableToLegalize;
+  case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
+    // This trivially expands to CTLZ.
+    Observer.changingInstr(MI);
+    MI.setDesc(TII.get(TargetOpcode::G_CTLZ));
+    Observer.changedInstr(MI);
+    return Legalized;
+  }
+  case TargetOpcode::G_CTLZ: {
+    Register SrcReg = MI.getOperand(1).getReg();
+    unsigned Len = Ty.getSizeInBits();
+    if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) {
+      // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
+      auto MIBCtlzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF,
+                                             {Ty}, {SrcReg});
+      auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
+      auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
+      auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
+                                          SrcReg, MIBZero);
+      MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
+                             MIBCtlzZU);
+      MI.eraseFromParent();
+      return Legalized;
+    }
+    // For now, we do this:
+    // NewLen = NextPowerOf2(Len);
+    // x = x | (x >> 1);
+    // x = x | (x >> 2);
+    // ...
+    // x = x | (x >> 16);
+    // x = x | (x >> 32); // for 64-bit input
+    // Up to a shift of NewLen/2.
+    // return Len - popcount(x);
+    //
+    // Ref: "Hacker's Delight" by Henry Warren
+    Register Op = SrcReg;
+    unsigned NewLen = PowerOf2Ceil(Len);
+    for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
+      auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
+      auto MIBOp = MIRBuilder.buildInstr(
+          TargetOpcode::G_OR, {Ty},
+          {Op, MIRBuilder.buildInstr(TargetOpcode::G_LSHR, {Ty},
+                                     {Op, MIBShiftAmt})});
+      Op = MIBOp->getOperand(0).getReg();
+    }
+    auto MIBPop = MIRBuilder.buildInstr(TargetOpcode::G_CTPOP, {Ty}, {Op});
+    MIRBuilder.buildInstr(TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
+                          {MIRBuilder.buildConstant(Ty, Len), MIBPop});
+    MI.eraseFromParent();
+    return Legalized;
+  }
+  case TargetOpcode::G_CTTZ_ZERO_UNDEF: {
+    // This trivially expands to CTTZ.
+    Observer.changingInstr(MI);
+    MI.setDesc(TII.get(TargetOpcode::G_CTTZ));
+    Observer.changedInstr(MI);
+    return Legalized;
+  }
+  case TargetOpcode::G_CTTZ: {
+    Register SrcReg = MI.getOperand(1).getReg();
+    unsigned Len = Ty.getSizeInBits();
+    if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
+      // If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
+      // zero.
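+      // I.e. (a sketch; an s32 operand is assumed, so Len = 32):
+      //   %z:_(s32)   = G_CTTZ_ZERO_UNDEF %src
+      //   %c:_(s1)    = G_ICMP intpred(eq), %src, 0
+      //   %res:_(s32) = G_SELECT %c, 32, %z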
+ auto MIBCttzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF, + {Ty}, {SrcReg}); + auto MIBZero = MIRBuilder.buildConstant(Ty, 0); + auto MIBLen = MIRBuilder.buildConstant(Ty, Len); + auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), + SrcReg, MIBZero); + MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen, + MIBCttzZU); + MI.eraseFromParent(); + return Legalized; + } + // for now, we use: { return popcount(~x & (x - 1)); } + // unless the target has ctlz but not ctpop, in which case we use: + // { return 32 - nlz(~x & (x-1)); } + // Ref: "Hacker's Delight" by Henry Warren + auto MIBCstNeg1 = MIRBuilder.buildConstant(Ty, -1); + auto MIBNot = + MIRBuilder.buildInstr(TargetOpcode::G_XOR, {Ty}, {SrcReg, MIBCstNeg1}); + auto MIBTmp = MIRBuilder.buildInstr( + TargetOpcode::G_AND, {Ty}, + {MIBNot, MIRBuilder.buildInstr(TargetOpcode::G_ADD, {Ty}, + {SrcReg, MIBCstNeg1})}); + if (!isSupported({TargetOpcode::G_CTPOP, {Ty, Ty}}) && + isSupported({TargetOpcode::G_CTLZ, {Ty, Ty}})) { + auto MIBCstLen = MIRBuilder.buildConstant(Ty, Len); + MIRBuilder.buildInstr( + TargetOpcode::G_SUB, {MI.getOperand(0).getReg()}, + {MIBCstLen, + MIRBuilder.buildInstr(TargetOpcode::G_CTLZ, {Ty}, {MIBTmp})}); + MI.eraseFromParent(); + return Legalized; + } + MI.setDesc(TII.get(TargetOpcode::G_CTPOP)); + MI.getOperand(1).setReg(MIBTmp->getOperand(0).getReg()); + return Legalized; + } + } +} + +// Expand s32 = G_UITOFP s64 using bit operations to an IEEE float +// representation. +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + const LLT S64 = LLT::scalar(64); + const LLT S32 = LLT::scalar(32); + const LLT S1 = LLT::scalar(1); + + assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32); + + // unsigned cul2f(ulong u) { + // uint lz = clz(u); + // uint e = (u != 0) ? 127U + 63U - lz : 0; + // u = (u << lz) & 0x7fffffffffffffffUL; + // ulong t = u & 0xffffffffffUL; + // uint v = (e << 23) | (uint)(u >> 40); + // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? 
v & 1U : 0U);
+  //   return as_float(v + r);
+  // }
+
+  auto Zero32 = MIRBuilder.buildConstant(S32, 0);
+  auto Zero64 = MIRBuilder.buildConstant(S64, 0);
+
+  auto LZ = MIRBuilder.buildCTLZ_ZERO_UNDEF(S32, Src);
+
+  auto K = MIRBuilder.buildConstant(S32, 127U + 63U);
+  auto Sub = MIRBuilder.buildSub(S32, K, LZ);
+
+  auto NotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, Src, Zero64);
+  auto E = MIRBuilder.buildSelect(S32, NotZero, Sub, Zero32);
+
+  auto Mask0 = MIRBuilder.buildConstant(S64, (-1ULL) >> 1);
+  auto ShlLZ = MIRBuilder.buildShl(S64, Src, LZ);
+
+  auto U = MIRBuilder.buildAnd(S64, ShlLZ, Mask0);
+
+  auto Mask1 = MIRBuilder.buildConstant(S64, 0xffffffffffULL);
+  auto T = MIRBuilder.buildAnd(S64, U, Mask1);
+
+  auto UShl = MIRBuilder.buildLShr(S64, U, MIRBuilder.buildConstant(S64, 40));
+  auto ShlE = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 23));
+  auto V = MIRBuilder.buildOr(S32, ShlE, MIRBuilder.buildTrunc(S32, UShl));
+
+  auto C = MIRBuilder.buildConstant(S64, 0x8000000000ULL);
+  auto RCmp = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, S1, T, C);
+  auto TCmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, T, C);
+  auto One = MIRBuilder.buildConstant(S32, 1);
+
+  auto VTrunc1 = MIRBuilder.buildAnd(S32, V, One);
+  auto Select0 = MIRBuilder.buildSelect(S32, TCmp, VTrunc1, Zero32);
+  auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0);
+  MIRBuilder.buildAdd(Dst, V, R);
+
+  return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
+  LLT DstTy = MRI.getType(Dst);
+  LLT SrcTy = MRI.getType(Src);
+
+  if (SrcTy != LLT::scalar(64))
+    return UnableToLegalize;
+
+  if (DstTy == LLT::scalar(32)) {
+    // TODO: SelectionDAG has several alternative expansions to port which may
+    // be more reasonable depending on the available instructions. If a target
+    // has sitofp, does not have CTLZ, or can efficiently use f64 as an
+    // intermediate type, this is probably worse.
+    return lowerU64ToF32BitOps(MI);
+  }
+
+  return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
+  LLT DstTy = MRI.getType(Dst);
+  LLT SrcTy = MRI.getType(Src);
+
+  const LLT S64 = LLT::scalar(64);
+  const LLT S32 = LLT::scalar(32);
+  const LLT S1 = LLT::scalar(1);
+
+  if (SrcTy != S64)
+    return UnableToLegalize;
+
+  if (DstTy == S32) {
+    // signed cl2f(long l) {
+    //   long s = l >> 63;
+    //   float r = cul2f((l + s) ^ s);
+    //   return s ? -r : r;
+    // }
+    Register L = Src;
+    auto SignBit = MIRBuilder.buildConstant(S64, 63);
+    auto S = MIRBuilder.buildAShr(S64, L, SignBit);
+
+    auto LPlusS = MIRBuilder.buildAdd(S64, L, S);
+    auto Xor = MIRBuilder.buildXor(S64, LPlusS, S);
+    auto R = MIRBuilder.buildUITOFP(S32, Xor);
+
+    auto RNeg = MIRBuilder.buildFNeg(S32, R);
+    auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S,
+                                            MIRBuilder.buildConstant(S64, 0));
+    MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
+    return Legalized;
+  }
+
+  return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFPTOUI(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
+  LLT DstTy = MRI.getType(Dst);
+  LLT SrcTy = MRI.getType(Src);
+  const LLT S64 = LLT::scalar(64);
+  const LLT S32 = LLT::scalar(32);
+
+  if (SrcTy != S64 && SrcTy != S32)
+    return UnableToLegalize;
+  if (DstTy != S32 && DstTy != S64)
+    return UnableToLegalize;
+
+  // FPTOSI gives the same result as FPTOUI for positive signed integers.
+  // FPTOUI needs to deal with fp values that convert to unsigned integers
+  // greater than or equal to 2^31 for float or 2^63 for double. For brevity,
+  // 2^Exp.
+
+  APInt TwoPExpInt = APInt::getSignMask(DstTy.getSizeInBits());
+  APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle()
+                                                : APFloat::IEEEdouble(),
+                    APInt::getNullValue(SrcTy.getSizeInBits()));
+  TwoPExpFP.convertFromAPInt(TwoPExpInt, false, APFloat::rmNearestTiesToEven);
+
+  MachineInstrBuilder FPTOSI = MIRBuilder.buildFPTOSI(DstTy, Src);
+
+  MachineInstrBuilder Threshold = MIRBuilder.buildFConstant(SrcTy, TwoPExpFP);
+  // For fp values greater than or equal to the threshold (2^Exp), we use
+  // FPTOSI on (Value - 2^Exp) and add 2^Exp by setting the highest bit in the
+  // result to 1.
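+  // Worked example (illustrative, not from the original comments): converting
+  // float 3.0e9 to u32, with Threshold = 2^31 = 2147483648.0. The source is
+  // not ULT the threshold, so the select below picks the adjusted result:
+  //   FPTOSI(3.0e9 - 2147483648.0) = 852516352
+  //   852516352 ^ 0x80000000      = 3000000000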
+ MachineInstrBuilder FSub = MIRBuilder.buildFSub(SrcTy, Src, Threshold); + MachineInstrBuilder ResLowBits = MIRBuilder.buildFPTOSI(DstTy, FSub); + MachineInstrBuilder ResHighBit = MIRBuilder.buildConstant(DstTy, TwoPExpInt); + MachineInstrBuilder Res = MIRBuilder.buildXor(DstTy, ResLowBits, ResHighBit); + + MachineInstrBuilder FCMP = + MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, DstTy, Src, Threshold); + MIRBuilder.buildSelect(Dst, FCMP, FPTOSI, Res); + + MI.eraseFromParent(); + return Legalized; +} + +static CmpInst::Predicate minMaxToCompare(unsigned Opc) { + switch (Opc) { + case TargetOpcode::G_SMIN: + return CmpInst::ICMP_SLT; + case TargetOpcode::G_SMAX: + return CmpInst::ICMP_SGT; + case TargetOpcode::G_UMIN: + return CmpInst::ICMP_ULT; + case TargetOpcode::G_UMAX: + return CmpInst::ICMP_UGT; + default: + llvm_unreachable("not in integer min/max"); + } +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty) { + Register Dst = MI.getOperand(0).getReg(); + Register Src0 = MI.getOperand(1).getReg(); + Register Src1 = MI.getOperand(2).getReg(); + + const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode()); + LLT CmpType = MRI.getType(Dst).changeElementSize(1); + + auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1); + MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty) { + Register Dst = MI.getOperand(0).getReg(); + Register Src0 = MI.getOperand(1).getReg(); + Register Src1 = MI.getOperand(2).getReg(); + + const LLT Src0Ty = MRI.getType(Src0); + const LLT Src1Ty = MRI.getType(Src1); + + const int Src0Size = Src0Ty.getScalarSizeInBits(); + const int Src1Size = Src1Ty.getScalarSizeInBits(); + + auto SignBitMask = MIRBuilder.buildConstant( + Src0Ty, APInt::getSignMask(Src0Size)); + + auto NotSignBitMask = MIRBuilder.buildConstant( + Src0Ty, APInt::getLowBitsSet(Src0Size, Src0Size - 1)); + + auto And0 = MIRBuilder.buildAnd(Src0Ty, Src0, NotSignBitMask); + MachineInstr *Or; + + if (Src0Ty == Src1Ty) { + auto And1 = MIRBuilder.buildAnd(Src1Ty, Src0, SignBitMask); + Or = MIRBuilder.buildOr(Dst, And0, And1); + } else if (Src0Size > Src1Size) { + auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size); + auto Zext = MIRBuilder.buildZExt(Src0Ty, Src1); + auto Shift = MIRBuilder.buildShl(Src0Ty, Zext, ShiftAmt); + auto And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask); + Or = MIRBuilder.buildOr(Dst, And0, And1); + } else { + auto ShiftAmt = MIRBuilder.buildConstant(Src1Ty, Src1Size - Src0Size); + auto Shift = MIRBuilder.buildLShr(Src1Ty, Src1, ShiftAmt); + auto Trunc = MIRBuilder.buildTrunc(Src0Ty, Shift); + auto And1 = MIRBuilder.buildAnd(Src0Ty, Trunc, SignBitMask); + Or = MIRBuilder.buildOr(Dst, And0, And1); + } + + // Be careful about setting nsz/nnan/ninf on every instruction, since the + // constants are a nan and -0.0, but the final result should preserve + // everything. + if (unsigned Flags = MI.getFlags()) + Or->setFlags(Flags); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerFMinNumMaxNum(MachineInstr &MI) { + unsigned NewOp = MI.getOpcode() == TargetOpcode::G_FMINNUM ? 
+    TargetOpcode::G_FMINNUM_IEEE : TargetOpcode::G_FMAXNUM_IEEE;
+
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src0 = MI.getOperand(1).getReg();
+  Register Src1 = MI.getOperand(2).getReg();
+  LLT Ty = MRI.getType(Dst);
+
+  if (!MI.getFlag(MachineInstr::FmNoNans)) {
+    // Insert canonicalizes if it's possible we need to quiet to get correct
+    // sNaN behavior.
+
+    // Note this must be done here, and not as an optimization combine in the
+    // absence of a dedicated quiet-sNaN instruction, as we're using a
+    // general-purpose G_FCANONICALIZE.
+    if (!isKnownNeverSNaN(Src0, MRI))
+      Src0 = MIRBuilder.buildFCanonicalize(Ty, Src0, MI.getFlags()).getReg(0);
+
+    if (!isKnownNeverSNaN(Src1, MRI))
+      Src1 = MIRBuilder.buildFCanonicalize(Ty, Src1, MI.getFlags()).getReg(0);
+  }
+
+  // If there are no NaNs, it's safe to simply replace this with the non-IEEE
+  // version.
+  MIRBuilder.buildInstr(NewOp, {Dst}, {Src0, Src1}, MI.getFlags());
+  MI.eraseFromParent();
+  return Legalized;
+}
+
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) {
+  // Expand G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c
+  Register DstReg = MI.getOperand(0).getReg();
+  LLT Ty = MRI.getType(DstReg);
+  unsigned Flags = MI.getFlags();
+
+  auto Mul = MIRBuilder.buildFMul(Ty, MI.getOperand(1), MI.getOperand(2),
+                                  Flags);
+  MIRBuilder.buildFAdd(DstReg, Mul, MI.getOperand(3), Flags);
+  MI.eraseFromParent();
+  return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
+  const unsigned NumDst = MI.getNumOperands() - 1;
+  const Register SrcReg = MI.getOperand(NumDst).getReg();
+  LLT SrcTy = MRI.getType(SrcReg);
+
+  Register Dst0Reg = MI.getOperand(0).getReg();
+  LLT DstTy = MRI.getType(Dst0Reg);
+
+  // Expand scalarizing unmerge as bitcast to integer and shift.
+  if (!DstTy.isVector() && SrcTy.isVector() &&
+      SrcTy.getElementType() == DstTy) {
+    LLT IntTy = LLT::scalar(SrcTy.getSizeInBits());
+    Register Cast = MIRBuilder.buildBitcast(IntTy, SrcReg).getReg(0);
+
+    MIRBuilder.buildTrunc(Dst0Reg, Cast);
+
+    const unsigned DstSize = DstTy.getSizeInBits();
+    unsigned Offset = DstSize;
+    for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) {
+      auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset);
+      auto Shift = MIRBuilder.buildLShr(IntTy, Cast, ShiftAmt);
+      MIRBuilder.buildTrunc(MI.getOperand(I), Shift);
+    }
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
+
+  return UnableToLegalize;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerShuffleVector(MachineInstr &MI) {
+  Register DstReg = MI.getOperand(0).getReg();
+  Register Src0Reg = MI.getOperand(1).getReg();
+  Register Src1Reg = MI.getOperand(2).getReg();
+  LLT Src0Ty = MRI.getType(Src0Reg);
+  LLT DstTy = MRI.getType(DstReg);
+  LLT IdxTy = LLT::scalar(32);
+
+  const Constant *ShufMask = MI.getOperand(3).getShuffleMask();
+
+  SmallVector<int, 32> Mask;
+  ShuffleVectorInst::getShuffleMask(ShufMask, Mask);
+
+  if (DstTy.isScalar()) {
+    if (Src0Ty.isVector())
+      return UnableToLegalize;
+
+    // This is just a SELECT.
+    assert(Mask.size() == 1 && "Expected a single mask element");
+    Register Val;
+    if (Mask[0] < 0 || Mask[0] > 1)
+      Val = MIRBuilder.buildUndef(DstTy).getReg(0);
+    else
+      Val = Mask[0] == 0 ?
Src0Reg : Src1Reg; + MIRBuilder.buildCopy(DstReg, Val); + MI.eraseFromParent(); + return Legalized; + } + + Register Undef; + SmallVector<Register, 32> BuildVec; + LLT EltTy = DstTy.getElementType(); + + for (int Idx : Mask) { + if (Idx < 0) { + if (!Undef.isValid()) + Undef = MIRBuilder.buildUndef(EltTy).getReg(0); + BuildVec.push_back(Undef); + continue; + } + + if (Src0Ty.isScalar()) { + BuildVec.push_back(Idx == 0 ? Src0Reg : Src1Reg); + } else { + int NumElts = Src0Ty.getNumElements(); + Register SrcVec = Idx < NumElts ? Src0Reg : Src1Reg; + int ExtractIdx = Idx < NumElts ? Idx : Idx - NumElts; + auto IdxK = MIRBuilder.buildConstant(IdxTy, ExtractIdx); + auto Extract = MIRBuilder.buildExtractVectorElement(EltTy, SrcVec, IdxK); + BuildVec.push_back(Extract.getReg(0)); + } + } + + MIRBuilder.buildBuildVector(DstReg, BuildVec); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register AllocSize = MI.getOperand(1).getReg(); + unsigned Align = MI.getOperand(2).getImm(); + + const auto &MF = *MI.getMF(); + const auto &TLI = *MF.getSubtarget().getTargetLowering(); + + LLT PtrTy = MRI.getType(Dst); + LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits()); + + Register SPReg = TLI.getStackPointerRegisterToSaveRestore(); + auto SPTmp = MIRBuilder.buildCopy(PtrTy, SPReg); + SPTmp = MIRBuilder.buildCast(IntPtrTy, SPTmp); + + // Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't + // have to generate an extra instruction to negate the alloc and then use + // G_GEP to add the negative offset. + auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize); + if (Align) { + APInt AlignMask(IntPtrTy.getSizeInBits(), Align, true); + AlignMask.negate(); + auto AlignCst = MIRBuilder.buildConstant(IntPtrTy, AlignMask); + Alloc = MIRBuilder.buildAnd(IntPtrTy, Alloc, AlignCst); + } + + SPTmp = MIRBuilder.buildCast(PtrTy, Alloc); + MIRBuilder.buildCopy(SPReg, SPTmp); + MIRBuilder.buildCopy(Dst, SPTmp); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerExtract(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + unsigned Offset = MI.getOperand(2).getImm(); + + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + + if (DstTy.isScalar() && + (SrcTy.isScalar() || + (SrcTy.isVector() && DstTy == SrcTy.getElementType()))) { + LLT SrcIntTy = SrcTy; + if (!SrcTy.isScalar()) { + SrcIntTy = LLT::scalar(SrcTy.getSizeInBits()); + Src = MIRBuilder.buildBitcast(SrcIntTy, Src).getReg(0); + } + + if (Offset == 0) + MIRBuilder.buildTrunc(Dst, Src); + else { + auto ShiftAmt = MIRBuilder.buildConstant(SrcIntTy, Offset); + auto Shr = MIRBuilder.buildLShr(SrcIntTy, Src, ShiftAmt); + MIRBuilder.buildTrunc(Dst, Shr); + } + + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerInsert(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + Register InsertSrc = MI.getOperand(2).getReg(); + uint64_t Offset = MI.getOperand(3).getImm(); + + LLT DstTy = MRI.getType(Src); + LLT InsertTy = MRI.getType(InsertSrc); + + if (InsertTy.isScalar() && + (DstTy.isScalar() || + (DstTy.isVector() && DstTy.getElementType() == InsertTy))) { + LLT IntDstTy = DstTy; + if (!DstTy.isScalar()) { + IntDstTy = LLT::scalar(DstTy.getSizeInBits()); + Src 
= MIRBuilder.buildBitcast(IntDstTy, Src).getReg(0); + } + + Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0); + if (Offset != 0) { + auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset); + ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0); + } + + APInt MaskVal = ~APInt::getBitsSet(DstTy.getSizeInBits(), Offset, + InsertTy.getSizeInBits()); + + auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal); + auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask); + auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc); + + MIRBuilder.buildBitcast(Dst, Or); + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) { + Register Dst0 = MI.getOperand(0).getReg(); + Register Dst1 = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); + const bool IsAdd = MI.getOpcode() == TargetOpcode::G_SADDO; + + LLT Ty = MRI.getType(Dst0); + LLT BoolTy = MRI.getType(Dst1); + + if (IsAdd) + MIRBuilder.buildAdd(Dst0, LHS, RHS); + else + MIRBuilder.buildSub(Dst0, LHS, RHS); + + // TODO: If SADDSAT/SSUBSAT is legal, compare results to detect overflow. + + auto Zero = MIRBuilder.buildConstant(Ty, 0); + + // For an addition, the result should be less than one of the operands (LHS) + // if and only if the other operand (RHS) is negative, otherwise there will + // be overflow. + // For a subtraction, the result should be less than one of the operands + // (LHS) if and only if the other operand (RHS) is (non-zero) positive, + // otherwise there will be overflow. + auto ResultLowerThanLHS = + MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, Dst0, LHS); + auto ConditionRHS = MIRBuilder.buildICmp( + IsAdd ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGT, BoolTy, RHS, Zero); + + MIRBuilder.buildXor(Dst1, ConditionRHS, ResultLowerThanLHS); + MI.eraseFromParent(); + return Legalized; +} diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp new file mode 100644 index 000000000000..70045512fae5 --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp @@ -0,0 +1,748 @@ +//===- lib/CodeGen/GlobalISel/LegalizerInfo.cpp - Legalizer ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Implement an interface to specify and query how an illegal operation on a +// given type should be expanded. +// +// Issues to be resolved: +// + Make it fast. +// + Support weird types like i3, <7 x i3>, ... +// + Operations with more than one type (ICMP, CMPXCHG, intrinsics, ...) 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" +#include "llvm/ADT/SmallBitVector.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetOpcodes.h" +#include "llvm/MC/MCInstrDesc.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LowLevelTypeImpl.h" +#include "llvm/Support/MathExtras.h" +#include <algorithm> +#include <map> + +using namespace llvm; +using namespace LegalizeActions; + +#define DEBUG_TYPE "legalizer-info" + +cl::opt<bool> llvm::DisableGISelLegalityCheck( + "disable-gisel-legality-check", + cl::desc("Don't verify that MIR is fully legal between GlobalISel passes"), + cl::Hidden); + +raw_ostream &llvm::operator<<(raw_ostream &OS, LegalizeAction Action) { + switch (Action) { + case Legal: + OS << "Legal"; + break; + case NarrowScalar: + OS << "NarrowScalar"; + break; + case WidenScalar: + OS << "WidenScalar"; + break; + case FewerElements: + OS << "FewerElements"; + break; + case MoreElements: + OS << "MoreElements"; + break; + case Lower: + OS << "Lower"; + break; + case Libcall: + OS << "Libcall"; + break; + case Custom: + OS << "Custom"; + break; + case Unsupported: + OS << "Unsupported"; + break; + case NotFound: + OS << "NotFound"; + break; + case UseLegacyRules: + OS << "UseLegacyRules"; + break; + } + return OS; +} + +raw_ostream &LegalityQuery::print(raw_ostream &OS) const { + OS << Opcode << ", Tys={"; + for (const auto &Type : Types) { + OS << Type << ", "; + } + OS << "}, Opcode="; + + OS << Opcode << ", MMOs={"; + for (const auto &MMODescr : MMODescrs) { + OS << MMODescr.SizeInBits << ", "; + } + OS << "}"; + + return OS; +} + +#ifndef NDEBUG +// Make sure the rule won't (trivially) loop forever. +static bool hasNoSimpleLoops(const LegalizeRule &Rule, const LegalityQuery &Q, + const std::pair<unsigned, LLT> &Mutation) { + switch (Rule.getAction()) { + case Custom: + case Lower: + case MoreElements: + case FewerElements: + break; + default: + return Q.Types[Mutation.first] != Mutation.second; + } + return true; +} + +// Make sure the returned mutation makes sense for the match type. +static bool mutationIsSane(const LegalizeRule &Rule, + const LegalityQuery &Q, + std::pair<unsigned, LLT> Mutation) { + // If the user wants a custom mutation, then we can't really say much about + // it. Return true, and trust that they're doing the right thing. + if (Rule.getAction() == Custom) + return true; + + const unsigned TypeIdx = Mutation.first; + const LLT OldTy = Q.Types[TypeIdx]; + const LLT NewTy = Mutation.second; + + switch (Rule.getAction()) { + case FewerElements: + case MoreElements: { + if (!OldTy.isVector()) + return false; + + if (NewTy.isVector()) { + if (Rule.getAction() == FewerElements) { + // Make sure the element count really decreased. + if (NewTy.getNumElements() >= OldTy.getNumElements()) + return false; + } else { + // Make sure the element count really increased. + if (NewTy.getNumElements() <= OldTy.getNumElements()) + return false; + } + } + + // Make sure the element type didn't change. + return NewTy.getScalarType() == OldTy.getElementType(); + } + case NarrowScalar: + case WidenScalar: { + if (OldTy.isVector()) { + // Number of elements should not change. 
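+      // e.g. widening the scalar type of <4 x s16> must produce <4 x s32>;
+      // a mutation to <8 x s16> would change the lane count instead.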
+      if (!NewTy.isVector() || OldTy.getNumElements() != NewTy.getNumElements())
+        return false;
+    } else {
+      // Both types must be scalars.
+      if (NewTy.isVector())
+        return false;
+    }
+
+    if (Rule.getAction() == NarrowScalar) {
+      // Make sure the size really decreased.
+      if (NewTy.getScalarSizeInBits() >= OldTy.getScalarSizeInBits())
+        return false;
+    } else {
+      // Make sure the size really increased.
+      if (NewTy.getScalarSizeInBits() <= OldTy.getScalarSizeInBits())
+        return false;
+    }
+
+    return true;
+  }
+  default:
+    return true;
+  }
+}
+#endif
+
+LegalizeActionStep LegalizeRuleSet::apply(const LegalityQuery &Query) const {
+  LLVM_DEBUG(dbgs() << "Applying legalizer ruleset to: "; Query.print(dbgs());
+             dbgs() << "\n");
+  if (Rules.empty()) {
+    LLVM_DEBUG(dbgs() << ".. fallback to legacy rules (no rules defined)\n");
+    return {LegalizeAction::UseLegacyRules, 0, LLT{}};
+  }
+  for (const LegalizeRule &Rule : Rules) {
+    if (Rule.match(Query)) {
+      LLVM_DEBUG(dbgs() << ".. match\n");
+      std::pair<unsigned, LLT> Mutation = Rule.determineMutation(Query);
+      LLVM_DEBUG(dbgs() << ".. .. " << Rule.getAction() << ", "
+                        << Mutation.first << ", " << Mutation.second << "\n");
+      assert(mutationIsSane(Rule, Query, Mutation) &&
+             "legality mutation invalid for match");
+      assert(hasNoSimpleLoops(Rule, Query, Mutation) && "Simple loop detected");
+      return {Rule.getAction(), Mutation.first, Mutation.second};
+    } else
+      LLVM_DEBUG(dbgs() << ".. no match\n");
+  }
+  LLVM_DEBUG(dbgs() << ".. unsupported\n");
+  return {LegalizeAction::Unsupported, 0, LLT{}};
+}
+
+bool LegalizeRuleSet::verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const {
+#ifndef NDEBUG
+  if (Rules.empty()) {
+    LLVM_DEBUG(
+        dbgs() << ".. type index coverage check SKIPPED: no rules defined\n");
+    return true;
+  }
+  const int64_t FirstUncovered = TypeIdxsCovered.find_first_unset();
+  if (FirstUncovered < 0) {
+    LLVM_DEBUG(dbgs() << ".. type index coverage check SKIPPED:"
+                         " user-defined predicate detected\n");
+    return true;
+  }
+  const bool AllCovered = (FirstUncovered >= NumTypeIdxs);
+  if (NumTypeIdxs > 0)
+    LLVM_DEBUG(dbgs() << ".. the first uncovered type index: " << FirstUncovered
+                      << ", " << (AllCovered ? "OK" : "FAIL") << "\n");
+  return AllCovered;
+#else
+  return true;
+#endif
+}
+
+bool LegalizeRuleSet::verifyImmIdxsCoverage(unsigned NumImmIdxs) const {
+#ifndef NDEBUG
+  if (Rules.empty()) {
+    LLVM_DEBUG(
+        dbgs() << ".. imm index coverage check SKIPPED: no rules defined\n");
+    return true;
+  }
+  const int64_t FirstUncovered = ImmIdxsCovered.find_first_unset();
+  if (FirstUncovered < 0) {
+    LLVM_DEBUG(dbgs() << ".. imm index coverage check SKIPPED:"
+                         " user-defined predicate detected\n");
+    return true;
+  }
+  const bool AllCovered = (FirstUncovered >= NumImmIdxs);
+  LLVM_DEBUG(dbgs() << ".. the first uncovered imm index: " << FirstUncovered
+                    << ", " << (AllCovered ? "OK" : "FAIL") << "\n");
+  return AllCovered;
+#else
+  return true;
+#endif
+}
+
+LegalizerInfo::LegalizerInfo() : TablesInitialized(false) {
+  // Set defaults.
+  // FIXME: these two (G_ANYEXT and G_TRUNC?) can be legalized to the
+  // fundamental load/store Jakob proposed. Once loads & stores are supported.
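+  // A note on the legacy encoding used below (not in the original source):
+  // each action list is a step table of (bit-size, action) pairs, so
+  // {{1, Legal}} marks the action Legal from 1-bit scalars upward until a
+  // later entry overrides it.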
+  setScalarAction(TargetOpcode::G_ANYEXT, 1, {{1, Legal}});
+  setScalarAction(TargetOpcode::G_ZEXT, 1, {{1, Legal}});
+  setScalarAction(TargetOpcode::G_SEXT, 1, {{1, Legal}});
+  setScalarAction(TargetOpcode::G_TRUNC, 0, {{1, Legal}});
+  setScalarAction(TargetOpcode::G_TRUNC, 1, {{1, Legal}});
+
+  setScalarAction(TargetOpcode::G_INTRINSIC, 0, {{1, Legal}});
+  setScalarAction(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS, 0, {{1, Legal}});
+
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_IMPLICIT_DEF, 0, narrowToSmallerAndUnsupportedIfTooSmall);
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_OR, 0, widenToLargerTypesAndNarrowToLargest);
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_LOAD, 0, narrowToSmallerAndUnsupportedIfTooSmall);
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_STORE, 0, narrowToSmallerAndUnsupportedIfTooSmall);
+
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_BRCOND, 0, widenToLargerTypesUnsupportedOtherwise);
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_INSERT, 0, narrowToSmallerAndUnsupportedIfTooSmall);
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_EXTRACT, 0, narrowToSmallerAndUnsupportedIfTooSmall);
+  setLegalizeScalarToDifferentSizeStrategy(
+      TargetOpcode::G_EXTRACT, 1, narrowToSmallerAndUnsupportedIfTooSmall);
+  setScalarAction(TargetOpcode::G_FNEG, 0, {{1, Lower}});
+}
+
+void LegalizerInfo::computeTables() {
+  assert(TablesInitialized == false);
+
+  for (unsigned OpcodeIdx = 0; OpcodeIdx <= LastOp - FirstOp; ++OpcodeIdx) {
+    const unsigned Opcode = FirstOp + OpcodeIdx;
+    for (unsigned TypeIdx = 0; TypeIdx != SpecifiedActions[OpcodeIdx].size();
+         ++TypeIdx) {
+      // 0. Collect information specified through the setAction API, i.e.
+      // for specific bit sizes.
+      // For scalar types:
+      SizeAndActionsVec ScalarSpecifiedActions;
+      // For pointer types:
+      std::map<uint16_t, SizeAndActionsVec> AddressSpace2SpecifiedActions;
+      // For vector types:
+      std::map<uint16_t, SizeAndActionsVec> ElemSize2SpecifiedActions;
+      for (auto LLT2Action : SpecifiedActions[OpcodeIdx][TypeIdx]) {
+        const LLT Type = LLT2Action.first;
+        const LegalizeAction Action = LLT2Action.second;
+
+        auto SizeAction = std::make_pair(Type.getSizeInBits(), Action);
+        if (Type.isPointer())
+          AddressSpace2SpecifiedActions[Type.getAddressSpace()].push_back(
+              SizeAction);
+        else if (Type.isVector())
+          ElemSize2SpecifiedActions[Type.getElementType().getSizeInBits()]
+              .push_back(SizeAction);
+        else
+          ScalarSpecifiedActions.push_back(SizeAction);
+      }
+
+      // 1. Handle scalar types
+      {
+        // Decide how to handle bit sizes for which no explicit specification
+        // was given.
+        SizeChangeStrategy S = &unsupportedForDifferentSizes;
+        if (TypeIdx < ScalarSizeChangeStrategies[OpcodeIdx].size() &&
+            ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr)
+          S = ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx];
+        llvm::sort(ScalarSpecifiedActions);
+        checkPartialSizeAndActionsVector(ScalarSpecifiedActions);
+        setScalarAction(Opcode, TypeIdx, S(ScalarSpecifiedActions));
+      }
+
+      // 2. Handle pointer types
+      for (auto PointerSpecifiedActions : AddressSpace2SpecifiedActions) {
+        llvm::sort(PointerSpecifiedActions.second);
+        checkPartialSizeAndActionsVector(PointerSpecifiedActions.second);
+        // For pointer types, we assume that there isn't a meaningful way
+        // to change the number of bits used in the pointer.
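+        // In other words (illustrative): if actions were only specified for
+        // 64-bit pointers in an address space, a query for a 32-bit pointer
+        // in that address space comes back Unsupported instead of being
+        // widened.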
+ setPointerAction( + Opcode, TypeIdx, PointerSpecifiedActions.first, + unsupportedForDifferentSizes(PointerSpecifiedActions.second)); + } + + // 3. Handle vector types + SizeAndActionsVec ElementSizesSeen; + for (auto VectorSpecifiedActions : ElemSize2SpecifiedActions) { + llvm::sort(VectorSpecifiedActions.second); + const uint16_t ElementSize = VectorSpecifiedActions.first; + ElementSizesSeen.push_back({ElementSize, Legal}); + checkPartialSizeAndActionsVector(VectorSpecifiedActions.second); + // For vector types, we assume that the best way to adapt the number + // of elements is to the next larger number of elements type for which + // the vector type is legal, unless there is no such type. In that case, + // legalize towards a vector type with a smaller number of elements. + SizeAndActionsVec NumElementsActions; + for (SizeAndAction BitsizeAndAction : VectorSpecifiedActions.second) { + assert(BitsizeAndAction.first % ElementSize == 0); + const uint16_t NumElements = BitsizeAndAction.first / ElementSize; + NumElementsActions.push_back({NumElements, BitsizeAndAction.second}); + } + setVectorNumElementAction( + Opcode, TypeIdx, ElementSize, + moreToWiderTypesAndLessToWidest(NumElementsActions)); + } + llvm::sort(ElementSizesSeen); + SizeChangeStrategy VectorElementSizeChangeStrategy = + &unsupportedForDifferentSizes; + if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() && + VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr) + VectorElementSizeChangeStrategy = + VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx]; + setScalarInVectorAction( + Opcode, TypeIdx, VectorElementSizeChangeStrategy(ElementSizesSeen)); + } + } + + TablesInitialized = true; +} + +// FIXME: inefficient implementation for now. Without ComputeValueVTs we're +// probably going to need specialized lookup structures for various types before +// we have any hope of doing well with something like <13 x i3>. Even the common +// cases should do better than what we have now. +std::pair<LegalizeAction, LLT> +LegalizerInfo::getAspectAction(const InstrAspect &Aspect) const { + assert(TablesInitialized && "backend forgot to call computeTables"); + // These *have* to be implemented for now, they're the fundamental basis of + // how everything else is transformed. + if (Aspect.Type.isScalar() || Aspect.Type.isPointer()) + return findScalarLegalAction(Aspect); + assert(Aspect.Type.isVector()); + return findVectorLegalAction(Aspect); +} + +/// Helper function to get LLT for the given type index. +static LLT getTypeFromTypeIdx(const MachineInstr &MI, + const MachineRegisterInfo &MRI, unsigned OpIdx, + unsigned TypeIdx) { + assert(TypeIdx < MI.getNumOperands() && "Unexpected TypeIdx"); + // G_UNMERGE_VALUES has variable number of operands, but there is only + // one source type and one destination type as all destinations must be the + // same type. So, get the last operand if TypeIdx == 1. + if (MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && TypeIdx == 1) + return MRI.getType(MI.getOperand(MI.getNumOperands() - 1).getReg()); + return MRI.getType(MI.getOperand(OpIdx).getReg()); +} + +unsigned LegalizerInfo::getOpcodeIdxForOpcode(unsigned Opcode) const { + assert(Opcode >= FirstOp && Opcode <= LastOp && "Unsupported opcode"); + return Opcode - FirstOp; +} + +unsigned LegalizerInfo::getActionDefinitionsIdx(unsigned Opcode) const { + unsigned OpcodeIdx = getOpcodeIdxForOpcode(Opcode); + if (unsigned Alias = RulesForOpcode[OpcodeIdx].getAlias()) { + LLVM_DEBUG(dbgs() << ".. 
opcode " << Opcode << " is aliased to " << Alias + << "\n"); + OpcodeIdx = getOpcodeIdxForOpcode(Alias); + assert(RulesForOpcode[OpcodeIdx].getAlias() == 0 && "Cannot chain aliases"); + } + + return OpcodeIdx; +} + +const LegalizeRuleSet & +LegalizerInfo::getActionDefinitions(unsigned Opcode) const { + unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode); + return RulesForOpcode[OpcodeIdx]; +} + +LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder(unsigned Opcode) { + unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode); + auto &Result = RulesForOpcode[OpcodeIdx]; + assert(!Result.isAliasedByAnother() && "Modifying this opcode will modify aliases"); + return Result; +} + +LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder( + std::initializer_list<unsigned> Opcodes) { + unsigned Representative = *Opcodes.begin(); + + assert(!llvm::empty(Opcodes) && Opcodes.begin() + 1 != Opcodes.end() && + "Initializer list must have at least two opcodes"); + + for (auto I = Opcodes.begin() + 1, E = Opcodes.end(); I != E; ++I) + aliasActionDefinitions(Representative, *I); + + auto &Return = getActionDefinitionsBuilder(Representative); + Return.setIsAliasedByAnother(); + return Return; +} + +void LegalizerInfo::aliasActionDefinitions(unsigned OpcodeTo, + unsigned OpcodeFrom) { + assert(OpcodeTo != OpcodeFrom && "Cannot alias to self"); + assert(OpcodeTo >= FirstOp && OpcodeTo <= LastOp && "Unsupported opcode"); + const unsigned OpcodeFromIdx = getOpcodeIdxForOpcode(OpcodeFrom); + RulesForOpcode[OpcodeFromIdx].aliasTo(OpcodeTo); +} + +LegalizeActionStep +LegalizerInfo::getAction(const LegalityQuery &Query) const { + LegalizeActionStep Step = getActionDefinitions(Query.Opcode).apply(Query); + if (Step.Action != LegalizeAction::UseLegacyRules) { + return Step; + } + + for (unsigned i = 0; i < Query.Types.size(); ++i) { + auto Action = getAspectAction({Query.Opcode, i, Query.Types[i]}); + if (Action.first != Legal) { + LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Action=" + << Action.first << ", " << Action.second << "\n"); + return {Action.first, i, Action.second}; + } else + LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Legal\n"); + } + LLVM_DEBUG(dbgs() << ".. (legacy) Legal\n"); + return {Legal, 0, LLT{}}; +} + +LegalizeActionStep +LegalizerInfo::getAction(const MachineInstr &MI, + const MachineRegisterInfo &MRI) const { + SmallVector<LLT, 2> Types; + SmallBitVector SeenTypes(8); + const MCOperandInfo *OpInfo = MI.getDesc().OpInfo; + // FIXME: probably we'll need to cache the results here somehow? + for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) { + if (!OpInfo[i].isGenericType()) + continue; + + // We must only record actions once for each TypeIdx; otherwise we'd + // try to legalize operands multiple times down the line. 
+ unsigned TypeIdx = OpInfo[i].getGenericTypeIndex(); + if (SeenTypes[TypeIdx]) + continue; + + SeenTypes.set(TypeIdx); + + LLT Ty = getTypeFromTypeIdx(MI, MRI, i, TypeIdx); + Types.push_back(Ty); + } + + SmallVector<LegalityQuery::MemDesc, 2> MemDescrs; + for (const auto &MMO : MI.memoperands()) + MemDescrs.push_back({8 * MMO->getSize() /* in bits */, + 8 * MMO->getAlignment(), + MMO->getOrdering()}); + + return getAction({MI.getOpcode(), Types, MemDescrs}); +} + +bool LegalizerInfo::isLegal(const MachineInstr &MI, + const MachineRegisterInfo &MRI) const { + return getAction(MI, MRI).Action == Legal; +} + +bool LegalizerInfo::isLegalOrCustom(const MachineInstr &MI, + const MachineRegisterInfo &MRI) const { + auto Action = getAction(MI, MRI).Action; + // If the action is custom, it may not necessarily modify the instruction, + // so we have to assume it's legal. + return Action == Legal || Action == Custom; +} + +bool LegalizerInfo::legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder, + GISelChangeObserver &Observer) const { + return false; +} + +LegalizerInfo::SizeAndActionsVec +LegalizerInfo::increaseToLargerTypesAndDecreaseToLargest( + const SizeAndActionsVec &v, LegalizeAction IncreaseAction, + LegalizeAction DecreaseAction) { + SizeAndActionsVec result; + unsigned LargestSizeSoFar = 0; + if (v.size() >= 1 && v[0].first != 1) + result.push_back({1, IncreaseAction}); + for (size_t i = 0; i < v.size(); ++i) { + result.push_back(v[i]); + LargestSizeSoFar = v[i].first; + if (i + 1 < v.size() && v[i + 1].first != v[i].first + 1) { + result.push_back({LargestSizeSoFar + 1, IncreaseAction}); + LargestSizeSoFar = v[i].first + 1; + } + } + result.push_back({LargestSizeSoFar + 1, DecreaseAction}); + return result; +} + +LegalizerInfo::SizeAndActionsVec +LegalizerInfo::decreaseToSmallerTypesAndIncreaseToSmallest( + const SizeAndActionsVec &v, LegalizeAction DecreaseAction, + LegalizeAction IncreaseAction) { + SizeAndActionsVec result; + if (v.size() == 0 || v[0].first != 1) + result.push_back({1, IncreaseAction}); + for (size_t i = 0; i < v.size(); ++i) { + result.push_back(v[i]); + if (i + 1 == v.size() || v[i + 1].first != v[i].first + 1) { + result.push_back({v[i].first + 1, DecreaseAction}); + } + } + return result; +} + +LegalizerInfo::SizeAndAction +LegalizerInfo::findAction(const SizeAndActionsVec &Vec, const uint32_t Size) { + assert(Size >= 1); + // Find the last element in Vec that has a bitsize equal to or smaller than + // the requested bit size. + // That is the element just before the first element that is bigger than Size. + auto It = partition_point( + Vec, [=](const SizeAndAction &A) { return A.first <= Size; }); + assert(It != Vec.begin() && "Does Vec not start with size 1?"); + int VecIdx = It - Vec.begin() - 1; + + LegalizeAction Action = Vec[VecIdx].second; + switch (Action) { + case Legal: + case Lower: + case Libcall: + case Custom: + return {Size, Action}; + case FewerElements: + // FIXME: is this special case still needed and correct? + // Special case for scalarization: + if (Vec == SizeAndActionsVec({{1, FewerElements}})) + return {1, FewerElements}; + LLVM_FALLTHROUGH; + case NarrowScalar: { + // The following needs to be a loop, as for now, we do allow needing to + // go over "Unsupported" bit sizes before finding a legalizable bit size. + // e.g. (s8, WidenScalar), (s9, Unsupported), (s32, Legal). if Size==8, + // we need to iterate over s9, and then to s32 to return (s32, Legal). 
+ // If we want to get rid of the below loop, we should have stronger asserts + // when building the SizeAndActionsVecs, probably not allowing + // "Unsupported" unless at the ends of the vector. + for (int i = VecIdx - 1; i >= 0; --i) + if (!needsLegalizingToDifferentSize(Vec[i].second) && + Vec[i].second != Unsupported) + return {Vec[i].first, Action}; + llvm_unreachable(""); + } + case WidenScalar: + case MoreElements: { + // See above, the following needs to be a loop, at least for now. + for (std::size_t i = VecIdx + 1; i < Vec.size(); ++i) + if (!needsLegalizingToDifferentSize(Vec[i].second) && + Vec[i].second != Unsupported) + return {Vec[i].first, Action}; + llvm_unreachable(""); + } + case Unsupported: + return {Size, Unsupported}; + case NotFound: + case UseLegacyRules: + llvm_unreachable("NotFound"); + } + llvm_unreachable("Action has an unknown enum value"); +} + +std::pair<LegalizeAction, LLT> +LegalizerInfo::findScalarLegalAction(const InstrAspect &Aspect) const { + assert(Aspect.Type.isScalar() || Aspect.Type.isPointer()); + if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp) + return {NotFound, LLT()}; + const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode); + if (Aspect.Type.isPointer() && + AddrSpace2PointerActions[OpcodeIdx].find(Aspect.Type.getAddressSpace()) == + AddrSpace2PointerActions[OpcodeIdx].end()) { + return {NotFound, LLT()}; + } + const SmallVector<SizeAndActionsVec, 1> &Actions = + Aspect.Type.isPointer() + ? AddrSpace2PointerActions[OpcodeIdx] + .find(Aspect.Type.getAddressSpace()) + ->second + : ScalarActions[OpcodeIdx]; + if (Aspect.Idx >= Actions.size()) + return {NotFound, LLT()}; + const SizeAndActionsVec &Vec = Actions[Aspect.Idx]; + // FIXME: speed up this search, e.g. by using a results cache for repeated + // queries? + auto SizeAndAction = findAction(Vec, Aspect.Type.getSizeInBits()); + return {SizeAndAction.second, + Aspect.Type.isScalar() ? LLT::scalar(SizeAndAction.first) + : LLT::pointer(Aspect.Type.getAddressSpace(), + SizeAndAction.first)}; +} + +std::pair<LegalizeAction, LLT> +LegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const { + assert(Aspect.Type.isVector()); + // First legalize the vector element size, then legalize the number of + // lanes in the vector. 
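+  // e.g. (illustrative sizes) a query for <3 x s15> may first yield
+  // (WidenScalar, <3 x s32>) from the element-size table; a follow-up query
+  // for <3 x s32> then consults the lane-count table.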
+ if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp) + return {NotFound, Aspect.Type}; + const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode); + const unsigned TypeIdx = Aspect.Idx; + if (TypeIdx >= ScalarInVectorActions[OpcodeIdx].size()) + return {NotFound, Aspect.Type}; + const SizeAndActionsVec &ElemSizeVec = + ScalarInVectorActions[OpcodeIdx][TypeIdx]; + + LLT IntermediateType; + auto ElementSizeAndAction = + findAction(ElemSizeVec, Aspect.Type.getScalarSizeInBits()); + IntermediateType = + LLT::vector(Aspect.Type.getNumElements(), ElementSizeAndAction.first); + if (ElementSizeAndAction.second != Legal) + return {ElementSizeAndAction.second, IntermediateType}; + + auto i = NumElements2Actions[OpcodeIdx].find( + IntermediateType.getScalarSizeInBits()); + if (i == NumElements2Actions[OpcodeIdx].end()) { + return {NotFound, IntermediateType}; + } + const SizeAndActionsVec &NumElementsVec = (*i).second[TypeIdx]; + auto NumElementsAndAction = + findAction(NumElementsVec, IntermediateType.getNumElements()); + return {NumElementsAndAction.second, + LLT::vector(NumElementsAndAction.first, + IntermediateType.getScalarSizeInBits())}; +} + +bool LegalizerInfo::legalizeIntrinsic(MachineInstr &MI, + MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const { + return true; +} + +/// \pre Type indices of every opcode form a dense set starting from 0. +void LegalizerInfo::verify(const MCInstrInfo &MII) const { +#ifndef NDEBUG + std::vector<unsigned> FailedOpcodes; + for (unsigned Opcode = FirstOp; Opcode <= LastOp; ++Opcode) { + const MCInstrDesc &MCID = MII.get(Opcode); + const unsigned NumTypeIdxs = std::accumulate( + MCID.opInfo_begin(), MCID.opInfo_end(), 0U, + [](unsigned Acc, const MCOperandInfo &OpInfo) { + return OpInfo.isGenericType() + ? std::max(OpInfo.getGenericTypeIndex() + 1U, Acc) + : Acc; + }); + const unsigned NumImmIdxs = std::accumulate( + MCID.opInfo_begin(), MCID.opInfo_end(), 0U, + [](unsigned Acc, const MCOperandInfo &OpInfo) { + return OpInfo.isGenericImm() + ? std::max(OpInfo.getGenericImmIndex() + 1U, Acc) + : Acc; + }); + LLVM_DEBUG(dbgs() << MII.getName(Opcode) << " (opcode " << Opcode + << "): " << NumTypeIdxs << " type ind" + << (NumTypeIdxs == 1 ? "ex" : "ices") << ", " + << NumImmIdxs << " imm ind" + << (NumImmIdxs == 1 ? "ex" : "ices") << "\n"); + const LegalizeRuleSet &RuleSet = getActionDefinitions(Opcode); + if (!RuleSet.verifyTypeIdxsCoverage(NumTypeIdxs)) + FailedOpcodes.push_back(Opcode); + else if (!RuleSet.verifyImmIdxsCoverage(NumImmIdxs)) + FailedOpcodes.push_back(Opcode); + } + if (!FailedOpcodes.empty()) { + errs() << "The following opcodes have ill-defined legalization rules:"; + for (unsigned Opcode : FailedOpcodes) + errs() << " " << MII.getName(Opcode); + errs() << "\n"; + + report_fatal_error("ill-defined LegalizerInfo" + ", try -debug-only=legalizer-info for details"); + } +#endif +} + +#ifndef NDEBUG +// FIXME: This should be in the MachineVerifier, but it can't use the +// LegalizerInfo as it's currently in the separate GlobalISel library. +// Note that RegBankSelected property already checked in the verifier +// has the same layering problem, but we only use inline methods so +// end up not needing to link against the GlobalISel library. 
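+// Hypothetical usage sketch (not part of this patch): a post-legalization
+// check could do
+//   if (const MachineInstr *MI = machineFunctionIsIllegal(MF))
+//     report_fatal_error("GlobalISel left an illegal instruction");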
+const MachineInstr *llvm::machineFunctionIsIllegal(const MachineFunction &MF) {
+  if (const LegalizerInfo *MLI = MF.getSubtarget().getLegalizerInfo()) {
+    const MachineRegisterInfo &MRI = MF.getRegInfo();
+    for (const MachineBasicBlock &MBB : MF)
+      for (const MachineInstr &MI : MBB)
+        if (isPreISelGenericOpcode(MI.getOpcode()) &&
+            !MLI->isLegalOrCustom(MI, MRI))
+          return &MI;
+  }
+  return nullptr;
+}
+#endif
diff --git a/llvm/lib/CodeGen/GlobalISel/Localizer.cpp b/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
new file mode 100644
index 000000000000..f882ecbf5db3
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
@@ -0,0 +1,225 @@
+//===- Localizer.cpp ---------------------- Localize some instrs -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the Localizer class.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Localizer.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "localizer"
+
+using namespace llvm;
+
+char Localizer::ID = 0;
+INITIALIZE_PASS_BEGIN(Localizer, DEBUG_TYPE,
+                      "Move/duplicate certain instructions close to their use",
+                      false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(Localizer, DEBUG_TYPE,
+                    "Move/duplicate certain instructions close to their use",
+                    false, false)
+
+Localizer::Localizer() : MachineFunctionPass(ID) { }
+
+void Localizer::init(MachineFunction &MF) {
+  MRI = &MF.getRegInfo();
+  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(MF.getFunction());
+}
+
+bool Localizer::shouldLocalize(const MachineInstr &MI) {
+  // Assuming a spill and reload of a value has a cost of 1 instruction each,
+  // this helper function computes the maximum number of uses we should consider
+  // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
+  // break even in terms of code size when the original MI has 2 users vs
+  // choosing to potentially spill. Any more than 2 users and we have a net code
+  // size increase. This doesn't take into account register pressure though.
+  auto maxUses = [](unsigned RematCost) {
+    // A cost of 1 means remats are basically free.
+    if (RematCost == 1)
+      return UINT_MAX;
+    if (RematCost == 2)
+      return 2U;
+
+    // Remat is too expensive, only sink if there's one user.
+    if (RematCost > 2)
+      return 1U;
+    llvm_unreachable("Unexpected remat cost");
+  };
+
+  // Helper to walk through uses and terminate if we've reached a limit. Saves
+  // us spending time traversing uses if all we want to know is if it's >= min.
+  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
+    unsigned NumUses = 0;
+    auto UI = MRI->use_instr_nodbg_begin(Reg), UE = MRI->use_instr_nodbg_end();
+    for (; UI != UE && NumUses < MaxUses; ++UI) {
+      NumUses++;
+    }
+    // If we haven't reached the end yet then there are more than MaxUses users.
+    return UI == UE;
+  };
+
+  switch (MI.getOpcode()) {
+  default:
+    return false;
+  // Constant-like instructions should be close to their users.
+  // We don't want long live-ranges for them.
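+  // e.g. a G_CONSTANT defined once in the entry block but used only in a
+  // cold block would otherwise stay live across every intervening
+  // instruction.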
+  case TargetOpcode::G_CONSTANT:
+  case TargetOpcode::G_FCONSTANT:
+  case TargetOpcode::G_FRAME_INDEX:
+  case TargetOpcode::G_INTTOPTR:
+    return true;
+  case TargetOpcode::G_GLOBAL_VALUE: {
+    unsigned RematCost = TTI->getGISelRematGlobalCost();
+    Register Reg = MI.getOperand(0).getReg();
+    unsigned MaxUses = maxUses(RematCost);
+    if (MaxUses == UINT_MAX)
+      return true; // Remats are "free" so always localize.
+    bool B = isUsesAtMost(Reg, MaxUses);
+    return B;
+  }
+  }
+}
+
+void Localizer::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addRequired<TargetTransformInfoWrapperPass>();
+  getSelectionDAGFallbackAnalysisUsage(AU);
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool Localizer::isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
+                           MachineBasicBlock *&InsertMBB) {
+  MachineInstr &MIUse = *MOUse.getParent();
+  InsertMBB = MIUse.getParent();
+  if (MIUse.isPHI())
+    InsertMBB = MIUse.getOperand(MIUse.getOperandNo(&MOUse) + 1).getMBB();
+  return InsertMBB == Def.getParent();
+}
+
+bool Localizer::localizeInterBlock(MachineFunction &MF,
+                                   LocalizedSetVecT &LocalizedInstrs) {
+  bool Changed = false;
+  DenseMap<std::pair<MachineBasicBlock *, unsigned>, unsigned> MBBWithLocalDef;
+
+  // Since the IRTranslator only emits constants into the entry block, and the
+  // rest of the GISel pipeline generally emits constants close to their users,
+  // we only localize instructions in the entry block here. This might change if
+  // we start doing CSE across blocks.
+  auto &MBB = MF.front();
+  for (auto RI = MBB.rbegin(), RE = MBB.rend(); RI != RE; ++RI) {
+    MachineInstr &MI = *RI;
+    if (!shouldLocalize(MI))
+      continue;
+    LLVM_DEBUG(dbgs() << "Should localize: " << MI);
+    assert(MI.getDesc().getNumDefs() == 1 &&
+           "More than one definition not supported yet");
+    Register Reg = MI.getOperand(0).getReg();
+    // Check if all the users of MI are local.
+    // We are going to invalidate the list of use operands, so we
+    // can't use a range iterator.
+    for (auto MOIt = MRI->use_begin(Reg), MOItEnd = MRI->use_end();
+         MOIt != MOItEnd;) {
+      MachineOperand &MOUse = *MOIt++;
+      // Check if the use is already local.
+      MachineBasicBlock *InsertMBB;
+      LLVM_DEBUG(MachineInstr &MIUse = *MOUse.getParent();
+                 dbgs() << "Checking use: " << MIUse
+                        << " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n');
+      if (isLocalUse(MOUse, MI, InsertMBB))
+        continue;
+      LLVM_DEBUG(dbgs() << "Fixing non-local use\n");
+      Changed = true;
+      auto MBBAndReg = std::make_pair(InsertMBB, Reg);
+      auto NewVRegIt = MBBWithLocalDef.find(MBBAndReg);
+      if (NewVRegIt == MBBWithLocalDef.end()) {
+        // Create the localized instruction.
+        MachineInstr *LocalizedMI = MF.CloneMachineInstr(&MI);
+        LocalizedInstrs.insert(LocalizedMI);
+        MachineInstr &UseMI = *MOUse.getParent();
+        if (MRI->hasOneUse(Reg) && !UseMI.isPHI())
+          InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(UseMI), LocalizedMI);
+        else
+          InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(InsertMBB->begin()),
+                            LocalizedMI);
+
+        // Set a new register for the definition.
+        Register NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
+        MRI->setRegClassOrRegBank(NewReg, MRI->getRegClassOrRegBank(Reg));
+        LocalizedMI->getOperand(0).setReg(NewReg);
+        NewVRegIt =
+            MBBWithLocalDef.insert(std::make_pair(MBBAndReg, NewReg)).first;
+        LLVM_DEBUG(dbgs() << "Inserted: " << *LocalizedMI);
+      }
+      LLVM_DEBUG(dbgs() << "Update use with: " << printReg(NewVRegIt->second)
+                        << '\n');
+      // Update the user reg.
+      MOUse.setReg(NewVRegIt->second);
+    }
+  }
+  return Changed;
+}
+
+bool Localizer::localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs) {
+  bool Changed = false;
+
+  // For each already-localized instruction with multiple users, we scan the
+  // block top down from the current position until we hit one of them.
+
+  // FIXME: Consider doing inst duplication if live ranges are very long due to
+  // many users, but this case may be better served by regalloc improvements.
+
+  for (MachineInstr *MI : LocalizedInstrs) {
+    Register Reg = MI->getOperand(0).getReg();
+    MachineBasicBlock &MBB = *MI->getParent();
+    // All of the user MIs of this reg.
+    SmallPtrSet<MachineInstr *, 32> Users;
+    for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
+      if (!UseMI.isPHI())
+        Users.insert(&UseMI);
+    }
+    // If all the users were PHIs then they're not going to be in our block,
+    // don't try to move this instruction.
+    if (Users.empty())
+      continue;
+
+    MachineBasicBlock::iterator II(MI);
+    ++II;
+    while (II != MBB.end() && !Users.count(&*II))
+      ++II;
+
+    LLVM_DEBUG(dbgs() << "Intra-block: moving " << *MI << " before " << *&*II
+                      << "\n");
+    assert(II != MBB.end() && "Didn't find the user in the MBB");
+    MI->removeFromParent();
+    MBB.insert(II, MI);
+    Changed = true;
+  }
+  return Changed;
+}
+
+bool Localizer::runOnMachineFunction(MachineFunction &MF) {
+  // If the ISel pipeline failed, do not bother running this pass.
+  if (MF.getProperties().hasProperty(
+          MachineFunctionProperties::Property::FailedISel))
+    return false;
+
+  LLVM_DEBUG(dbgs() << "Localize instructions for: " << MF.getName() << '\n');
+
+  init(MF);
+
+  // Keep track of the instructions we localized. We'll do a second pass of
+  // intra-block localization to further reduce live ranges.
+  LocalizedSetVecT LocalizedInstrs;
+
+  bool Changed = localizeInterBlock(MF, LocalizedInstrs);
+  Changed |= localizeIntraBlock(LocalizedInstrs);
+  return Changed;
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
new file mode 100644
index 000000000000..df770f6664ca
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -0,0 +1,1180 @@
+//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the MachineIRBuilder class.
+//===----------------------------------------------------------------------===// +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" + +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetOpcodes.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/IR/DebugInfo.h" + +using namespace llvm; + +void MachineIRBuilder::setMF(MachineFunction &MF) { + State.MF = &MF; + State.MBB = nullptr; + State.MRI = &MF.getRegInfo(); + State.TII = MF.getSubtarget().getInstrInfo(); + State.DL = DebugLoc(); + State.II = MachineBasicBlock::iterator(); + State.Observer = nullptr; +} + +void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) { + State.MBB = &MBB; + State.II = MBB.end(); + assert(&getMF() == MBB.getParent() && + "Basic block is in a different function"); +} + +void MachineIRBuilder::setInstr(MachineInstr &MI) { + assert(MI.getParent() && "Instruction is not part of a basic block"); + setMBB(*MI.getParent()); + State.II = MI.getIterator(); +} + +void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; } + +void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB, + MachineBasicBlock::iterator II) { + assert(MBB.getParent() == &getMF() && + "Basic block is in a different function"); + State.MBB = &MBB; + State.II = II; +} + +void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const { + if (State.Observer) + State.Observer->createdInstr(*InsertedInstr); +} + +void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) { + State.Observer = &Observer; +} + +void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; } + +//------------------------------------------------------------------------------ +// Build instruction variants. 
+//------------------------------------------------------------------------------ + +MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) { + return insertInstr(buildInstrNoInsert(Opcode)); +} + +MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) { + MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode)); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) { + getMBB().insert(getInsertPt(), MIB); + recordInsertion(MIB); + return MIB; +} + +MachineInstrBuilder +MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + return insertInstr(BuildMI(getMF(), getDL(), + getTII().get(TargetOpcode::DBG_VALUE), + /*IsIndirect*/ false, Reg, Variable, Expr)); +} + +MachineInstrBuilder +MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + // DBG_VALUE insts now carry IR-level indirection in their DIExpression + // rather than encoding it in the instruction itself. + const DIExpression *DIExpr = cast<DIExpression>(Expr); + DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref}); + return insertInstr(BuildMI(getMF(), getDL(), + getTII().get(TargetOpcode::DBG_VALUE), + /*IsIndirect*/ false, Reg, Variable, DIExpr)); +} + +MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI, + const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + // DBG_VALUE insts now carry IR-level indirection in their DIExpression + // rather than encoding it in the instruction itself. + const DIExpression *DIExpr = cast<DIExpression>(Expr); + DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref}); + return buildInstr(TargetOpcode::DBG_VALUE) + .addFrameIndex(FI) + .addReg(0) + .addMetadata(Variable) + .addMetadata(DIExpr); +} + +MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C, + const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + auto MIB = buildInstr(TargetOpcode::DBG_VALUE); + if (auto *CI = dyn_cast<ConstantInt>(&C)) { + if (CI->getBitWidth() > 64) + MIB.addCImm(CI); + else + MIB.addImm(CI->getZExtValue()); + } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) { + MIB.addFPImm(CFP); + } else { + // Insert %noreg if we didn't find a usable constant and had to drop it. 
+ MIB.addReg(0U); + } + + return MIB.addReg(0).addMetadata(Variable).addMetadata(Expr); +} + +MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) { + assert(isa<DILabel>(Label) && "not a label"); + assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) && + "Expected inlined-at fields to agree"); + auto MIB = buildInstr(TargetOpcode::DBG_LABEL); + + return MIB.addMetadata(Label); +} + +MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res, + const SrcOp &Size, + unsigned Align) { + assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type"); + auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC); + Res.addDefToMIB(*getMRI(), MIB); + Size.addSrcToMIB(MIB); + MIB.addImm(Align); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res, + int Idx) { + assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX); + Res.addDefToMIB(*getMRI(), MIB); + MIB.addFrameIndex(Idx); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res, + const GlobalValue *GV) { + assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + assert(Res.getLLTTy(*getMRI()).getAddressSpace() == + GV->getType()->getAddressSpace() && + "address space mismatch"); + + auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE); + Res.addDefToMIB(*getMRI(), MIB); + MIB.addGlobalAddress(GV); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy, + unsigned JTI) { + return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {}) + .addJumpTableIndex(JTI); +} + +void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0, + const LLT &Op1) { + assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); + assert((Res == Op0 && Res == Op1) && "type mismatch"); +} + +void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0, + const LLT &Op1) { + assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); + assert((Res == Op0) && "type mismatch"); +} + +MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res, + const SrcOp &Op0, + const SrcOp &Op1) { + assert(Res.getLLTTy(*getMRI()).isPointer() && + Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); + assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type"); + + return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1}); +} + +Optional<MachineInstrBuilder> +MachineIRBuilder::materializeGEP(Register &Res, Register Op0, + const LLT &ValueTy, uint64_t Value) { + assert(Res == 0 && "Res is a result argument"); + assert(ValueTy.isScalar() && "invalid offset type"); + + if (Value == 0) { + Res = Op0; + return None; + } + + Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); + auto Cst = buildConstant(ValueTy, Value); + return buildGEP(Res, Op0, Cst.getReg(0)); +} + +MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res, + const SrcOp &Op0, + uint32_t NumBits) { + assert(Res.getLLTTy(*getMRI()).isPointer() && + Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); + + auto MIB = buildInstr(TargetOpcode::G_PTR_MASK); + Res.addDefToMIB(*getMRI(), MIB); + Op0.addSrcToMIB(MIB); + MIB.addImm(NumBits); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) { + return buildInstr(TargetOpcode::G_BR).addMBB(&Dest); +} + +MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) { + 
assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination"); + return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt); +} + +MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr, + unsigned JTI, + Register IndexReg) { + assert(getMRI()->getType(TablePtr).isPointer() && + "Table reg must be a pointer"); + return buildInstr(TargetOpcode::G_BRJT) + .addUse(TablePtr) + .addJumpTableIndex(JTI) + .addUse(IndexReg); +} + +MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::COPY, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, + const ConstantInt &Val) { + LLT Ty = Res.getLLTTy(*getMRI()); + LLT EltTy = Ty.getScalarType(); + assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() && + "creating constant with the wrong size"); + + if (Ty.isVector()) { + auto Const = buildInstr(TargetOpcode::G_CONSTANT) + .addDef(getMRI()->createGenericVirtualRegister(EltTy)) + .addCImm(&Val); + return buildSplatVector(Res, Const); + } + + auto Const = buildInstr(TargetOpcode::G_CONSTANT); + Res.addDefToMIB(*getMRI(), Const); + Const.addCImm(&Val); + return Const; +} + +MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, + int64_t Val) { + auto IntN = IntegerType::get(getMF().getFunction().getContext(), + Res.getLLTTy(*getMRI()).getScalarSizeInBits()); + ConstantInt *CI = ConstantInt::get(IntN, Val, true); + return buildConstant(Res, *CI); +} + +MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, + const ConstantFP &Val) { + LLT Ty = Res.getLLTTy(*getMRI()); + LLT EltTy = Ty.getScalarType(); + + assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) + == EltTy.getSizeInBits() && + "creating fconstant with the wrong size"); + + assert(!Ty.isPointer() && "invalid operand type"); + + if (Ty.isVector()) { + auto Const = buildInstr(TargetOpcode::G_FCONSTANT) + .addDef(getMRI()->createGenericVirtualRegister(EltTy)) + .addFPImm(&Val); + + return buildSplatVector(Res, Const); + } + + auto Const = buildInstr(TargetOpcode::G_FCONSTANT); + Res.addDefToMIB(*getMRI(), Const); + Const.addFPImm(&Val); + return Const; +} + +MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, + const APInt &Val) { + ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val); + return buildConstant(Res, *CI); +} + +MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, + double Val) { + LLT DstTy = Res.getLLTTy(*getMRI()); + auto &Ctx = getMF().getFunction().getContext(); + auto *CFP = + ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits())); + return buildFConstant(Res, *CFP); +} + +MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, + const APFloat &Val) { + auto &Ctx = getMF().getFunction().getContext(); + auto *CFP = ConstantFP::get(Ctx, Val); + return buildFConstant(Res, *CFP); +} + +MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst, + MachineBasicBlock &Dest) { + assert(getMRI()->getType(Tst).isScalar() && "invalid operand type"); + + return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest); +} + +MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res, + const SrcOp &Addr, + MachineMemOperand &MMO) { + return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO); +} + +MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode, + const DstOp &Res, + const SrcOp &Addr, + MachineMemOperand &MMO) { + 
assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type"); + assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + + auto MIB = buildInstr(Opcode); + Res.addDefToMIB(*getMRI(), MIB); + Addr.addSrcToMIB(MIB); + MIB.addMemOperand(&MMO); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val, + const SrcOp &Addr, + MachineMemOperand &MMO) { + assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type"); + assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + + auto MIB = buildInstr(TargetOpcode::G_STORE); + Val.addSrcToMIB(MIB); + Addr.addSrcToMIB(MIB); + MIB.addMemOperand(&MMO); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildUAddo(const DstOp &Res, + const DstOp &CarryOut, + const SrcOp &Op0, + const SrcOp &Op1) { + return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1}); +} + +MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res, + const DstOp &CarryOut, + const SrcOp &Op0, + const SrcOp &Op1, + const SrcOp &CarryIn) { + return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut}, + {Op0, Op1, CarryIn}); +} + +MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_ANYEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_SEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_ZEXT, Res, Op); +} + +unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const { + const auto *TLI = getMF().getSubtarget().getTargetLowering(); + switch (TLI->getBooleanContents(IsVec, IsFP)) { + case TargetLoweringBase::ZeroOrNegativeOneBooleanContent: + return TargetOpcode::G_SEXT; + case TargetLoweringBase::ZeroOrOneBooleanContent: + return TargetOpcode::G_ZEXT; + default: + return TargetOpcode::G_ANYEXT; + } +} + +MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res, + const SrcOp &Op, + bool IsFP) { + unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP); + return buildInstr(ExtOp, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc, + const DstOp &Res, + const SrcOp &Op) { + assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc || + TargetOpcode::G_SEXT == ExtOpc) && + "Expecting Extending Opc"); + assert(Res.getLLTTy(*getMRI()).isScalar() || + Res.getLLTTy(*getMRI()).isVector()); + assert(Res.getLLTTy(*getMRI()).isScalar() == + Op.getLLTTy(*getMRI()).isScalar()); + + unsigned Opcode = TargetOpcode::COPY; + if (Res.getLLTTy(*getMRI()).getSizeInBits() > + Op.getLLTTy(*getMRI()).getSizeInBits()) + Opcode = ExtOpc; + else if (Res.getLLTTy(*getMRI()).getSizeInBits() < + Op.getLLTTy(*getMRI()).getSizeInBits()) + Opcode = TargetOpcode::G_TRUNC; + else + assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI())); + + return buildInstr(Opcode, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op); +} + +MachineInstrBuilder 
MachineIRBuilder::buildCast(const DstOp &Dst,
+ const SrcOp &Src) {
+ LLT SrcTy = Src.getLLTTy(*getMRI());
+ LLT DstTy = Dst.getLLTTy(*getMRI());
+ if (SrcTy == DstTy)
+ return buildCopy(Dst, Src);
+
+ unsigned Opcode;
+ if (SrcTy.isPointer() && DstTy.isScalar())
+ Opcode = TargetOpcode::G_PTRTOINT;
+ else if (DstTy.isPointer() && SrcTy.isScalar())
+ Opcode = TargetOpcode::G_INTTOPTR;
+ else {
+ assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
+ Opcode = TargetOpcode::G_BITCAST;
+ }
+
+ return buildInstr(Opcode, Dst, Src);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
+ const SrcOp &Src,
+ uint64_t Index) {
+ LLT SrcTy = Src.getLLTTy(*getMRI());
+ LLT DstTy = Dst.getLLTTy(*getMRI());
+
+#ifndef NDEBUG
+ assert(SrcTy.isValid() && "invalid operand type");
+ assert(DstTy.isValid() && "invalid operand type");
+ assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
+ "extracting off end of register");
+#endif
+
+ if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
+ assert(Index == 0 && "extraction past the end of a register");
+ return buildCast(Dst, Src);
+ }
+
+ auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
+ Dst.addDefToMIB(*getMRI(), Extract);
+ Src.addSrcToMIB(Extract);
+ Extract.addImm(Index);
+ return Extract;
+}
+
+void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
+ ArrayRef<uint64_t> Indices) {
+#ifndef NDEBUG
+ assert(Ops.size() == Indices.size() && "incompatible args");
+ assert(!Ops.empty() && "invalid trivial sequence");
+ assert(std::is_sorted(Indices.begin(), Indices.end()) &&
+ "sequence offsets must be in ascending order");
+
+ assert(getMRI()->getType(Res).isValid() && "invalid operand type");
+ for (auto Op : Ops)
+ assert(getMRI()->getType(Op).isValid() && "invalid operand type");
+#endif
+
+ LLT ResTy = getMRI()->getType(Res);
+ LLT OpTy = getMRI()->getType(Ops[0]);
+ unsigned OpSize = OpTy.getSizeInBits();
+ bool MaybeMerge = true;
+ for (unsigned i = 0; i < Ops.size(); ++i) {
+ if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
+ MaybeMerge = false;
+ break;
+ }
+ }
+
+ if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
+ buildMerge(Res, Ops);
+ return;
+ }
+
+ Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
+ buildUndef(ResIn);
+
+ for (unsigned i = 0; i < Ops.size(); ++i) {
+ Register ResOut = i + 1 == Ops.size()
+ ? Res
+ : getMRI()->createGenericVirtualRegister(ResTy);
+ buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
+ ResIn = ResOut;
+ }
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
+ return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
+}
+
+MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
+ ArrayRef<Register> Ops) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ assert(TmpVec.size() > 1);
+ return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
+ const SrcOp &Op) {
+ // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
+ // we need some temporary storage for the DstOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
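+ // Editor's illustration (assumed example, not from the original patch):
+ // asking for two s32 pieces of an s64 value produces
+ //   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %val:_(s64)
+ // with one DstOp entry per element of Res.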
+ SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
+ assert(TmpVec.size() > 1);
+ return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
+ const SrcOp &Op) {
+ unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
+ SmallVector<Register, 8> TmpVec;
+ for (unsigned I = 0; I != NumReg; ++I)
+ TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
+ return buildUnmerge(TmpVec, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
+ const SrcOp &Op) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
+ // we need some temporary storage for the DstOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
+ assert(TmpVec.size() > 1);
+ return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
+ ArrayRef<Register> Ops) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
+ const SrcOp &Src) {
+ SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
+ ArrayRef<Register> Ops) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
+ Register Op, unsigned Index) {
+ assert(Index + getMRI()->getType(Op).getSizeInBits() <=
+ getMRI()->getType(Res).getSizeInBits() &&
+ "insertion past the end of a register");
+
+ if (getMRI()->getType(Res).getSizeInBits() ==
+ getMRI()->getType(Op).getSizeInBits()) {
+ return buildCast(Res, Op);
+ }
+
+ return buildInstr(TargetOpcode::G_INSERT)
+ .addDef(Res)
+ .addUse(Src)
+ .addUse(Op)
+ .addImm(Index);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
+ ArrayRef<Register> ResultRegs,
+ bool HasSideEffects) {
+ auto MIB =
+ buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
+ : TargetOpcode::G_INTRINSIC);
+ for (unsigned ResultReg : ResultRegs)
+ MIB.addDef(ResultReg);
+ MIB.addIntrinsicID(ID);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
+ ArrayRef<DstOp> Results,
+ bool HasSideEffects) {
+ auto MIB =
+ buildInstr(HasSideEffects ? 
TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS + : TargetOpcode::G_INTRINSIC); + for (DstOp Result : Results) + Result.addDefToMIB(*getMRI(), MIB); + MIB.addIntrinsicID(ID); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_TRUNC, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred, + const DstOp &Res, + const SrcOp &Op0, + const SrcOp &Op1) { + return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}); +} + +MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred, + const DstOp &Res, + const SrcOp &Op0, + const SrcOp &Op1, + Optional<unsigned> Flags) { + + return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags); +} + +MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res, + const SrcOp &Tst, + const SrcOp &Op0, + const SrcOp &Op1, + Optional<unsigned> Flags) { + + return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags); +} + +MachineInstrBuilder +MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, + const SrcOp &Elt, const SrcOp &Idx) { + return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx}); +} + +MachineInstrBuilder +MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, + const SrcOp &Idx) { + return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx}); +} + +MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess( + Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, + Register NewVal, MachineMemOperand &MMO) { +#ifndef NDEBUG + LLT OldValResTy = getMRI()->getType(OldValRes); + LLT SuccessResTy = getMRI()->getType(SuccessRes); + LLT AddrTy = getMRI()->getType(Addr); + LLT CmpValTy = getMRI()->getType(CmpVal); + LLT NewValTy = getMRI()->getType(NewVal); + assert(OldValResTy.isScalar() && "invalid operand type"); + assert(SuccessResTy.isScalar() && "invalid operand type"); + assert(AddrTy.isPointer() && "invalid operand type"); + assert(CmpValTy.isValid() && "invalid operand type"); + assert(NewValTy.isValid() && "invalid operand type"); + assert(OldValResTy == CmpValTy && "type mismatch"); + assert(OldValResTy == NewValTy && "type mismatch"); +#endif + + return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS) + .addDef(OldValRes) + .addDef(SuccessRes) + .addUse(Addr) + .addUse(CmpVal) + .addUse(NewVal) + .addMemOperand(&MMO); +} + +MachineInstrBuilder +MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr, + Register CmpVal, Register NewVal, + MachineMemOperand &MMO) { +#ifndef NDEBUG + LLT OldValResTy = getMRI()->getType(OldValRes); + LLT AddrTy = getMRI()->getType(Addr); + LLT CmpValTy = getMRI()->getType(CmpVal); + LLT NewValTy = getMRI()->getType(NewVal); + assert(OldValResTy.isScalar() && "invalid operand type"); + assert(AddrTy.isPointer() && "invalid operand type"); + assert(CmpValTy.isValid() && "invalid operand type"); + assert(NewValTy.isValid() && "invalid operand type"); + assert(OldValResTy == CmpValTy && "type mismatch"); + assert(OldValResTy == NewValTy && "type mismatch"); +#endif + + return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG) + .addDef(OldValRes) + .addUse(Addr) + .addUse(CmpVal) + .addUse(NewVal) + .addMemOperand(&MMO); +} + +MachineInstrBuilder MachineIRBuilder::buildAtomicRMW( + unsigned 
Opcode, const DstOp &OldValRes, + const SrcOp &Addr, const SrcOp &Val, + MachineMemOperand &MMO) { + +#ifndef NDEBUG + LLT OldValResTy = OldValRes.getLLTTy(*getMRI()); + LLT AddrTy = Addr.getLLTTy(*getMRI()); + LLT ValTy = Val.getLLTTy(*getMRI()); + assert(OldValResTy.isScalar() && "invalid operand type"); + assert(AddrTy.isPointer() && "invalid operand type"); + assert(ValTy.isValid() && "invalid operand type"); + assert(OldValResTy == ValTy && "type mismatch"); + assert(MMO.isAtomic() && "not atomic mem operand"); +#endif + + auto MIB = buildInstr(Opcode); + OldValRes.addDefToMIB(*getMRI(), MIB); + Addr.addSrcToMIB(MIB); + Val.addSrcToMIB(MIB); + MIB.addMemOperand(&MMO); + return MIB; +} + +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, + Register Addr, + Register Val, + MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val, + MMO); +} + +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWFAdd( + const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, + MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val, + MMO); +} + +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, + MachineMemOperand &MMO) { 
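+ // Editor's note (illustrative, not from the original patch): as with the
+ // integer variants above, this is a thin wrapper over buildAtomicRMW,
+ // yielding e.g.
+ //   %old:_(s32) = G_ATOMICRMW_FSUB %addr(p0), %val :: (load store ...)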
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val, + MMO); +} + +MachineInstrBuilder +MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) { + return buildInstr(TargetOpcode::G_FENCE) + .addImm(Ordering) + .addImm(Scope); +} + +MachineInstrBuilder +MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) { +#ifndef NDEBUG + assert(getMRI()->getType(Res).isPointer() && "invalid res type"); +#endif + + return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA); +} + +void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy, + bool IsExtend) { +#ifndef NDEBUG + if (DstTy.isVector()) { + assert(SrcTy.isVector() && "mismatched cast between vector and non-vector"); + assert(SrcTy.getNumElements() == DstTy.getNumElements() && + "different number of elements in a trunc/ext"); + } else + assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc"); + + if (IsExtend) + assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() && + "invalid narrowing extend"); + else + assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() && + "invalid widening trunc"); +#endif +} + +void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy, + const LLT &Op0Ty, const LLT &Op1Ty) { +#ifndef NDEBUG + assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) && + "invalid operand type"); + assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch"); + if (ResTy.isScalar() || ResTy.isPointer()) + assert(TstTy.isScalar() && "type mismatch"); + else + assert((TstTy.isScalar() || + (TstTy.isVector() && + TstTy.getNumElements() == Op0Ty.getNumElements())) && + "type mismatch"); +#endif +} + +MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, + ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, + Optional<unsigned> Flags) { + switch (Opc) { + default: + break; + case TargetOpcode::G_SELECT: { + assert(DstOps.size() == 1 && "Invalid select"); + assert(SrcOps.size() == 3 && "Invalid select"); + validateSelectOp( + DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()), + SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI())); + break; + } + case TargetOpcode::G_ADD: + case TargetOpcode::G_AND: + case TargetOpcode::G_MUL: + case TargetOpcode::G_OR: + case TargetOpcode::G_SUB: + case TargetOpcode::G_XOR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_SREM: + case TargetOpcode::G_SMIN: + case TargetOpcode::G_SMAX: + case TargetOpcode::G_UMIN: + case TargetOpcode::G_UMAX: { + // All these are binary ops. 
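+ // Editor's sketch of the invariant checked below (illustrative):
+ //   %d:_(s32) = G_ADD %a:_(s32), %b:_(s32)  // accepted: types all match
+ //   %d:_(s64) = G_ADD %a:_(s32), %b:_(s32)  // rejected by validateBinaryOp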
+ assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 2 && "Invalid Srcs"); + validateBinaryOp(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), + SrcOps[1].getLLTTy(*getMRI())); + break; + } + case TargetOpcode::G_SHL: + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: { + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 2 && "Invalid Srcs"); + validateShiftOp(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), + SrcOps[1].getLLTTy(*getMRI())); + break; + } + case TargetOpcode::G_SEXT: + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_ANYEXT: + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 1 && "Invalid Srcs"); + validateTruncExt(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), true); + break; + case TargetOpcode::G_TRUNC: + case TargetOpcode::G_FPTRUNC: { + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 1 && "Invalid Srcs"); + validateTruncExt(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), false); + break; + } + case TargetOpcode::COPY: + assert(DstOps.size() == 1 && "Invalid Dst"); + // If the caller wants to add a subreg source it has to be done separately + // so we may not have any SrcOps at this point yet. + break; + case TargetOpcode::G_FCMP: + case TargetOpcode::G_ICMP: { + assert(DstOps.size() == 1 && "Invalid Dst Operands"); + assert(SrcOps.size() == 3 && "Invalid Src Operands"); + // For F/ICMP, the first src operand is the predicate, followed by + // the two comparands. + assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate && + "Expecting predicate"); + assert([&]() -> bool { + CmpInst::Predicate Pred = SrcOps[0].getPredicate(); + return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred) + : CmpInst::isFPPredicate(Pred); + }() && "Invalid predicate"); + assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && + "Type mismatch"); + assert([&]() -> bool { + LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI()); + LLT DstTy = DstOps[0].getLLTTy(*getMRI()); + if (Op0Ty.isScalar() || Op0Ty.isPointer()) + return DstTy.isScalar(); + else + return DstTy.isVector() && + DstTy.getNumElements() == Op0Ty.getNumElements(); + }() && "Type Mismatch"); + break; + } + case TargetOpcode::G_UNMERGE_VALUES: { + assert(!DstOps.empty() && "Invalid trivial sequence"); + assert(SrcOps.size() == 1 && "Invalid src for Unmerge"); + assert(std::all_of(DstOps.begin(), DstOps.end(), + [&, this](const DstOp &Op) { + return Op.getLLTTy(*getMRI()) == + DstOps[0].getLLTTy(*getMRI()); + }) && + "type mismatch in output list"); + assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == + SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && + "input operands do not cover output register"); + break; + } + case TargetOpcode::G_MERGE_VALUES: { + assert(!SrcOps.empty() && "invalid trivial sequence"); + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(std::all_of(SrcOps.begin(), SrcOps.end(), + [&, this](const SrcOp &Op) { + return Op.getLLTTy(*getMRI()) == + SrcOps[0].getLLTTy(*getMRI()); + }) && + "type mismatch in input list"); + assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == + DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && + "input operands do not cover output register"); + if (SrcOps.size() == 1) + return buildCast(DstOps[0], SrcOps[0]); + if (DstOps[0].getLLTTy(*getMRI()).isVector()) { + if (SrcOps[0].getLLTTy(*getMRI()).isVector()) + return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, 
SrcOps);
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
+ }
+ break;
+ }
+ case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
+ assert(DstOps.size() == 1 && "Invalid Dst size");
+ assert(SrcOps.size() == 2 && "Invalid Src size");
+ assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
+ assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
+ DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
+ "Invalid operand type");
+ assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
+ assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
+ DstOps[0].getLLTTy(*getMRI()) &&
+ "Type mismatch");
+ break;
+ }
+ case TargetOpcode::G_INSERT_VECTOR_ELT: {
+ assert(DstOps.size() == 1 && "Invalid dst size");
+ assert(SrcOps.size() == 3 && "Invalid src size");
+ assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
+ SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
+ assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
+ SrcOps[1].getLLTTy(*getMRI()) &&
+ "Type mismatch");
+ assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
+ assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
+ SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
+ "Type mismatch");
+ break;
+ }
+ case TargetOpcode::G_BUILD_VECTOR: {
+ assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
+ assert(DstOps.size() == 1 && "Invalid DstOps");
+ assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
+ "Res type must be a vector");
+ assert(std::all_of(SrcOps.begin(), SrcOps.end(),
+ [&, this](const SrcOp &Op) {
+ return Op.getLLTTy(*getMRI()) ==
+ SrcOps[0].getLLTTy(*getMRI());
+ }) &&
+ "type mismatch in input list");
+ assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
+ "input scalars do not exactly cover the output vector register");
+ break;
+ }
+ case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
+ assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
+ assert(DstOps.size() == 1 && "Invalid DstOps");
+ assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
+ "Res type must be a vector");
+ assert(std::all_of(SrcOps.begin(), SrcOps.end(),
+ [&, this](const SrcOp &Op) {
+ return Op.getLLTTy(*getMRI()) ==
+ SrcOps[0].getLLTTy(*getMRI());
+ }) &&
+ "type mismatch in input list");
+ if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
+ break;
+ }
+ case TargetOpcode::G_CONCAT_VECTORS: {
+ assert(DstOps.size() == 1 && "Invalid DstOps");
+ assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
+ assert(std::all_of(SrcOps.begin(), SrcOps.end(),
+ [&, this](const SrcOp &Op) {
+ return (Op.getLLTTy(*getMRI()).isVector() &&
+ Op.getLLTTy(*getMRI()) ==
+ SrcOps[0].getLLTTy(*getMRI()));
+ }) &&
+ "type mismatch in input list");
+ assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
+ "input vectors do not exactly cover the output vector register");
+ break;
+ }
+ case TargetOpcode::G_UADDE: {
+ assert(DstOps.size() == 2 && "Invalid no of dst operands");
+ assert(SrcOps.size() == 3 && "Invalid no of src operands");
+ assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
+ assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
+ (DstOps[0].getLLTTy(*getMRI()) == 
SrcOps[1].getLLTTy(*getMRI())) && + "Invalid operand"); + assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); + assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && + "type mismatch"); + break; + } + } + + auto MIB = buildInstr(Opc); + for (const DstOp &Op : DstOps) + Op.addDefToMIB(*getMRI(), MIB); + for (const SrcOp &Op : SrcOps) + Op.addSrcToMIB(MIB); + if (Flags) + MIB->setFlags(*Flags); + return MIB; +} diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp new file mode 100644 index 000000000000..f0e35c65c53b --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp @@ -0,0 +1,1073 @@ +//==- llvm/CodeGen/GlobalISel/RegBankSelect.cpp - RegBankSelect --*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the RegBankSelect class. +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/RegBankSelect.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" +#include "llvm/CodeGen/GlobalISel/RegisterBank.h" +#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineBlockFrequencyInfo.h" +#include "llvm/CodeGen/MachineBranchProbabilityInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetOpcodes.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/Function.h" +#include "llvm/Pass.h" +#include "llvm/Support/BlockFrequency.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <cstdint> +#include <limits> +#include <memory> +#include <utility> + +#define DEBUG_TYPE "regbankselect" + +using namespace llvm; + +static cl::opt<RegBankSelect::Mode> RegBankSelectMode( + cl::desc("Mode of the RegBankSelect pass"), cl::Hidden, cl::Optional, + cl::values(clEnumValN(RegBankSelect::Mode::Fast, "regbankselect-fast", + "Run the Fast mode (default mapping)"), + clEnumValN(RegBankSelect::Mode::Greedy, "regbankselect-greedy", + "Use the Greedy mode (best local mapping)"))); + +char RegBankSelect::ID = 0; + +INITIALIZE_PASS_BEGIN(RegBankSelect, DEBUG_TYPE, + "Assign register bank of generic virtual registers", + false, false); +INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo) +INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, + "Assign register bank of generic virtual registers", false, + false) + +RegBankSelect::RegBankSelect(Mode RunningMode) + : 
MachineFunctionPass(ID), OptMode(RunningMode) {
+ if (RegBankSelectMode.getNumOccurrences() != 0) {
+ OptMode = RegBankSelectMode;
+ if (RegBankSelectMode != RunningMode)
+ LLVM_DEBUG(dbgs() << "RegBankSelect mode overridden by command line\n");
+ }
+}
+
+void RegBankSelect::init(MachineFunction &MF) {
+ RBI = MF.getSubtarget().getRegBankInfo();
+ assert(RBI && "Cannot work without RegisterBankInfo");
+ MRI = &MF.getRegInfo();
+ TRI = MF.getSubtarget().getRegisterInfo();
+ TPC = &getAnalysis<TargetPassConfig>();
+ if (OptMode != Mode::Fast) {
+ MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+ MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
+ } else {
+ MBFI = nullptr;
+ MBPI = nullptr;
+ }
+ MIRBuilder.setMF(MF);
+ MORE = std::make_unique<MachineOptimizationRemarkEmitter>(MF, MBFI);
+}
+
+void RegBankSelect::getAnalysisUsage(AnalysisUsage &AU) const {
+ if (OptMode != Mode::Fast) {
+ // We could preserve the information from these two analyses but
+ // the APIs do not allow us to do so yet.
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ }
+ AU.addRequired<TargetPassConfig>();
+ getSelectionDAGFallbackAnalysisUsage(AU);
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool RegBankSelect::assignmentMatch(
+ Register Reg, const RegisterBankInfo::ValueMapping &ValMapping,
+ bool &OnlyAssign) const {
+ // By default we assume we will have to repair something.
+ OnlyAssign = false;
+ // Each part of a break down needs to end up in a different register.
+ // In other words, Reg's assignment does not match.
+ if (ValMapping.NumBreakDowns != 1)
+ return false;
+
+ const RegisterBank *CurRegBank = RBI->getRegBank(Reg, *MRI, *TRI);
+ const RegisterBank *DesiredRegBrank = ValMapping.BreakDown[0].RegBank;
+ // Reg is free of assignment, a simple assignment will make the
+ // register bank match.
+ OnlyAssign = CurRegBank == nullptr;
+ LLVM_DEBUG(dbgs() << "Does assignment already match: ";
+ if (CurRegBank) dbgs() << *CurRegBank; else dbgs() << "none";
+ dbgs() << " against ";
+ assert(DesiredRegBrank && "The mapping must be valid");
+ dbgs() << *DesiredRegBrank << '\n';);
+ return CurRegBank == DesiredRegBrank;
+}
+
+bool RegBankSelect::repairReg(
+ MachineOperand &MO, const RegisterBankInfo::ValueMapping &ValMapping,
+ RegBankSelect::RepairingPlacement &RepairPt,
+ const iterator_range<SmallVectorImpl<Register>::const_iterator> &NewVRegs) {
+
+ assert(ValMapping.NumBreakDowns == (unsigned)size(NewVRegs) &&
+ "need new vreg for each breakdown");
+
+ // An empty range of new registers means no repairing.
+ assert(!NewVRegs.empty() && "We should not have to repair");
+
+ MachineInstr *MI;
+ if (ValMapping.NumBreakDowns == 1) {
+ // Assume we are repairing a use and thus, the original reg will be
+ // the source of the repairing.
+ Register Src = MO.getReg();
+ Register Dst = *NewVRegs.begin();
+
+ // If we repair a definition, swap the source and destination for
+ // the repairing.
+ if (MO.isDef())
+ std::swap(Src, Dst);
+
+ assert((RepairPt.getNumInsertPoints() == 1 ||
+ Register::isPhysicalRegister(Dst)) &&
+ "We are about to create several defs for Dst");
+
+ // Build the instruction used to repair, then clone it at the right
+ // places. Avoiding buildCopy bypasses the check that Src and Dst have the
+ // same types because the type is a placeholder when this function is called.
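+ // Editor's illustration (register names assumed): repairing a use that
+ // sits in the wrong bank boils down to
+ //   %new:<desired bank> = COPY %old
+ // and rewriting the operand to %new; for a def, the swap above makes the
+ // copy move the freshly defined value back into the original register.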
+ MI = MIRBuilder.buildInstrNoInsert(TargetOpcode::COPY) + .addDef(Dst) + .addUse(Src); + LLVM_DEBUG(dbgs() << "Copy: " << printReg(Src) << " to: " << printReg(Dst) + << '\n'); + } else { + // TODO: Support with G_IMPLICIT_DEF + G_INSERT sequence or G_EXTRACT + // sequence. + assert(ValMapping.partsAllUniform() && "irregular breakdowns not supported"); + + LLT RegTy = MRI->getType(MO.getReg()); + if (MO.isDef()) { + unsigned MergeOp; + if (RegTy.isVector()) { + if (ValMapping.NumBreakDowns == RegTy.getNumElements()) + MergeOp = TargetOpcode::G_BUILD_VECTOR; + else { + assert( + (ValMapping.BreakDown[0].Length * ValMapping.NumBreakDowns == + RegTy.getSizeInBits()) && + (ValMapping.BreakDown[0].Length % RegTy.getScalarSizeInBits() == + 0) && + "don't understand this value breakdown"); + + MergeOp = TargetOpcode::G_CONCAT_VECTORS; + } + } else + MergeOp = TargetOpcode::G_MERGE_VALUES; + + auto MergeBuilder = + MIRBuilder.buildInstrNoInsert(MergeOp) + .addDef(MO.getReg()); + + for (Register SrcReg : NewVRegs) + MergeBuilder.addUse(SrcReg); + + MI = MergeBuilder; + } else { + MachineInstrBuilder UnMergeBuilder = + MIRBuilder.buildInstrNoInsert(TargetOpcode::G_UNMERGE_VALUES); + for (Register DefReg : NewVRegs) + UnMergeBuilder.addDef(DefReg); + + UnMergeBuilder.addUse(MO.getReg()); + MI = UnMergeBuilder; + } + } + + if (RepairPt.getNumInsertPoints() != 1) + report_fatal_error("need testcase to support multiple insertion points"); + + // TODO: + // Check if MI is legal. if not, we need to legalize all the + // instructions we are going to insert. + std::unique_ptr<MachineInstr *[]> NewInstrs( + new MachineInstr *[RepairPt.getNumInsertPoints()]); + bool IsFirst = true; + unsigned Idx = 0; + for (const std::unique_ptr<InsertPoint> &InsertPt : RepairPt) { + MachineInstr *CurMI; + if (IsFirst) + CurMI = MI; + else + CurMI = MIRBuilder.getMF().CloneMachineInstr(MI); + InsertPt->insert(*CurMI); + NewInstrs[Idx++] = CurMI; + IsFirst = false; + } + // TODO: + // Legalize NewInstrs if need be. + return true; +} + +uint64_t RegBankSelect::getRepairCost( + const MachineOperand &MO, + const RegisterBankInfo::ValueMapping &ValMapping) const { + assert(MO.isReg() && "We should only repair register operand"); + assert(ValMapping.NumBreakDowns && "Nothing to map??"); + + bool IsSameNumOfValues = ValMapping.NumBreakDowns == 1; + const RegisterBank *CurRegBank = RBI->getRegBank(MO.getReg(), *MRI, *TRI); + // If MO does not have a register bank, we should have just been + // able to set one unless we have to break the value down. + assert(CurRegBank || MO.isDef()); + + // Def: Val <- NewDefs + // Same number of values: copy + // Different number: Val = build_sequence Defs1, Defs2, ... + // Use: NewSources <- Val. + // Same number of values: copy. + // Different number: Src1, Src2, ... = + // extract_value Val, Src1Begin, Src1Len, Src2Begin, Src2Len, ... + // We should remember that this value is available somewhere else to + // coalesce the value. + + if (ValMapping.NumBreakDowns != 1) + return RBI->getBreakDownCost(ValMapping, CurRegBank); + + if (IsSameNumOfValues) { + const RegisterBank *DesiredRegBrank = ValMapping.BreakDown[0].RegBank; + // If we repair a definition, swap the source and destination for + // the repairing. + if (MO.isDef()) + std::swap(CurRegBank, DesiredRegBrank); + // TODO: It may be possible to actually avoid the copy. + // If we repair something where the source is defined by a copy + // and the source of that copy is on the right bank, we can reuse + // it for free. 
+ // E.g.,
+ // RegToRepair<BankA> = copy AlternativeSrc<BankB>
+ // = op RegToRepair<BankA>
+ // We can simply propagate AlternativeSrc instead of copying RegToRepair
+ // into a new virtual register.
+ // We would also need to propagate this information in the
+ // repairing placement.
+ unsigned Cost = RBI->copyCost(*DesiredRegBrank, *CurRegBank,
+ RBI->getSizeInBits(MO.getReg(), *MRI, *TRI));
+ // TODO: use a dedicated constant for ImpossibleCost.
+ if (Cost != std::numeric_limits<unsigned>::max())
+ return Cost;
+ // Return the legalization cost of that repairing.
+ }
+ return std::numeric_limits<unsigned>::max();
+}
+
+const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
+ MachineInstr &MI, RegisterBankInfo::InstructionMappings &PossibleMappings,
+ SmallVectorImpl<RepairingPlacement> &RepairPts) {
+ assert(!PossibleMappings.empty() &&
+ "Do not know how to map this instruction");
+
+ const RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
+ MappingCost Cost = MappingCost::ImpossibleCost();
+ SmallVector<RepairingPlacement, 4> LocalRepairPts;
+ for (const RegisterBankInfo::InstructionMapping *CurMapping :
+ PossibleMappings) {
+ MappingCost CurCost =
+ computeMapping(MI, *CurMapping, LocalRepairPts, &Cost);
+ if (CurCost < Cost) {
+ LLVM_DEBUG(dbgs() << "New best: " << CurCost << '\n');
+ Cost = CurCost;
+ BestMapping = CurMapping;
+ RepairPts.clear();
+ for (RepairingPlacement &RepairPt : LocalRepairPts)
+ RepairPts.emplace_back(std::move(RepairPt));
+ }
+ }
+ if (!BestMapping && !TPC->isGlobalISelAbortEnabled()) {
+ // If none of the mappings worked that means they are all impossible.
+ // Thus, pick the first one and set an impossible repairing point.
+ // It will trigger the failed isel mode.
+ BestMapping = *PossibleMappings.begin();
+ RepairPts.emplace_back(
+ RepairingPlacement(MI, 0, *TRI, *this, RepairingPlacement::Impossible));
+ } else
+ assert(BestMapping && "No suitable mapping for instruction");
+ return *BestMapping;
+}
+
+void RegBankSelect::tryAvoidingSplit(
+ RegBankSelect::RepairingPlacement &RepairPt, const MachineOperand &MO,
+ const RegisterBankInfo::ValueMapping &ValMapping) const {
+ const MachineInstr &MI = *MO.getParent();
+ assert(RepairPt.hasSplit() && "We should not have to adjust for split");
+ // Splitting should only occur for PHIs or between terminators,
+ // because we only do local repairing.
+ assert((MI.isPHI() || MI.isTerminator()) && "Why do we split?");
+
+ assert(&MI.getOperand(RepairPt.getOpIdx()) == &MO &&
+ "Repairing placement does not match operand");
+
+ // If we need splitting for phis, that means it is because we
+ // could not find an insertion point before the terminators of
+ // the predecessor block for this argument. In other words,
+ // the input value is defined by one of the terminators.
+ assert((!MI.isPHI() || !MO.isDef()) && "Need split for phi def?");
+
+ // We split to repair the use of a phi or a terminator.
+ if (!MO.isDef()) {
+ if (MI.isTerminator()) {
+ assert(&MI != &(*MI.getParent()->getFirstTerminator()) &&
+ "Need to split for the first terminator?!");
+ } else {
+ // For the PHI case, the split may not be actually required.
+ // In the copy case, a phi is already a copy on the incoming edge,
+ // therefore there is no need to split.
+ if (ValMapping.NumBreakDowns == 1)
+ // This is already a copy, there is nothing to do.
+ RepairPt.switchTo(RepairingPlacement::RepairingKind::Reassign);
+ }
+ return;
+ }
+
+ // At this point, we need to repair a definition of a terminator.
+
+ // Technically we need to fix the def of MI on all outgoing
+ // edges of MI to keep the repairing local. In other words, we
+ // will create several definitions of the same register. This
+ // does not work for SSA unless that definition is a physical
+ // register.
+ // However, there are other cases where we can get away with
+ // that while still keeping the repairing local.
+ assert(MI.isTerminator() && MO.isDef() &&
+ "This code is for the def of a terminator");
+
+ // Since we use RPO traversal, if we need to repair a definition
+ // this means this definition could be:
+ // 1. Used by PHIs (i.e., this VReg has been visited as part of the
+ // uses of a phi.), or
+ // 2. Part of a target specific instruction (i.e., the target applied
+ // some register class constraints when creating the instruction.)
+ // If the constraints come from #2, the target said that another mapping
+ // is supported so we may just drop them. Indeed, if we do not change
+ // the number of registers holding that value, the uses will get fixed
+ // when we get to them.
+ // Uses in PHIs may have already been processed though.
+ // If the constraints come from #1, then, those are weak constraints and
+ // no actual uses may rely on them. However, the problem remains mainly
+ // the same as for #2. If the value stays in one register, we could
+ // just switch the register bank of the definition, but we would need to
+ // account for a repairing cost for each phi we silently change.
+ //
+ // In any case, if the value needs to be broken down into several
+ // registers, the repairing is not local anymore as we need to patch
+ // every use to rebuild the value in just one register.
+ //
+ // To summarize:
+ // - If the value is in a physical register, we can do the split and
+ // fix locally.
+ // Otherwise if the value is in a virtual register:
+ // - If the value remains in one register, we do not have to split;
+ // just switching the register bank would do, but we need to account
+ // in the repairing cost for all the phis we changed.
+ // - If the value spans several registers, then we cannot do a local
+ // repairing.
+
+ // Check if this is a physical or virtual register.
+ Register Reg = MO.getReg();
+ if (Register::isPhysicalRegister(Reg)) {
+ // We are going to split every outgoing edge.
+ // Check that this is possible.
+ // FIXME: The machine representation is currently broken
+ // since it also allows several terminators in one basic block.
+ // Because of that we would technically need a way to get
+ // the targets of just one terminator to know which edges
+ // we have to split.
+ // Assert that we do not hit the ill-formed representation.
+
+ // If there are other terminators before that one, some of
+ // the outgoing edges may not be dominated by this definition.
+ assert(&MI == &(*MI.getParent()->getFirstTerminator()) &&
+ "Do not know which outgoing edges are relevant");
+ const MachineInstr *Next = MI.getNextNode();
+ assert((!Next || Next->isUnconditionalBranch()) &&
+ "Do not know where each terminator ends up");
+ if (Next)
+ // If the next terminator uses Reg, this means we have
+ // to split right after MI and thus we need a way to ask
+ // which outgoing edges are affected.
+ assert(!Next->readsRegister(Reg) && "Need to split between terminators");
+ // We will split all the edges and repair there.
+ } else {
+ // This is a virtual register defined by a terminator.
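+ // Editor's note (illustrative): think of a target branch pseudo that
+ // also defines a condition vreg. A single-register repair merely risks
+ // the mis-costing asserted on below; a multi-register breakdown cannot
+ // be repaired locally at all.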
+ if (ValMapping.NumBreakDowns == 1) {
+ // There is nothing to repair, but the repairing cost may be
+ // inaccurate because of the PHIs we already processed, as
+ // stated above.
+ // The code will still be correct, though.
+ assert(false && "Repairing cost may not be accurate");
+ } else {
+ // We need to do non-local repairing. Basically, patch all
+ // the uses (i.e., phis) that we already processed.
+ // For now, just say this mapping is not possible.
+ RepairPt.switchTo(RepairingPlacement::RepairingKind::Impossible);
+ }
+ }
+}
+
+RegBankSelect::MappingCost RegBankSelect::computeMapping(
+ MachineInstr &MI, const RegisterBankInfo::InstructionMapping &InstrMapping,
+ SmallVectorImpl<RepairingPlacement> &RepairPts,
+ const RegBankSelect::MappingCost *BestCost) {
+ assert((MBFI || !BestCost) && "Costs comparison requires MBFI");
+
+ if (!InstrMapping.isValid())
+ return MappingCost::ImpossibleCost();
+
+ // If mapped with InstrMapping, MI will have the recorded cost.
+ MappingCost Cost(MBFI ? MBFI->getBlockFreq(MI.getParent()) : 1);
+ bool Saturated = Cost.addLocalCost(InstrMapping.getCost());
+ assert(!Saturated && "Possible mapping saturated the cost");
+ LLVM_DEBUG(dbgs() << "Evaluating mapping cost for: " << MI);
+ LLVM_DEBUG(dbgs() << "With: " << InstrMapping << '\n');
+ RepairPts.clear();
+ if (BestCost && Cost > *BestCost) {
+ LLVM_DEBUG(dbgs() << "Mapping is too expensive from the start\n");
+ return Cost;
+ }
+
+ // Moreover, to realize this mapping, the register bank of each operand must
+ // match this mapping. In other words, we may need to locally reassign the
+ // register banks. Account for that repairing cost as well.
+ // In this context, local means in the surroundings of MI.
+ for (unsigned OpIdx = 0, EndOpIdx = InstrMapping.getNumOperands();
+ OpIdx != EndOpIdx; ++OpIdx) {
+ const MachineOperand &MO = MI.getOperand(OpIdx);
+ if (!MO.isReg())
+ continue;
+ Register Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ LLVM_DEBUG(dbgs() << "Opd" << OpIdx << '\n');
+ const RegisterBankInfo::ValueMapping &ValMapping =
+ InstrMapping.getOperandMapping(OpIdx);
+ // If Reg is already properly mapped, this is free.
+ bool Assign;
+ if (assignmentMatch(Reg, ValMapping, Assign)) {
+ LLVM_DEBUG(dbgs() << "=> is free (match).\n");
+ continue;
+ }
+ if (Assign) {
+ LLVM_DEBUG(dbgs() << "=> is free (simple assignment).\n");
+ RepairPts.emplace_back(RepairingPlacement(MI, OpIdx, *TRI, *this,
+ RepairingPlacement::Reassign));
+ continue;
+ }
+
+ // Find the insertion point for the repairing code.
+ RepairPts.emplace_back(
+ RepairingPlacement(MI, OpIdx, *TRI, *this, RepairingPlacement::Insert));
+ RepairingPlacement &RepairPt = RepairPts.back();
+
+ // If we need to split a basic block to materialize this insertion point,
+ // we may give a higher cost to this mapping.
+ // Nevertheless, we may get away with the split, so try that first.
+ if (RepairPt.hasSplit())
+ tryAvoidingSplit(RepairPt, MO, ValMapping);
+
+ // Check that the materialization of the repairing is possible.
+ if (!RepairPt.canMaterialize()) {
+ LLVM_DEBUG(dbgs() << "Mapping involves impossible repairing\n");
+ return MappingCost::ImpossibleCost();
+ }
+
+ // Account for the split cost and repair cost.
+ // Unless the cost is already saturated or we do not care about the cost.
+ if (!BestCost || Saturated)
+ continue;
+
+ // To get accurate information we need MBFI and MBPI.
+ // Thus, if we end up here this information should be here.
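+ // Editor's worked example for the split bias computed below
+ // (illustrative numbers): with RepairCost = 10 and a 5% bias,
+ //   Bias = (10 * 5 + 99) / 100 = 1 (integer division),
+ // so a repairing placed on a split edge is charged 11 per unit of
+ // block frequency instead of 10.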
+ assert(MBFI && MBPI && "Cost computation requires MBFI and MBPI");
+
+ // FIXME: We will have to rework the repairing cost model.
+ // The repairing cost depends on the register bank that MO has.
+ // However, when we break down the value into different values,
+ // MO may not have a register bank while still needing repairing.
+ // For the fast mode, we don't compute the cost so that is fine,
+ // but still for the repairing code, we will have to make a choice.
+ // For the greedy mode, we should choose greedily what is the best
+ // choice based on the next use of MO.
+
+ // Sums up the repairing cost of MO at each insertion point.
+ uint64_t RepairCost = getRepairCost(MO, ValMapping);
+
+ // This is an impossible-to-repair cost.
+ if (RepairCost == std::numeric_limits<unsigned>::max())
+ return MappingCost::ImpossibleCost();
+
+ // Bias used for splitting: 5%.
+ const uint64_t PercentageForBias = 5;
+ uint64_t Bias = (RepairCost * PercentageForBias + 99) / 100;
+ // We should not need more than a couple of instructions to repair
+ // an assignment. In other words, the computation should not
+ // overflow because the repairing cost is free of basic block
+ // frequency.
+ assert(((RepairCost < RepairCost * PercentageForBias) &&
+ (RepairCost * PercentageForBias <
+ RepairCost * PercentageForBias + 99)) &&
+ "Repairing involves more than a billion instructions?!");
+ for (const std::unique_ptr<InsertPoint> &InsertPt : RepairPt) {
+ assert(InsertPt->canMaterialize() && "We should not have made it here");
+ // We will be applying basic block frequencies below, and those use
+ // uint64_t.
+ if (!InsertPt->isSplit())
+ Saturated = Cost.addLocalCost(RepairCost);
+ else {
+ uint64_t CostForInsertPt = RepairCost;
+ // Again we shouldn't overflow here given that
+ // CostForInsertPt is frequency free at this point.
+ assert(CostForInsertPt + Bias > CostForInsertPt &&
+ "Repairing + split bias overflows");
+ CostForInsertPt += Bias;
+ uint64_t PtCost = InsertPt->frequency(*this) * CostForInsertPt;
+ // Check if we just overflowed.
+ if ((Saturated = PtCost < CostForInsertPt))
+ Cost.saturate();
+ else
+ Saturated = Cost.addNonLocalCost(PtCost);
+ }
+
+ // Stop looking into what it takes to repair, this is already
+ // too expensive.
+ if (BestCost && Cost > *BestCost) {
+ LLVM_DEBUG(dbgs() << "Mapping is too expensive, stop processing\n");
+ return Cost;
+ }
+
+ // No need to accumulate more cost information.
+ // We still need to gather the repairing information though.
+ if (Saturated)
+ break;
+ }
+ }
+ LLVM_DEBUG(dbgs() << "Total cost is: " << Cost << "\n");
+ return Cost;
+}
+
+bool RegBankSelect::applyMapping(
+ MachineInstr &MI, const RegisterBankInfo::InstructionMapping &InstrMapping,
+ SmallVectorImpl<RegBankSelect::RepairingPlacement> &RepairPts) {
+ // OpdMapper will hold all the information needed for the rewriting.
+ RegisterBankInfo::OperandsMapper OpdMapper(MI, InstrMapping, *MRI);
+
+ // First, place the repairing code.
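+ // Editor's note (illustrative): the loop below either flips a bank in
+ // place (Reassign) or materializes the copies/merges prepared by
+ // repairReg (Insert), e.g. rewriting
+ //   %d = G_ADD %a, %b        ; %b in the wrong bank
+ // into
+ //   %b2:<good bank> = COPY %b
+ //   %d = G_ADD %a, %b2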
+  for (RepairingPlacement &RepairPt : RepairPts) {
+    if (!RepairPt.canMaterialize() ||
+        RepairPt.getKind() == RepairingPlacement::Impossible)
+      return false;
+    assert(RepairPt.getKind() != RepairingPlacement::None &&
+           "This should not make its way into the list");
+    unsigned OpIdx = RepairPt.getOpIdx();
+    MachineOperand &MO = MI.getOperand(OpIdx);
+    const RegisterBankInfo::ValueMapping &ValMapping =
+        InstrMapping.getOperandMapping(OpIdx);
+    Register Reg = MO.getReg();
+
+    switch (RepairPt.getKind()) {
+    case RepairingPlacement::Reassign:
+      assert(ValMapping.NumBreakDowns == 1 &&
+             "Reassignment should only be for simple mapping");
+      MRI->setRegBank(Reg, *ValMapping.BreakDown[0].RegBank);
+      break;
+    case RepairingPlacement::Insert:
+      OpdMapper.createVRegs(OpIdx);
+      if (!repairReg(MO, ValMapping, RepairPt, OpdMapper.getVRegs(OpIdx)))
+        return false;
+      break;
+    default:
+      llvm_unreachable("Other kind should not happen");
+    }
+  }
+
+  // Second, rewrite the instruction.
+  LLVM_DEBUG(dbgs() << "Actual mapping of the operands: " << OpdMapper << '\n');
+  RBI->applyMapping(OpdMapper);
+
+  return true;
+}
+
+bool RegBankSelect::assignInstr(MachineInstr &MI) {
+  LLVM_DEBUG(dbgs() << "Assign: " << MI);
+  // Remember the repairing placement for all the operands.
+  SmallVector<RepairingPlacement, 4> RepairPts;
+
+  const RegisterBankInfo::InstructionMapping *BestMapping;
+  if (OptMode == RegBankSelect::Mode::Fast) {
+    BestMapping = &RBI->getInstrMapping(MI);
+    MappingCost DefaultCost = computeMapping(MI, *BestMapping, RepairPts);
+    if (DefaultCost == MappingCost::ImpossibleCost())
+      return false;
+  } else {
+    RegisterBankInfo::InstructionMappings PossibleMappings =
+        RBI->getInstrPossibleMappings(MI);
+    if (PossibleMappings.empty())
+      return false;
+    BestMapping = &findBestMapping(MI, PossibleMappings, RepairPts);
+  }
+  // Make sure the mapping is valid for MI.
+  assert(BestMapping->verify(MI) && "Invalid instruction mapping");
+
+  LLVM_DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
+
+  // After this call, MI may not be valid anymore.
+  // Do not use it.
+  return applyMapping(MI, *BestMapping, RepairPts);
+}
+
+bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
+  // If the ISel pipeline failed, do not bother running this pass.
+  if (MF.getProperties().hasProperty(
+          MachineFunctionProperties::Property::FailedISel))
+    return false;
+
+  LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
+  const Function &F = MF.getFunction();
+  Mode SaveOptMode = OptMode;
+  if (F.hasOptNone())
+    OptMode = Mode::Fast;
+  init(MF);
+
+#ifndef NDEBUG
+  // Check that our input is fully legal: we require the function to have the
+  // Legalized property, so it should be.
+  // FIXME: This should be in the MachineVerifier.
+  if (!DisableGISelLegalityCheck)
+    if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) {
+      reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect",
+                         "instruction is not legal", *MI);
+      return false;
+    }
+#endif
+
+  // Walk the function and assign register banks to all operands.
+  // Use an RPOT to make sure all registers are assigned before we choose
+  // the best mapping of the current instruction.
+  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
+  for (MachineBasicBlock *MBB : RPOT) {
+    // Set a sensible insertion point so that subsequent calls to
+    // MIRBuilder insert instructions in the right block.
+    MIRBuilder.setMBB(*MBB);
+    for (MachineBasicBlock::iterator MII = MBB->begin(), End = MBB->end();
+         MII != End;) {
+      // MI might be invalidated by the assignment, so move the
+      // iterator beforehand.
+      MachineInstr &MI = *MII++;
+
+      // Ignore target-specific post-isel instructions: they should use proper
+      // regclasses.
+      if (isTargetSpecificOpcode(MI.getOpcode()) && !MI.isPreISelOpcode())
+        continue;
+
+      if (!assignInstr(MI)) {
+        reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect",
+                           "unable to map instruction", MI);
+        return false;
+      }
+
+      // It's possible the mapping changed control flow and moved the
+      // following instruction to a new block, so figure out the new parent.
+      if (MII != End) {
+        MachineBasicBlock *NextInstBB = MII->getParent();
+        if (NextInstBB != MBB) {
+          LLVM_DEBUG(dbgs() << "Instruction mapping changed control flow\n");
+          MBB = NextInstBB;
+          MIRBuilder.setMBB(*MBB);
+          End = MBB->end();
+        }
+      }
+    }
+  }
+
+  OptMode = SaveOptMode;
+  return false;
+}
+
+//------------------------------------------------------------------------------
+// Helper Classes Implementation
+//------------------------------------------------------------------------------
+RegBankSelect::RepairingPlacement::RepairingPlacement(
+    MachineInstr &MI, unsigned OpIdx, const TargetRegisterInfo &TRI, Pass &P,
+    RepairingPlacement::RepairingKind Kind)
+    // Default is, we are going to insert code to repair OpIdx.
+    : Kind(Kind), OpIdx(OpIdx),
+      CanMaterialize(Kind != RepairingKind::Impossible), P(P) {
+  const MachineOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isReg() && "Trying to repair a non-reg operand");
+
+  if (Kind != RepairingKind::Insert)
+    return;
+
+  // Repairings for definitions happen after MI, uses happen before.
+  bool Before = !MO.isDef();
+
+  // Check if we are done with MI.
+  if (!MI.isPHI() && !MI.isTerminator()) {
+    addInsertPoint(MI, Before);
+    // We are done with the initialization.
+    return;
+  }
+
+  // Now, look for the special cases.
+  if (MI.isPHI()) {
+    // - PHIs must be the first instructions:
+    //   * Before, we have to split the related incoming edge.
+    //   * After, move the insertion point past the last phi.
+    if (!Before) {
+      MachineBasicBlock::iterator It = MI.getParent()->getFirstNonPHI();
+      if (It != MI.getParent()->end())
+        addInsertPoint(*It, /*Before*/ true);
+      else
+        addInsertPoint(*(--It), /*Before*/ false);
+      return;
+    }
+    // Since we repair a use of a phi, we may need to split the related edge.
+    MachineBasicBlock &Pred = *MI.getOperand(OpIdx + 1).getMBB();
+    // Check if we can move the insertion point prior to the
+    // terminators of the predecessor.
+    Register Reg = MO.getReg();
+    MachineBasicBlock::iterator It = Pred.getLastNonDebugInstr();
+    for (auto Begin = Pred.begin(); It != Begin && It->isTerminator(); --It)
+      if (It->modifiesRegister(Reg, &TRI)) {
+        // We cannot hoist the repairing code in the predecessor.
+        // Split the edge.
+        addInsertPoint(Pred, *MI.getParent());
+        return;
+      }
+    // At this point, we can insert in Pred.
+
+    // - If It is invalid, Pred is empty and we can insert in Pred
+    //   wherever we want.
+    // - If It is valid, It is the last non-terminator, so insert after It.
+    if (It == Pred.end())
+      addInsertPoint(Pred, /*Beginning*/ false);
+    else
+      addInsertPoint(*It, /*Before*/ false);
+  } else {
+    // - Terminators must be the last instructions:
+    //   * Before, move the insert point before the first terminator.
+    //   * After, we have to split the outgoing edges.
+    if (Before) {
+      // Check whether Reg is defined by any terminator.
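+      // E.g. (illustrative): to repair the condition of a conditional branch
+      // such as
+      //   G_BRCOND %cond(s1), %bb.true
+      // the repairing code has to land before the first terminator of the
+      // block, provided no terminator redefines %cond.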
+      MachineBasicBlock::reverse_iterator It = MI;
+      auto REnd = MI.getParent()->rend();
+
+      for (; It != REnd && It->isTerminator(); ++It) {
+        assert(!It->modifiesRegister(MO.getReg(), &TRI) &&
+               "copy insertion in middle of terminators not handled");
+      }
+
+      if (It == REnd) {
+        addInsertPoint(*MI.getParent()->begin(), true);
+        return;
+      }
+
+      // We are sure to be right before the first terminator.
+      addInsertPoint(*It, /*Before*/ false);
+      return;
+    }
+    // Make sure Reg is not redefined by other terminators, otherwise
+    // we do not know how to split.
+    for (MachineBasicBlock::iterator It = MI, End = MI.getParent()->end();
+         ++It != End;)
+      // The machine verifier should reject this kind of code.
+      assert(!It->modifiesRegister(MO.getReg(), &TRI) &&
+             "Do not know where to split");
+    // Split each outgoing edge.
+    MachineBasicBlock &Src = *MI.getParent();
+    for (auto &Succ : Src.successors())
+      addInsertPoint(Src, Succ);
+  }
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(MachineInstr &MI,
+                                                       bool Before) {
+  addInsertPoint(*new InstrInsertPoint(MI, Before));
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(MachineBasicBlock &MBB,
+                                                       bool Beginning) {
+  addInsertPoint(*new MBBInsertPoint(MBB, Beginning));
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(MachineBasicBlock &Src,
+                                                       MachineBasicBlock &Dst) {
+  addInsertPoint(*new EdgeInsertPoint(Src, Dst, P));
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(
+    RegBankSelect::InsertPoint &Point) {
+  CanMaterialize &= Point.canMaterialize();
+  HasSplit |= Point.isSplit();
+  InsertPoints.emplace_back(&Point);
+}
+
+RegBankSelect::InstrInsertPoint::InstrInsertPoint(MachineInstr &Instr,
+                                                  bool Before)
+    : InsertPoint(), Instr(Instr), Before(Before) {
+  // Since we do not support splitting, we do not need to update
+  // liveness and such, so do not do anything with P.
+  assert((!Before || !Instr.isPHI()) &&
+         "Splitting before phis requires more points");
+  assert((!Before || !Instr.getNextNode() || !Instr.getNextNode()->isPHI()) &&
+         "Splitting between phis does not make sense");
+}
+
+void RegBankSelect::InstrInsertPoint::materialize() {
+  if (isSplit()) {
+    // Slice and return the beginning of the new block.
+    // If we need to split between the terminators, we theoretically
+    // need to know where the first and second set of terminators end
+    // to update the successors properly.
+    // Now, in practice, we should have a maximum of 2 branch
+    // instructions; one conditional and one unconditional. Therefore
+    // we know how to update the successor by looking at the target of
+    // the unconditional branch.
+    // If we end up splitting at some point, then, we should update
+    // the liveness information and such. I.e., we would need to
+    // access P here.
+    // The machine verifier should actually make sure such cases
+    // cannot happen.
+    llvm_unreachable("Not yet implemented");
+  }
+  // Otherwise the insertion point is just the current or next
+  // instruction depending on Before. I.e., there is nothing to do
+  // here.
+}
+
+bool RegBankSelect::InstrInsertPoint::isSplit() const {
+  // If the insertion point is after a terminator, we need to split.
+  if (!Before)
+    return Instr.isTerminator();
+  // If we insert before an instruction that is after a terminator,
+  // we are still after a terminator.
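+  // Illustrative layout: in a block whose last instructions are
+  //   <body>; G_BR %bb.next; <insertion point>
+  // the insertion point sits after a terminator, so the block would have to
+  // be split to give the inserted code a place to execute.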
+  return Instr.getPrevNode() && Instr.getPrevNode()->isTerminator();
+}
+
+uint64_t RegBankSelect::InstrInsertPoint::frequency(const Pass &P) const {
+  // Even if we need to split, because we insert between terminators,
+  // this split has actually the same frequency as the instruction.
+  const MachineBlockFrequencyInfo *MBFI =
+      P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>();
+  if (!MBFI)
+    return 1;
+  return MBFI->getBlockFreq(Instr.getParent()).getFrequency();
+}
+
+uint64_t RegBankSelect::MBBInsertPoint::frequency(const Pass &P) const {
+  const MachineBlockFrequencyInfo *MBFI =
+      P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>();
+  if (!MBFI)
+    return 1;
+  return MBFI->getBlockFreq(&MBB).getFrequency();
+}
+
+void RegBankSelect::EdgeInsertPoint::materialize() {
+  // If we end up repairing twice at the same place before materializing the
+  // insertion point, we may think we have to split an edge twice.
+  // We should have a factory for the insert point such that identical points
+  // are the same instance.
+  assert(Src.isSuccessor(DstOrSplit) && DstOrSplit->isPredecessor(&Src) &&
+         "This point has already been split");
+  MachineBasicBlock *NewBB = Src.SplitCriticalEdge(DstOrSplit, P);
+  assert(NewBB && "Invalid call to materialize");
+  // We reuse the destination block to hold the information of the new block.
+  DstOrSplit = NewBB;
+}
+
+uint64_t RegBankSelect::EdgeInsertPoint::frequency(const Pass &P) const {
+  const MachineBlockFrequencyInfo *MBFI =
+      P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>();
+  if (!MBFI)
+    return 1;
+  if (WasMaterialized)
+    return MBFI->getBlockFreq(DstOrSplit).getFrequency();
+
+  const MachineBranchProbabilityInfo *MBPI =
+      P.getAnalysisIfAvailable<MachineBranchProbabilityInfo>();
+  if (!MBPI)
+    return 1;
+  // The basic block will be on the edge.
+  return (MBFI->getBlockFreq(&Src) * MBPI->getEdgeProbability(&Src, DstOrSplit))
+      .getFrequency();
+}
+
+bool RegBankSelect::EdgeInsertPoint::canMaterialize() const {
+  // If this is not a critical edge, we should not have used this insert
+  // point. Indeed, inserting directly in either the successor or the
+  // predecessor would have done the job.
+  assert(Src.succ_size() > 1 && DstOrSplit->pred_size() > 1 &&
+         "Edge is not critical");
+  return Src.canSplitCriticalEdge(DstOrSplit);
+}
+
+RegBankSelect::MappingCost::MappingCost(const BlockFrequency &LocalFreq)
+    : LocalFreq(LocalFreq.getFrequency()) {}
+
+bool RegBankSelect::MappingCost::addLocalCost(uint64_t Cost) {
+  // Check if this overflows.
+  if (LocalCost + Cost < LocalCost) {
+    saturate();
+    return true;
+  }
+  LocalCost += Cost;
+  return isSaturated();
+}
+
+bool RegBankSelect::MappingCost::addNonLocalCost(uint64_t Cost) {
+  // Check if this overflows.
+  if (NonLocalCost + Cost < NonLocalCost) {
+    saturate();
+    return true;
+  }
+  NonLocalCost += Cost;
+  return isSaturated();
+}
+
+bool RegBankSelect::MappingCost::isSaturated() const {
+  return LocalCost == UINT64_MAX - 1 && NonLocalCost == UINT64_MAX &&
+         LocalFreq == UINT64_MAX;
+}
+
+void RegBankSelect::MappingCost::saturate() {
+  *this = ImpossibleCost();
+  --LocalCost;
+}
+
+RegBankSelect::MappingCost RegBankSelect::MappingCost::ImpossibleCost() {
+  return MappingCost(UINT64_MAX, UINT64_MAX, UINT64_MAX);
+}
+
+bool RegBankSelect::MappingCost::operator<(const MappingCost &Cost) const {
+  // Sort out the easy cases.
+  if (*this == Cost)
+    return false;
+  // If one cost is impossible to realize, the other one is cheaper,
+  // unless it is impossible as well.
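+  // Sketch of the resulting order, from cheapest to most expensive:
+  //   regular costs < saturated costs < impossible costs.
+  // Only regular costs need the frequency-scaled comparison below.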
+  if ((*this == ImpossibleCost()) || (Cost == ImpossibleCost()))
+    return (*this == ImpossibleCost()) < (Cost == ImpossibleCost());
+  // If one cost is saturated, the other one is cheaper, unless it is
+  // saturated as well.
+  if (isSaturated() || Cost.isSaturated())
+    return isSaturated() < Cost.isSaturated();
+  // At this point we know both costs hold sensible values.
+
+  // If both values have a different base frequency, there is not much
+  // we can do but to scale everything.
+  // However, if they have the same base frequency we can avoid making
+  // complicated computations.
+  uint64_t ThisLocalAdjust;
+  uint64_t OtherLocalAdjust;
+  if (LLVM_LIKELY(LocalFreq == Cost.LocalFreq)) {
+
+    // At this point, we know the local costs are comparable.
+    // Handle the case that does not involve potential overflow first.
+    if (NonLocalCost == Cost.NonLocalCost)
+      // Since the non-local costs do not discriminate on the result,
+      // just compare the local costs.
+      return LocalCost < Cost.LocalCost;
+
+    // The base costs are comparable so we may only keep the relative
+    // value to increase our chances of avoiding overflows.
+    ThisLocalAdjust = 0;
+    OtherLocalAdjust = 0;
+    if (LocalCost < Cost.LocalCost)
+      OtherLocalAdjust = Cost.LocalCost - LocalCost;
+    else
+      ThisLocalAdjust = LocalCost - Cost.LocalCost;
+  } else {
+    ThisLocalAdjust = LocalCost;
+    OtherLocalAdjust = Cost.LocalCost;
+  }
+
+  // The non-local costs are comparable, just keep the relative value.
+  uint64_t ThisNonLocalAdjust = 0;
+  uint64_t OtherNonLocalAdjust = 0;
+  if (NonLocalCost < Cost.NonLocalCost)
+    OtherNonLocalAdjust = Cost.NonLocalCost - NonLocalCost;
+  else
+    ThisNonLocalAdjust = NonLocalCost - Cost.NonLocalCost;
+  // Scale everything to make them comparable.
+  uint64_t ThisScaledCost = ThisLocalAdjust * LocalFreq;
+  // Check for overflow on that operation.
+  bool ThisOverflows = ThisLocalAdjust && (ThisScaledCost < ThisLocalAdjust ||
+                                           ThisScaledCost < LocalFreq);
+  uint64_t OtherScaledCost = OtherLocalAdjust * Cost.LocalFreq;
+  // Check for overflow on the last operation.
+  bool OtherOverflows =
+      OtherLocalAdjust &&
+      (OtherScaledCost < OtherLocalAdjust || OtherScaledCost < Cost.LocalFreq);
+  // Add the non-local costs.
+  ThisOverflows |= ThisNonLocalAdjust &&
+                   ThisScaledCost + ThisNonLocalAdjust < ThisNonLocalAdjust;
+  ThisScaledCost += ThisNonLocalAdjust;
+  OtherOverflows |= OtherNonLocalAdjust &&
+                    OtherScaledCost + OtherNonLocalAdjust < OtherNonLocalAdjust;
+  OtherScaledCost += OtherNonLocalAdjust;
+  // If both overflow, we cannot compare without additional
+  // precision, e.g., APInt. Just give up on that case.
+  if (ThisOverflows && OtherOverflows)
+    return false;
+  // If one overflows but not the other, we can still compare.
+  if (ThisOverflows || OtherOverflows)
+    return ThisOverflows < OtherOverflows;
+  // Otherwise, just compare the values.
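+  // Worked example (illustrative numbers): with LocalFreq = 100 on both
+  // sides, this = {LocalCost: 3, NonLocalCost: 0} versus
+  // other = {LocalCost: 2, NonLocalCost: 150} yields local adjusts of 1 and
+  // 0, hence 1 * 100 + 0 = 100 versus 0 * 100 + 150 = 150, so *this is
+  // cheaper.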
+  return ThisScaledCost < OtherScaledCost;
+}
+
+bool RegBankSelect::MappingCost::operator==(const MappingCost &Cost) const {
+  return LocalCost == Cost.LocalCost && NonLocalCost == Cost.NonLocalCost &&
+         LocalFreq == Cost.LocalFreq;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void RegBankSelect::MappingCost::dump() const {
+  print(dbgs());
+  dbgs() << '\n';
+}
+#endif
+
+void RegBankSelect::MappingCost::print(raw_ostream &OS) const {
+  if (*this == ImpossibleCost()) {
+    OS << "impossible";
+    return;
+  }
+  if (isSaturated()) {
+    OS << "saturated";
+    return;
+  }
+  OS << LocalFreq << " * " << LocalCost << " + " << NonLocalCost;
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp b/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp
new file mode 100644
index 000000000000..fc9c802693ab
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp
@@ -0,0 +1,114 @@
+//===- llvm/CodeGen/GlobalISel/RegisterBank.cpp - Register Bank --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the RegisterBank class.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "registerbank"
+
+using namespace llvm;
+
+const unsigned RegisterBank::InvalidID = UINT_MAX;
+
+RegisterBank::RegisterBank(
+    unsigned ID, const char *Name, unsigned Size,
+    const uint32_t *CoveredClasses, unsigned NumRegClasses)
+    : ID(ID), Name(Name), Size(Size) {
+  ContainedRegClasses.resize(NumRegClasses);
+  ContainedRegClasses.setBitsInMask(CoveredClasses);
+}
+
+bool RegisterBank::verify(const TargetRegisterInfo &TRI) const {
+  assert(isValid() && "Invalid register bank");
+  for (unsigned RCId = 0, End = TRI.getNumRegClasses(); RCId != End; ++RCId) {
+    const TargetRegisterClass &RC = *TRI.getRegClass(RCId);
+
+    if (!covers(RC))
+      continue;
+    // Verify that the register bank covers all the sub classes of the
+    // classes it covers.
+
+    // Use a different (slow in that case) method than
+    // RegisterBankInfo to find the subclasses of RC, to make sure
+    // both agree on the covers.
+    for (unsigned SubRCId = 0; SubRCId != End; ++SubRCId) {
+      const TargetRegisterClass &SubRC = *TRI.getRegClass(SubRCId);
+
+      if (!RC.hasSubClassEq(&SubRC))
+        continue;
+
+      // Verify that the Size of the register bank is big enough to cover
+      // all the register classes it covers.
+      assert(getSize() >= TRI.getRegSizeInBits(SubRC) &&
+             "Size is not big enough for all the subclasses!");
+      assert(covers(SubRC) && "Not all subclasses are covered");
+    }
+  }
+  return true;
+}
+
+bool RegisterBank::covers(const TargetRegisterClass &RC) const {
+  assert(isValid() && "RB hasn't been initialized yet");
+  return ContainedRegClasses.test(RC.getID());
+}
+
+bool RegisterBank::isValid() const {
+  return ID != InvalidID && Name != nullptr && Size != 0 &&
+         // A register bank that does not cover anything is useless.
+         !ContainedRegClasses.empty();
+}
+
+bool RegisterBank::operator==(const RegisterBank &OtherRB) const {
+  // There must be only one instance of a given register bank alive
+  // for the whole compilation.
+  // The RegisterBankInfo is supposed to enforce that.
+  assert((OtherRB.getID() != getID() || &OtherRB == this) &&
+         "ID does not uniquely identify a RegisterBank");
+  return &OtherRB == this;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void RegisterBank::dump(const TargetRegisterInfo *TRI) const {
+  print(dbgs(), /* IsForDebug */ true, TRI);
+}
+#endif
+
+void RegisterBank::print(raw_ostream &OS, bool IsForDebug,
+                         const TargetRegisterInfo *TRI) const {
+  OS << getName();
+  if (!IsForDebug)
+    return;
+  OS << "(ID:" << getID() << ", Size:" << getSize() << ")\n"
+     << "isValid:" << isValid() << '\n'
+     << "Number of Covered register classes: " << ContainedRegClasses.count()
+     << '\n';
+  // Print all the subclasses if we can.
+  // The register classes may not be properly initialized yet.
+  if (!TRI || ContainedRegClasses.empty())
+    return;
+  assert(ContainedRegClasses.size() == TRI->getNumRegClasses() &&
+         "TRI does not match the initialization process?");
+  bool IsFirst = true;
+  OS << "Covered register classes:\n";
+  for (unsigned RCId = 0, End = TRI->getNumRegClasses(); RCId != End; ++RCId) {
+    const TargetRegisterClass &RC = *TRI->getRegClass(RCId);
+
+    if (!covers(RC))
+      continue;
+
+    if (!IsFirst)
+      OS << ", ";
+    OS << TRI->getRegClassName(&RC);
+    IsFirst = false;
+  }
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
new file mode 100644
index 000000000000..3fcc55286beb
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -0,0 +1,800 @@
+//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.cpp --------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the RegisterBankInfo class.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <algorithm> // For std::max.
+ +#define DEBUG_TYPE "registerbankinfo" + +using namespace llvm; + +STATISTIC(NumPartialMappingsCreated, + "Number of partial mappings dynamically created"); +STATISTIC(NumPartialMappingsAccessed, + "Number of partial mappings dynamically accessed"); +STATISTIC(NumValueMappingsCreated, + "Number of value mappings dynamically created"); +STATISTIC(NumValueMappingsAccessed, + "Number of value mappings dynamically accessed"); +STATISTIC(NumOperandsMappingsCreated, + "Number of operands mappings dynamically created"); +STATISTIC(NumOperandsMappingsAccessed, + "Number of operands mappings dynamically accessed"); +STATISTIC(NumInstructionMappingsCreated, + "Number of instruction mappings dynamically created"); +STATISTIC(NumInstructionMappingsAccessed, + "Number of instruction mappings dynamically accessed"); + +const unsigned RegisterBankInfo::DefaultMappingID = UINT_MAX; +const unsigned RegisterBankInfo::InvalidMappingID = UINT_MAX - 1; + +//------------------------------------------------------------------------------ +// RegisterBankInfo implementation. +//------------------------------------------------------------------------------ +RegisterBankInfo::RegisterBankInfo(RegisterBank **RegBanks, + unsigned NumRegBanks) + : RegBanks(RegBanks), NumRegBanks(NumRegBanks) { +#ifndef NDEBUG + for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx) { + assert(RegBanks[Idx] != nullptr && "Invalid RegisterBank"); + assert(RegBanks[Idx]->isValid() && "RegisterBank should be valid"); + } +#endif // NDEBUG +} + +bool RegisterBankInfo::verify(const TargetRegisterInfo &TRI) const { +#ifndef NDEBUG + for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx) { + const RegisterBank &RegBank = getRegBank(Idx); + assert(Idx == RegBank.getID() && + "ID does not match the index in the array"); + LLVM_DEBUG(dbgs() << "Verify " << RegBank << '\n'); + assert(RegBank.verify(TRI) && "RegBank is invalid"); + } +#endif // NDEBUG + return true; +} + +const RegisterBank * +RegisterBankInfo::getRegBank(Register Reg, const MachineRegisterInfo &MRI, + const TargetRegisterInfo &TRI) const { + if (Register::isPhysicalRegister(Reg)) + return &getRegBankFromRegClass(getMinimalPhysRegClass(Reg, TRI)); + + assert(Reg && "NoRegister does not have a register bank"); + const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); + if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) + return RB; + if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) + return &getRegBankFromRegClass(*RC); + return nullptr; +} + +const TargetRegisterClass & +RegisterBankInfo::getMinimalPhysRegClass(Register Reg, + const TargetRegisterInfo &TRI) const { + assert(Register::isPhysicalRegister(Reg) && "Reg must be a physreg"); + const auto &RegRCIt = PhysRegMinimalRCs.find(Reg); + if (RegRCIt != PhysRegMinimalRCs.end()) + return *RegRCIt->second; + const TargetRegisterClass *PhysRC = TRI.getMinimalPhysRegClass(Reg); + PhysRegMinimalRCs[Reg] = PhysRC; + return *PhysRC; +} + +const RegisterBank *RegisterBankInfo::getRegBankFromConstraints( + const MachineInstr &MI, unsigned OpIdx, const TargetInstrInfo &TII, + const TargetRegisterInfo &TRI) const { + // The mapping of the registers may be available via the + // register class constraints. + const TargetRegisterClass *RC = MI.getRegClassConstraint(OpIdx, &TII, &TRI); + + if (!RC) + return nullptr; + + const RegisterBank &RegBank = getRegBankFromRegClass(*RC); + // Sanity check that the target properly implemented getRegBankFromRegClass. 
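+  // E.g. (illustrative), a target whose getRegBankFromRegClass maps its
+  // GPR32/GPR64 register classes to a single GPR bank would return that
+  // bank here for any operand constrained to one of those classes.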
+  assert(RegBank.covers(*RC) &&
+         "The mapping of the register bank does not make sense");
+  return &RegBank;
+}
+
+const TargetRegisterClass *RegisterBankInfo::constrainGenericRegister(
+    Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI) {
+
+  // If the register already has a class, fall back to MRI::constrainRegClass.
+  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
+  if (RegClassOrBank.is<const TargetRegisterClass *>())
+    return MRI.constrainRegClass(Reg, &RC);
+
+  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
+  // Otherwise, all we can do is ensure the bank covers the class, and set it.
+  if (RB && !RB->covers(RC))
+    return nullptr;
+
+  // If nothing was set or the class is simply compatible, set it.
+  MRI.setRegClass(Reg, &RC);
+  return &RC;
+}
+
+/// Check whether or not \p MI should be treated like a copy
+/// for the mappings.
+/// Copy-like instructions are special for mapping because
+/// they don't have actual register constraints. Moreover,
+/// they sometimes have register classes assigned and we can
+/// just use that instead of failing to provide a generic mapping.
+static bool isCopyLike(const MachineInstr &MI) {
+  return MI.isCopy() || MI.isPHI() ||
+         MI.getOpcode() == TargetOpcode::REG_SEQUENCE;
+}
+
+const RegisterBankInfo::InstructionMapping &
+RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
+  // For copies we want to walk over the operands and try to find one
+  // that has a register bank since the instruction itself will not get
+  // us any constraint.
+  bool IsCopyLike = isCopyLike(MI);
+  // For copy-like instructions, only the mapping of the definition
+  // is important. The rest is not constrained.
+  unsigned NumOperandsForMapping = IsCopyLike ? 1 : MI.getNumOperands();
+
+  const MachineFunction &MF = *MI.getMF();
+  const TargetSubtargetInfo &STI = MF.getSubtarget();
+  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
+  // We may need to query the instruction encoding to guess the mapping.
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
+
+  // Before doing anything complicated, check whether the mapping is
+  // directly available.
+  bool CompleteMapping = true;
+
+  SmallVector<const ValueMapping *, 8> OperandsMapping(NumOperandsForMapping);
+  for (unsigned OpIdx = 0, EndIdx = MI.getNumOperands(); OpIdx != EndIdx;
+       ++OpIdx) {
+    const MachineOperand &MO = MI.getOperand(OpIdx);
+    if (!MO.isReg())
+      continue;
+    Register Reg = MO.getReg();
+    if (!Reg)
+      continue;
+    // The register bank of Reg is just a side effect of the current
+    // execution and in particular, there is no reason to believe this
+    // is the best default mapping for the current instruction. Keep
+    // it as an alternative register bank if we cannot figure out
+    // something.
+    const RegisterBank *AltRegBank = getRegBank(Reg, MRI, TRI);
+    // For copy-like instructions, we want to reuse the register bank
+    // that is already set on Reg, if any, since those instructions do
+    // not have any constraints.
+    const RegisterBank *CurRegBank = IsCopyLike ? AltRegBank : nullptr;
+    if (!CurRegBank) {
+      // If this is a target specific instruction, we can deduce
+      // the register bank from the encoding constraints.
+      CurRegBank = getRegBankFromConstraints(MI, OpIdx, TII, TRI);
+      if (!CurRegBank) {
+        // All our attempts failed, give up.
+        CompleteMapping = false;
+
+        if (!IsCopyLike)
+          // MI does not carry enough information to guess the mapping.
+          return getInvalidInstructionMapping();
+        continue;
+      }
+    }
+
+    unsigned Size = getSizeInBits(Reg, MRI, TRI);
+    const ValueMapping *ValMapping = &getValueMapping(0, Size, *CurRegBank);
+    if (IsCopyLike) {
+      if (!OperandsMapping[0]) {
+        if (MI.isRegSequence()) {
+          // For reg_sequence, the result size does not match the input.
+          unsigned ResultSize = getSizeInBits(MI.getOperand(0).getReg(),
+                                              MRI, TRI);
+          OperandsMapping[0] = &getValueMapping(0, ResultSize, *CurRegBank);
+        } else {
+          OperandsMapping[0] = ValMapping;
+        }
+      }
+
+      // The default handling assumes any register bank can be copied to any
+      // other. If this isn't the case, the target should specially deal with
+      // reg_sequence/phi. There may also be unsatisfiable copies.
+      for (; OpIdx != EndIdx; ++OpIdx) {
+        const MachineOperand &MO = MI.getOperand(OpIdx);
+        if (!MO.isReg())
+          continue;
+        Register Reg = MO.getReg();
+        if (!Reg)
+          continue;
+
+        const RegisterBank *AltRegBank = getRegBank(Reg, MRI, TRI);
+        if (AltRegBank &&
+            cannotCopy(*CurRegBank, *AltRegBank, getSizeInBits(Reg, MRI, TRI)))
+          return getInvalidInstructionMapping();
+      }
+
+      CompleteMapping = true;
+      break;
+    }
+
+    OperandsMapping[OpIdx] = ValMapping;
+  }
+
+  if (IsCopyLike && !CompleteMapping) {
+    // No way to deduce the type from what we have.
+    return getInvalidInstructionMapping();
+  }
+
+  assert(CompleteMapping && "Setting an incomplete mapping");
+  return getInstructionMapping(
+      DefaultMappingID, /*Cost*/ 1,
+      /*OperandsMapping*/ getOperandsMapping(OperandsMapping),
+      NumOperandsForMapping);
+}
+
+/// Hashing function for PartialMapping.
+static hash_code hashPartialMapping(unsigned StartIdx, unsigned Length,
+                                    const RegisterBank *RegBank) {
+  return hash_combine(StartIdx, Length, RegBank ? RegBank->getID() : 0);
+}
+
+/// Overloaded version of hash_value for a PartialMapping.
+hash_code
+llvm::hash_value(const RegisterBankInfo::PartialMapping &PartMapping) {
+  return hashPartialMapping(PartMapping.StartIdx, PartMapping.Length,
+                            PartMapping.RegBank);
+}
+
+const RegisterBankInfo::PartialMapping &
+RegisterBankInfo::getPartialMapping(unsigned StartIdx, unsigned Length,
+                                    const RegisterBank &RegBank) const {
+  ++NumPartialMappingsAccessed;
+
+  hash_code Hash = hashPartialMapping(StartIdx, Length, &RegBank);
+  const auto &It = MapOfPartialMappings.find(Hash);
+  if (It != MapOfPartialMappings.end())
+    return *It->second;
+
+  ++NumPartialMappingsCreated;
+
+  auto &PartMapping = MapOfPartialMappings[Hash];
+  PartMapping = std::make_unique<PartialMapping>(StartIdx, Length, RegBank);
+  return *PartMapping;
+}
+
+const RegisterBankInfo::ValueMapping &
+RegisterBankInfo::getValueMapping(unsigned StartIdx, unsigned Length,
+                                  const RegisterBank &RegBank) const {
+  return getValueMapping(&getPartialMapping(StartIdx, Length, RegBank), 1);
+}
+
+static hash_code
+hashValueMapping(const RegisterBankInfo::PartialMapping *BreakDown,
+                 unsigned NumBreakDowns) {
+  if (LLVM_LIKELY(NumBreakDowns == 1))
+    return hash_value(*BreakDown);
+  SmallVector<size_t, 8> Hashes;
+  Hashes.reserve(NumBreakDowns);
+  for (unsigned Idx = 0; Idx != NumBreakDowns; ++Idx)
+    Hashes.push_back(hash_value(BreakDown[Idx]));
+  return hash_combine_range(Hashes.begin(), Hashes.end());
+}
+
+const RegisterBankInfo::ValueMapping &
+RegisterBankInfo::getValueMapping(const PartialMapping *BreakDown,
+                                  unsigned NumBreakDowns) const {
+  ++NumValueMappingsAccessed;
+
+  hash_code Hash = hashValueMapping(BreakDown, NumBreakDowns);
+  const auto &It = MapOfValueMappings.find(Hash);
+  if (It != MapOfValueMappings.end())
+    return *It->second;
+
+  ++NumValueMappingsCreated;
+
+  auto &ValMapping = MapOfValueMappings[Hash];
+  ValMapping = std::make_unique<ValueMapping>(BreakDown, NumBreakDowns);
+  return *ValMapping;
+}
+
+template <typename Iterator>
+const RegisterBankInfo::ValueMapping *
+RegisterBankInfo::getOperandsMapping(Iterator Begin, Iterator End) const {
+
+  ++NumOperandsMappingsAccessed;
+
+  // The addresses of the value mapping are unique.
+  // Therefore, we can use them directly to hash the operand mapping.
+  hash_code Hash = hash_combine_range(Begin, End);
+  auto &Res = MapOfOperandsMappings[Hash];
+  if (Res)
+    return Res.get();
+
+  ++NumOperandsMappingsCreated;
+
+  // Create the array of ValueMapping.
+  // Note: this array will not hash to this instance of operands
+  // mapping, because we use the pointer of the ValueMapping
+  // to hash and we expect them to uniquely identify an instance
+  // of value mapping.
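+  // E.g. (illustrative): since getValueMapping uniques its results, operand
+  // lists such as {&VM_A, &VM_A} and {&VM_A, &VM_B} hash their pointers
+  // directly and reliably land in distinct buckets.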
+ Res = std::make_unique<ValueMapping[]>(std::distance(Begin, End)); + unsigned Idx = 0; + for (Iterator It = Begin; It != End; ++It, ++Idx) { + const ValueMapping *ValMap = *It; + if (!ValMap) + continue; + Res[Idx] = *ValMap; + } + return Res.get(); +} + +const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping( + const SmallVectorImpl<const RegisterBankInfo::ValueMapping *> &OpdsMapping) + const { + return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end()); +} + +const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping( + std::initializer_list<const RegisterBankInfo::ValueMapping *> OpdsMapping) + const { + return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end()); +} + +static hash_code +hashInstructionMapping(unsigned ID, unsigned Cost, + const RegisterBankInfo::ValueMapping *OperandsMapping, + unsigned NumOperands) { + return hash_combine(ID, Cost, OperandsMapping, NumOperands); +} + +const RegisterBankInfo::InstructionMapping & +RegisterBankInfo::getInstructionMappingImpl( + bool IsInvalid, unsigned ID, unsigned Cost, + const RegisterBankInfo::ValueMapping *OperandsMapping, + unsigned NumOperands) const { + assert(((IsInvalid && ID == InvalidMappingID && Cost == 0 && + OperandsMapping == nullptr && NumOperands == 0) || + !IsInvalid) && + "Mismatch argument for invalid input"); + ++NumInstructionMappingsAccessed; + + hash_code Hash = + hashInstructionMapping(ID, Cost, OperandsMapping, NumOperands); + const auto &It = MapOfInstructionMappings.find(Hash); + if (It != MapOfInstructionMappings.end()) + return *It->second; + + ++NumInstructionMappingsCreated; + + auto &InstrMapping = MapOfInstructionMappings[Hash]; + InstrMapping = std::make_unique<InstructionMapping>( + ID, Cost, OperandsMapping, NumOperands); + return *InstrMapping; +} + +const RegisterBankInfo::InstructionMapping & +RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { + const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI); + if (Mapping.isValid()) + return Mapping; + llvm_unreachable("The target must implement this"); +} + +RegisterBankInfo::InstructionMappings +RegisterBankInfo::getInstrPossibleMappings(const MachineInstr &MI) const { + InstructionMappings PossibleMappings; + const auto &Mapping = getInstrMapping(MI); + if (Mapping.isValid()) { + // Put the default mapping first. + PossibleMappings.push_back(&Mapping); + } + + // Then the alternative mapping, if any. + InstructionMappings AltMappings = getInstrAlternativeMappings(MI); + for (const InstructionMapping *AltMapping : AltMappings) + PossibleMappings.push_back(AltMapping); +#ifndef NDEBUG + for (const InstructionMapping *Mapping : PossibleMappings) + assert(Mapping->verify(MI) && "Mapping is invalid"); +#endif + return PossibleMappings; +} + +RegisterBankInfo::InstructionMappings +RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const { + // No alternative for MI. 
+  return InstructionMappings();
+}
+
+void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
+  MachineInstr &MI = OpdMapper.getMI();
+  MachineRegisterInfo &MRI = OpdMapper.getMRI();
+  LLVM_DEBUG(dbgs() << "Applying default-like mapping\n");
+  for (unsigned OpIdx = 0,
+                EndIdx = OpdMapper.getInstrMapping().getNumOperands();
+       OpIdx != EndIdx; ++OpIdx) {
+    LLVM_DEBUG(dbgs() << "OpIdx " << OpIdx);
+    MachineOperand &MO = MI.getOperand(OpIdx);
+    if (!MO.isReg()) {
+      LLVM_DEBUG(dbgs() << " is not a register, nothing to be done\n");
+      continue;
+    }
+    if (!MO.getReg()) {
+      LLVM_DEBUG(dbgs() << " is %%noreg, nothing to be done\n");
+      continue;
+    }
+    assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns !=
+               0 &&
+           "Invalid mapping");
+    assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns ==
+               1 &&
+           "This mapping is too complex for this function");
+    iterator_range<SmallVectorImpl<Register>::const_iterator> NewRegs =
+        OpdMapper.getVRegs(OpIdx);
+    if (NewRegs.empty()) {
+      LLVM_DEBUG(dbgs() << " has not been repaired, nothing to be done\n");
+      continue;
+    }
+    Register OrigReg = MO.getReg();
+    Register NewReg = *NewRegs.begin();
+    LLVM_DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr));
+    MO.setReg(NewReg);
+    LLVM_DEBUG(dbgs() << " with " << printReg(NewReg, nullptr));
+
+    // The OperandsMapper creates plain scalars; we may have to fix that.
+    // Check if the types match and if not, fix that.
+    LLT OrigTy = MRI.getType(OrigReg);
+    LLT NewTy = MRI.getType(NewReg);
+    if (OrigTy != NewTy) {
+      // The default mapping is not supposed to change the size of
+      // the storage. However, right now we don't necessarily bump all
+      // the types to storage size. For instance, we can consider
+      // s16 G_AND legal whereas the storage size is going to be 32.
+      assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() &&
+             "Types with different sizes cannot be handled by the default "
+             "mapping");
+      LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
+                        << OrigTy);
+      MRI.setType(NewReg, OrigTy);
+    }
+    LLVM_DEBUG(dbgs() << '\n');
+  }
+}
+
+unsigned RegisterBankInfo::getSizeInBits(Register Reg,
+                                         const MachineRegisterInfo &MRI,
+                                         const TargetRegisterInfo &TRI) const {
+  if (Register::isPhysicalRegister(Reg)) {
+    // The size is not directly available for physical registers.
+    // Instead, we need to access a register class that contains Reg and
+    // get the size of that register class.
+    // Because this is expensive, we'll cache the register class by calling
+    // getMinimalPhysRegClass, which memoizes its result.
+    auto *RC = &getMinimalPhysRegClass(Reg, TRI);
+    assert(RC && "Expecting Register class");
+    return TRI.getRegSizeInBits(*RC);
+  }
+  return TRI.getRegSizeInBits(Reg, MRI);
+}
+
+//------------------------------------------------------------------------------
+// Helper classes implementation.
+//------------------------------------------------------------------------------
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void RegisterBankInfo::PartialMapping::dump() const {
+  print(dbgs());
+  dbgs() << '\n';
+}
+#endif
+
+bool RegisterBankInfo::PartialMapping::verify() const {
+  assert(RegBank && "Register bank not set");
+  assert(Length && "Empty mapping");
+  assert((StartIdx <= getHighBitIdx()) && "Overflow, switch to APInt?");
+  // Check if the minimum width fits into RegBank.
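+  // E.g. (illustrative), a partial mapping {StartIdx: 32, Length: 32} of a
+  // 64-bit value covers bits [32, 63]: getHighBitIdx() == 63 and a register
+  // bank of size 32 is just big enough to hold it.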
+ assert(RegBank->getSize() >= Length && "Register bank too small for Mask"); + return true; +} + +void RegisterBankInfo::PartialMapping::print(raw_ostream &OS) const { + OS << "[" << StartIdx << ", " << getHighBitIdx() << "], RegBank = "; + if (RegBank) + OS << *RegBank; + else + OS << "nullptr"; +} + +bool RegisterBankInfo::ValueMapping::partsAllUniform() const { + if (NumBreakDowns < 2) + return true; + + const PartialMapping *First = begin(); + for (const PartialMapping *Part = First + 1; Part != end(); ++Part) { + if (Part->Length != First->Length || Part->RegBank != First->RegBank) + return false; + } + + return true; +} + +bool RegisterBankInfo::ValueMapping::verify(unsigned MeaningfulBitWidth) const { + assert(NumBreakDowns && "Value mapped nowhere?!"); + unsigned OrigValueBitWidth = 0; + for (const RegisterBankInfo::PartialMapping &PartMap : *this) { + // Check that each register bank is big enough to hold the partial value: + // this check is done by PartialMapping::verify + assert(PartMap.verify() && "Partial mapping is invalid"); + // The original value should completely be mapped. + // Thus the maximum accessed index + 1 is the size of the original value. + OrigValueBitWidth = + std::max(OrigValueBitWidth, PartMap.getHighBitIdx() + 1); + } + assert(OrigValueBitWidth >= MeaningfulBitWidth && + "Meaningful bits not covered by the mapping"); + APInt ValueMask(OrigValueBitWidth, 0); + for (const RegisterBankInfo::PartialMapping &PartMap : *this) { + // Check that the union of the partial mappings covers the whole value, + // without overlaps. + // The high bit is exclusive in the APInt API, thus getHighBitIdx + 1. + APInt PartMapMask = APInt::getBitsSet(OrigValueBitWidth, PartMap.StartIdx, + PartMap.getHighBitIdx() + 1); + ValueMask ^= PartMapMask; + assert((ValueMask & PartMapMask) == PartMapMask && + "Some partial mappings overlap"); + } + assert(ValueMask.isAllOnesValue() && "Value is not fully mapped"); + return true; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegisterBankInfo::ValueMapping::dump() const { + print(dbgs()); + dbgs() << '\n'; +} +#endif + +void RegisterBankInfo::ValueMapping::print(raw_ostream &OS) const { + OS << "#BreakDown: " << NumBreakDowns << " "; + bool IsFirst = true; + for (const PartialMapping &PartMap : *this) { + if (!IsFirst) + OS << ", "; + OS << '[' << PartMap << ']'; + IsFirst = false; + } +} + +bool RegisterBankInfo::InstructionMapping::verify( + const MachineInstr &MI) const { + // Check that all the register operands are properly mapped. + // Check the constructor invariant. + // For PHI, we only care about mapping the definition. + assert(NumOperands == (isCopyLike(MI) ? 1 : MI.getNumOperands()) && + "NumOperands must match, see constructor"); + assert(MI.getParent() && MI.getMF() && + "MI must be connected to a MachineFunction"); + const MachineFunction &MF = *MI.getMF(); + const RegisterBankInfo *RBI = MF.getSubtarget().getRegBankInfo(); + (void)RBI; + + for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { + const MachineOperand &MO = MI.getOperand(Idx); + if (!MO.isReg()) { + assert(!getOperandMapping(Idx).isValid() && + "We should not care about non-reg mapping"); + continue; + } + Register Reg = MO.getReg(); + if (!Reg) + continue; + assert(getOperandMapping(Idx).isValid() && + "We must have a mapping for reg operands"); + const RegisterBankInfo::ValueMapping &MOMapping = getOperandMapping(Idx); + (void)MOMapping; + // Register size in bits. + // This size must match what the mapping expects. 
+ assert(MOMapping.verify(RBI->getSizeInBits( + Reg, MF.getRegInfo(), *MF.getSubtarget().getRegisterInfo())) && + "Value mapping is invalid"); + } + return true; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegisterBankInfo::InstructionMapping::dump() const { + print(dbgs()); + dbgs() << '\n'; +} +#endif + +void RegisterBankInfo::InstructionMapping::print(raw_ostream &OS) const { + OS << "ID: " << getID() << " Cost: " << getCost() << " Mapping: "; + + for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { + const ValueMapping &ValMapping = getOperandMapping(OpIdx); + if (OpIdx) + OS << ", "; + OS << "{ Idx: " << OpIdx << " Map: " << ValMapping << '}'; + } +} + +const int RegisterBankInfo::OperandsMapper::DontKnowIdx = -1; + +RegisterBankInfo::OperandsMapper::OperandsMapper( + MachineInstr &MI, const InstructionMapping &InstrMapping, + MachineRegisterInfo &MRI) + : MRI(MRI), MI(MI), InstrMapping(InstrMapping) { + unsigned NumOpds = InstrMapping.getNumOperands(); + OpToNewVRegIdx.resize(NumOpds, OperandsMapper::DontKnowIdx); + assert(InstrMapping.verify(MI) && "Invalid mapping for MI"); +} + +iterator_range<SmallVectorImpl<Register>::iterator> +RegisterBankInfo::OperandsMapper::getVRegsMem(unsigned OpIdx) { + assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); + unsigned NumPartialVal = + getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns; + int StartIdx = OpToNewVRegIdx[OpIdx]; + + if (StartIdx == OperandsMapper::DontKnowIdx) { + // This is the first time we try to access OpIdx. + // Create the cells that will hold all the partial values at the + // end of the list of NewVReg. + StartIdx = NewVRegs.size(); + OpToNewVRegIdx[OpIdx] = StartIdx; + for (unsigned i = 0; i < NumPartialVal; ++i) + NewVRegs.push_back(0); + } + SmallVectorImpl<Register>::iterator End = + getNewVRegsEnd(StartIdx, NumPartialVal); + + return make_range(&NewVRegs[StartIdx], End); +} + +SmallVectorImpl<Register>::const_iterator +RegisterBankInfo::OperandsMapper::getNewVRegsEnd(unsigned StartIdx, + unsigned NumVal) const { + return const_cast<OperandsMapper *>(this)->getNewVRegsEnd(StartIdx, NumVal); +} +SmallVectorImpl<Register>::iterator +RegisterBankInfo::OperandsMapper::getNewVRegsEnd(unsigned StartIdx, + unsigned NumVal) { + assert((NewVRegs.size() == StartIdx + NumVal || + NewVRegs.size() > StartIdx + NumVal) && + "NewVRegs too small to contain all the partial mapping"); + return NewVRegs.size() <= StartIdx + NumVal ? NewVRegs.end() + : &NewVRegs[StartIdx + NumVal]; +} + +void RegisterBankInfo::OperandsMapper::createVRegs(unsigned OpIdx) { + assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); + iterator_range<SmallVectorImpl<Register>::iterator> NewVRegsForOpIdx = + getVRegsMem(OpIdx); + const ValueMapping &ValMapping = getInstrMapping().getOperandMapping(OpIdx); + const PartialMapping *PartMap = ValMapping.begin(); + for (Register &NewVReg : NewVRegsForOpIdx) { + assert(PartMap != ValMapping.end() && "Out-of-bound access"); + assert(NewVReg == 0 && "Register has already been created"); + // The new registers are always bound to scalar with the right size. + // The actual type has to be set when the target does the mapping + // of the instruction. + // The rationale is that this generic code cannot guess how the + // target plans to split the input type. 
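+    // E.g. (illustrative), a value broken down as {0, 32, GPR} and
+    // {32, 32, GPR} gets two fresh s32 virtual registers here, one per
+    // partial mapping, each assigned to the GPR bank.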
+ NewVReg = MRI.createGenericVirtualRegister(LLT::scalar(PartMap->Length)); + MRI.setRegBank(NewVReg, *PartMap->RegBank); + ++PartMap; + } +} + +void RegisterBankInfo::OperandsMapper::setVRegs(unsigned OpIdx, + unsigned PartialMapIdx, + Register NewVReg) { + assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); + assert(getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns > + PartialMapIdx && + "Out-of-bound access for partial mapping"); + // Make sure the memory is initialized for that operand. + (void)getVRegsMem(OpIdx); + assert(NewVRegs[OpToNewVRegIdx[OpIdx] + PartialMapIdx] == 0 && + "This value is already set"); + NewVRegs[OpToNewVRegIdx[OpIdx] + PartialMapIdx] = NewVReg; +} + +iterator_range<SmallVectorImpl<Register>::const_iterator> +RegisterBankInfo::OperandsMapper::getVRegs(unsigned OpIdx, + bool ForDebug) const { + (void)ForDebug; + assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); + int StartIdx = OpToNewVRegIdx[OpIdx]; + + if (StartIdx == OperandsMapper::DontKnowIdx) + return make_range(NewVRegs.end(), NewVRegs.end()); + + unsigned PartMapSize = + getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns; + SmallVectorImpl<Register>::const_iterator End = + getNewVRegsEnd(StartIdx, PartMapSize); + iterator_range<SmallVectorImpl<Register>::const_iterator> Res = + make_range(&NewVRegs[StartIdx], End); +#ifndef NDEBUG + for (Register VReg : Res) + assert((VReg || ForDebug) && "Some registers are uninitialized"); +#endif + return Res; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegisterBankInfo::OperandsMapper::dump() const { + print(dbgs(), true); + dbgs() << '\n'; +} +#endif + +void RegisterBankInfo::OperandsMapper::print(raw_ostream &OS, + bool ForDebug) const { + unsigned NumOpds = getInstrMapping().getNumOperands(); + if (ForDebug) { + OS << "Mapping for " << getMI() << "\nwith " << getInstrMapping() << '\n'; + // Print out the internal state of the index table. + OS << "Populated indices (CellNumber, IndexInNewVRegs): "; + bool IsFirst = true; + for (unsigned Idx = 0; Idx != NumOpds; ++Idx) { + if (OpToNewVRegIdx[Idx] != DontKnowIdx) { + if (!IsFirst) + OS << ", "; + OS << '(' << Idx << ", " << OpToNewVRegIdx[Idx] << ')'; + IsFirst = false; + } + } + OS << '\n'; + } else + OS << "Mapping ID: " << getInstrMapping().getID() << ' '; + + OS << "Operand Mapping: "; + // If we have a function, we can pretty print the name of the registers. + // Otherwise we will print the raw numbers. + const TargetRegisterInfo *TRI = + getMI().getParent() && getMI().getMF() + ? getMI().getMF()->getSubtarget().getRegisterInfo() + : nullptr; + bool IsFirst = true; + for (unsigned Idx = 0; Idx != NumOpds; ++Idx) { + if (OpToNewVRegIdx[Idx] == DontKnowIdx) + continue; + if (!IsFirst) + OS << ", "; + IsFirst = false; + OS << '(' << printReg(getMI().getOperand(Idx).getReg(), TRI) << ", ["; + bool IsFirstNewVReg = true; + for (Register VReg : getVRegs(Idx)) { + if (!IsFirstNewVReg) + OS << ", "; + IsFirstNewVReg = false; + OS << printReg(VReg, TRI); + } + OS << "])"; + } +} diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp new file mode 100644 index 000000000000..45618d7992ad --- /dev/null +++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp @@ -0,0 +1,450 @@ +//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file This file implements the utility functions used by the GlobalISel
+/// pipeline.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackProtector.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+
+#define DEBUG_TYPE "globalisel-utils"
+
+using namespace llvm;
+
+unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
+                                   const TargetInstrInfo &TII,
+                                   const RegisterBankInfo &RBI, unsigned Reg,
+                                   const TargetRegisterClass &RegClass) {
+  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
+    return MRI.createVirtualRegister(&RegClass);
+
+  return Reg;
+}
+
+unsigned llvm::constrainOperandRegClass(
+    const MachineFunction &MF, const TargetRegisterInfo &TRI,
+    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
+    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
+    const TargetRegisterClass &RegClass, const MachineOperand &RegMO,
+    unsigned OpIdx) {
+  Register Reg = RegMO.getReg();
+  // Assume physical registers are properly constrained.
+  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
+
+  unsigned ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
+  // If we created a new virtual register because the class is not compatible
+  // then create a copy between the new and the old register.
+  if (ConstrainedReg != Reg) {
+    MachineBasicBlock::iterator InsertIt(&InsertPt);
+    MachineBasicBlock &MBB = *InsertPt.getParent();
+    if (RegMO.isUse()) {
+      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
+              TII.get(TargetOpcode::COPY), ConstrainedReg)
+          .addReg(Reg);
+    } else {
+      assert(RegMO.isDef() && "Must be a definition");
+      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
+              TII.get(TargetOpcode::COPY), Reg)
+          .addReg(ConstrainedReg);
+    }
+  }
+  return ConstrainedReg;
+}
+
+unsigned llvm::constrainOperandRegClass(
+    const MachineFunction &MF, const TargetRegisterInfo &TRI,
+    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
+    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
+    const MachineOperand &RegMO, unsigned OpIdx) {
+  Register Reg = RegMO.getReg();
+  // Assume physical registers are properly constrained.
+  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
+
+  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
+  // Some of the target independent instructions, like COPY, may not impose any
+  // register class constraints on some of their operands: if it's a use, we
+  // can skip constraining as the instruction defining the register would
+  // constrain it.
+
+  // We can't constrain unallocatable register classes, because we can't create
+  // virtual registers for these classes, so we need to let targets handle this
+  // case.
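+  // E.g. (illustrative), a COPY's use operand may come back with no class at
+  // all, while an operand tied to a non-allocatable class (say, a status
+  // flags class) has to be re-derived from the operand by the target hook
+  // below.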
+ if (RegClass && !RegClass->isAllocatable()) + RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI); + + if (!RegClass) { + assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) && + "Register class constraint is required unless either the " + "instruction is target independent or the operand is a use"); + // FIXME: Just bailing out like this here could be not enough, unless we + // expect the users of this function to do the right thing for PHIs and + // COPY: + // v1 = COPY v0 + // v2 = COPY v1 + // v1 here may end up not being constrained at all. Please notice that to + // reproduce the issue we likely need a destination pattern of a selection + // rule producing such extra copies, not just an input GMIR with them as + // every existing target using selectImpl handles copies before calling it + // and they never reach this function. + return Reg; + } + return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass, + RegMO, OpIdx); +} + +bool llvm::constrainSelectedInstRegOperands(MachineInstr &I, + const TargetInstrInfo &TII, + const TargetRegisterInfo &TRI, + const RegisterBankInfo &RBI) { + assert(!isPreISelGenericOpcode(I.getOpcode()) && + "A selected instruction is expected"); + MachineBasicBlock &MBB = *I.getParent(); + MachineFunction &MF = *MBB.getParent(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + + for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) { + MachineOperand &MO = I.getOperand(OpI); + + // There's nothing to be done on non-register operands. + if (!MO.isReg()) + continue; + + LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n'); + assert(MO.isReg() && "Unsupported non-reg operand"); + + Register Reg = MO.getReg(); + // Physical registers don't need to be constrained. + if (Register::isPhysicalRegister(Reg)) + continue; + + // Register operands with a value of 0 (e.g. predicate operands) don't need + // to be constrained. + if (Reg == 0) + continue; + + // If the operand is a vreg, we should constrain its regclass, and only + // insert COPYs if that's impossible. + // constrainOperandRegClass does that for us. + MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), + MO, OpI)); + + // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been + // done. + if (MO.isUse()) { + int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO); + if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx)) + I.tieOperands(DefIdx, OpI); + } + } + return true; +} + +bool llvm::isTriviallyDead(const MachineInstr &MI, + const MachineRegisterInfo &MRI) { + // If we can move an instruction, we can remove it. Otherwise, it has + // a side-effect of some sort. + bool SawStore = false; + if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI()) + return false; + + // Instructions without side-effects are dead iff they only define dead vregs. + for (auto &MO : MI.operands()) { + if (!MO.isReg() || !MO.isDef()) + continue; + + Register Reg = MO.getReg(); + if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg)) + return false; + } + return true; +} + +void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, + MachineOptimizationRemarkEmitter &MORE, + MachineOptimizationRemarkMissed &R) { + MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); + + // Print the function name explicitly if we don't have a debug location (which + // makes the diagnostic less useful) or if we're going to emit a raw error. 
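+  // E.g., with abort enabled the raw error then reads roughly:
+  //   unable to map instruction (in function: foo)
+  // where "foo" is the name appended below.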
+void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                              MachineOptimizationRemarkEmitter &MORE,
+                              MachineOptimizationRemarkMissed &R) {
+  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
+
+  // Print the function name explicitly if we don't have a debug location
+  // (which makes the diagnostic less useful) or if we're going to emit a raw
+  // error.
+  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
+    R << (" (in function: " + MF.getName() + ")").str();
+
+  if (TPC.isGlobalISelAbortEnabled())
+    report_fatal_error(R.getMsg());
+  else
+    MORE.emit(R);
+}
+
+void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                              MachineOptimizationRemarkEmitter &MORE,
+                              const char *PassName, StringRef Msg,
+                              const MachineInstr &MI) {
+  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
+                                    MI.getDebugLoc(), MI.getParent());
+  R << Msg;
+  // Printing MI is expensive; only do it if expensive remarks are enabled.
+  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
+    R << ": " << ore::MNV("Inst", MI);
+  reportGISelFailure(MF, TPC, MORE, R);
+}
+
+Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
+                                           const MachineRegisterInfo &MRI) {
+  Optional<ValueAndVReg> ValAndVReg =
+      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
+  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
+         "Value found while looking through instrs");
+  if (!ValAndVReg)
+    return None;
+  return ValAndVReg->Value;
+}
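+// Illustrative example (not from the original source; Reg is a hypothetical
+// vreg): checking whether an operand is a literal zero in a combine. This
+// matches only a direct G_CONSTANT definition; the look-through variant
+// below additionally walks COPY, G_INTTOPTR, G_TRUNC, G_SEXT, and G_ZEXT.
+//
+//   if (Optional<int64_t> Cst = getConstantVRegVal(Reg, MRI))
+//     if (*Cst == 0)
+//       return replaceWithIdentity(); // hypothetical fold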
+Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
+    unsigned VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
+    bool HandleFConstant) {
+  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
+  MachineInstr *MI;
+  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
+    return Opcode == TargetOpcode::G_CONSTANT ||
+           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
+  };
+  auto GetImmediateValue = [HandleFConstant,
+                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
+    const MachineOperand &CstVal = MI.getOperand(1);
+    if (!CstVal.isImm() && !CstVal.isCImm() &&
+        (!HandleFConstant || !CstVal.isFPImm()))
+      return None;
+    if (!CstVal.isFPImm()) {
+      unsigned BitWidth =
+          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
+                                 : CstVal.getCImm()->getValue();
+      assert(Val.getBitWidth() == BitWidth &&
+             "Value bitwidth doesn't match definition type");
+      return Val;
+    }
+    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
+  };
+  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
+         LookThroughInstrs) {
+    switch (MI->getOpcode()) {
+    case TargetOpcode::G_TRUNC:
+    case TargetOpcode::G_SEXT:
+    case TargetOpcode::G_ZEXT:
+      SeenOpcodes.push_back(std::make_pair(
+          MI->getOpcode(),
+          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
+      VReg = MI->getOperand(1).getReg();
+      break;
+    case TargetOpcode::COPY:
+      VReg = MI->getOperand(1).getReg();
+      if (Register::isPhysicalRegister(VReg))
+        return None;
+      break;
+    case TargetOpcode::G_INTTOPTR:
+      VReg = MI->getOperand(1).getReg();
+      break;
+    default:
+      return None;
+    }
+  }
+  if (!MI || !IsConstantOpcode(MI->getOpcode()))
+    return None;
+
+  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
+  if (!MaybeVal)
+    return None;
+  APInt &Val = *MaybeVal;
+  while (!SeenOpcodes.empty()) {
+    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
+    switch (OpcodeAndSize.first) {
+    case TargetOpcode::G_TRUNC:
+      Val = Val.trunc(OpcodeAndSize.second);
+      break;
+    case TargetOpcode::G_SEXT:
+      Val = Val.sext(OpcodeAndSize.second);
+      break;
+    case TargetOpcode::G_ZEXT:
+      Val = Val.zext(OpcodeAndSize.second);
+      break;
+    }
+  }
+
+  if (Val.getBitWidth() > 64)
+    return None;
+
+  return ValueAndVReg{Val.getSExtValue(), VReg};
+}
+
+const llvm::ConstantFP *
+llvm::getConstantFPVRegVal(unsigned VReg, const MachineRegisterInfo &MRI) {
+  MachineInstr *MI = MRI.getVRegDef(VReg);
+  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
+    return nullptr;
+  return MI->getOperand(1).getFPImm();
+}
+
+llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
+                                               const MachineRegisterInfo &MRI) {
+  auto *DefMI = MRI.getVRegDef(Reg);
+  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
+  if (!DstTy.isValid())
+    return nullptr;
+  while (DefMI->getOpcode() == TargetOpcode::COPY) {
+    Register SrcReg = DefMI->getOperand(1).getReg();
+    auto SrcTy = MRI.getType(SrcReg);
+    if (!SrcTy.isValid() || SrcTy != DstTy)
+      break;
+    DefMI = MRI.getVRegDef(SrcReg);
+  }
+  return DefMI;
+}
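+// Illustrative example (not from the original source; Src is a hypothetical
+// vreg): with %2:_(s32) = COPY %1 and %1:_(s32) = G_ADD %a, %b,
+// getDefIgnoringCopies(%2, MRI) returns the G_ADD. getOpcodeDef below adds
+// an opcode filter on top of that walk:
+//
+//   if (MachineInstr *Add = getOpcodeDef(TargetOpcode::G_ADD, Src, MRI))
+//     ; // Src is (possibly a chain of copies of) a G_ADD result.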
+llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
+                                       const MachineRegisterInfo &MRI) {
+  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
+  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
+}
+
+APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
+  if (Size == 32)
+    return APFloat(float(Val));
+  if (Size == 64)
+    return APFloat(Val);
+  if (Size != 16)
+    llvm_unreachable("Unsupported FPConstant size");
+  bool Ignored;
+  APFloat APF(Val);
+  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
+  return APF;
+}
+
+Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
+                                        const unsigned Op2,
+                                        const MachineRegisterInfo &MRI) {
+  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
+  if (MaybeOp1Cst && MaybeOp2Cst) {
+    LLT Ty = MRI.getType(Op1);
+    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
+    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
+    switch (Opcode) {
+    default:
+      break;
+    case TargetOpcode::G_ADD:
+      return C1 + C2;
+    case TargetOpcode::G_AND:
+      return C1 & C2;
+    case TargetOpcode::G_ASHR:
+      return C1.ashr(C2);
+    case TargetOpcode::G_LSHR:
+      return C1.lshr(C2);
+    case TargetOpcode::G_MUL:
+      return C1 * C2;
+    case TargetOpcode::G_OR:
+      return C1 | C2;
+    case TargetOpcode::G_SHL:
+      return C1 << C2;
+    case TargetOpcode::G_SUB:
+      return C1 - C2;
+    case TargetOpcode::G_XOR:
+      return C1 ^ C2;
+    case TargetOpcode::G_UDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.udiv(C2);
+    case TargetOpcode::G_SDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.sdiv(C2);
+    case TargetOpcode::G_UREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.urem(C2);
+    case TargetOpcode::G_SREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.srem(C2);
+    }
+  }
+  return None;
+}
+
+bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
+                           bool SNaN) {
+  const MachineInstr *DefMI = MRI.getVRegDef(Val);
+  if (!DefMI)
+    return false;
+
+  if (DefMI->getFlag(MachineInstr::FmNoNans))
+    return true;
+
+  if (SNaN) {
+    // These FP operations quiet a signaling NaN. For now, just handle the
+    // ones inserted during legalization.
+    switch (DefMI->getOpcode()) {
+    case TargetOpcode::G_FPEXT:
+    case TargetOpcode::G_FPTRUNC:
+    case TargetOpcode::G_FCANONICALIZE:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  return false;
+}
+
+Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const unsigned Op1,
+                                        uint64_t Imm,
+                                        const MachineRegisterInfo &MRI) {
+  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+  if (MaybeOp1Cst) {
+    LLT Ty = MRI.getType(Op1);
+    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
+    switch (Opcode) {
+    default:
+      break;
+    case TargetOpcode::G_SEXT_INREG:
+      return C1.trunc(Imm).sext(C1.getBitWidth());
+    }
+  }
+  return None;
+}
+
+void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
+  AU.addPreserved<StackProtector>();
+}
+
+MVT llvm::getMVTForLLT(LLT Ty) {
+  if (!Ty.isVector())
+    return MVT::getIntegerVT(Ty.getSizeInBits());
+
+  return MVT::getVectorVT(
+      MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
+      Ty.getNumElements());
+}
+
+LLT llvm::getLLTForMVT(MVT Ty) {
+  if (!Ty.isVector())
+    return LLT::scalar(Ty.getSizeInBits());
+
+  return LLT::vector(Ty.getVectorNumElements(),
+                     Ty.getVectorElementType().getSizeInBits());
+}
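+// Illustrative example (not from the original source): the two translation
+// helpers above are size-preserving but lossy; pointers and integer scalars
+// of the same width both map to the same integer MVT, so only integer
+// scalars and vectors round-trip exactly:
+//
+//   assert(getMVTForLLT(LLT::scalar(32)) == MVT::i32);
+//   assert(getLLTForMVT(MVT::i32) == LLT::scalar(32));
+//   assert(getMVTForLLT(LLT::vector(4, 16)) == MVT::v4i16);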
