Diffstat (limited to 'llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp  | 1180
1 file changed, 1180 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
new file mode 100644
index 000000000000..df770f6664ca
--- /dev/null
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -0,0 +1,1180 @@
+//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the MachineIRBuilder class.
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/DebugInfo.h"
+
+using namespace llvm;
+
+void MachineIRBuilder::setMF(MachineFunction &MF) {
+ State.MF = &MF;
+ State.MBB = nullptr;
+ State.MRI = &MF.getRegInfo();
+ State.TII = MF.getSubtarget().getInstrInfo();
+ State.DL = DebugLoc();
+ State.II = MachineBasicBlock::iterator();
+ State.Observer = nullptr;
+}
+
+void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
+ State.MBB = &MBB;
+ State.II = MBB.end();
+ assert(&getMF() == MBB.getParent() &&
+ "Basic block is in a different function");
+}
+
+void MachineIRBuilder::setInstr(MachineInstr &MI) {
+ assert(MI.getParent() && "Instruction is not part of a basic block");
+ setMBB(*MI.getParent());
+ State.II = MI.getIterator();
+}
+
+void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }
+
+void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II) {
+ assert(MBB.getParent() == &getMF() &&
+ "Basic block is in a different function");
+ State.MBB = &MBB;
+ State.II = II;
+}
+
+void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
+ if (State.Observer)
+ State.Observer->createdInstr(*InsertedInstr);
+}
+
+void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) {
+ State.Observer = &Observer;
+}
+
+void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; }
+
+//------------------------------------------------------------------------------
+// Build instruction variants.
+//------------------------------------------------------------------------------
+
+MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
+ return insertInstr(buildInstrNoInsert(Opcode));
+}
+
+MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
+ MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
+ getMBB().insert(getInsertPt(), MIB);
+ recordInsertion(MIB);
+ return MIB;
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
+ const MDNode *Expr) {
+ assert(isa<DILocalVariable>(Variable) && "not a variable");
+ assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
+ return insertInstr(BuildMI(getMF(), getDL(),
+ getTII().get(TargetOpcode::DBG_VALUE),
+ /*IsIndirect*/ false, Reg, Variable, Expr));
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
+ const MDNode *Expr) {
+ assert(isa<DILocalVariable>(Variable) && "not a variable");
+ assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
+ // DBG_VALUE insts now carry IR-level indirection in their DIExpression
+ // rather than encoding it in the instruction itself.
+ const DIExpression *DIExpr = cast<DIExpression>(Expr);
+ DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
+ return insertInstr(BuildMI(getMF(), getDL(),
+ getTII().get(TargetOpcode::DBG_VALUE),
+ /*IsIndirect*/ false, Reg, Variable, DIExpr));
+}
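+
+// Illustrative usage sketch (editorial note, not part of the upstream file;
+// MIB, ValueReg, Var and Expr are placeholder names): with the builder
+// positioned at the desired point, an indirect location for a
+// DILocalVariable *Var described by DIExpression *Expr is emitted as
+//   MIB.buildIndirectDbgValue(ValueReg, Var, Expr);
+// and the indirection shows up as a trailing DW_OP_deref on the expression
+// operand of the resulting DBG_VALUE rather than in its IsIndirect flag.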
+
+MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
+ const MDNode *Variable,
+ const MDNode *Expr) {
+ assert(isa<DILocalVariable>(Variable) && "not a variable");
+ assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
+ // DBG_VALUE insts now carry IR-level indirection in their DIExpression
+ // rather than encoding it in the instruction itself.
+ const DIExpression *DIExpr = cast<DIExpression>(Expr);
+ DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
+ return buildInstr(TargetOpcode::DBG_VALUE)
+ .addFrameIndex(FI)
+ .addReg(0)
+ .addMetadata(Variable)
+ .addMetadata(DIExpr);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
+ const MDNode *Variable,
+ const MDNode *Expr) {
+ assert(isa<DILocalVariable>(Variable) && "not a variable");
+ assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
+ auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
+ if (auto *CI = dyn_cast<ConstantInt>(&C)) {
+ if (CI->getBitWidth() > 64)
+ MIB.addCImm(CI);
+ else
+ MIB.addImm(CI->getZExtValue());
+ } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
+ MIB.addFPImm(CFP);
+ } else {
+ // Insert %noreg if we didn't find a usable constant and had to drop it.
+ MIB.addReg(0U);
+ }
+
+ return MIB.addReg(0).addMetadata(Variable).addMetadata(Expr);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
+ assert(isa<DILabel>(Label) && "not a label");
+ assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
+ "Expected inlined-at fields to agree");
+ auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
+
+ return MIB.addMetadata(Label);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
+ const SrcOp &Size,
+ unsigned Align) {
+ assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
+ auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
+ Res.addDefToMIB(*getMRI(), MIB);
+ Size.addSrcToMIB(MIB);
+ MIB.addImm(Align);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
+ int Idx) {
+ assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
+ auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
+ Res.addDefToMIB(*getMRI(), MIB);
+ MIB.addFrameIndex(Idx);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
+ const GlobalValue *GV) {
+ assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
+ assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
+ GV->getType()->getAddressSpace() &&
+ "address space mismatch");
+
+ auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
+ Res.addDefToMIB(*getMRI(), MIB);
+ MIB.addGlobalAddress(GV);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
+ unsigned JTI) {
+ return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
+ .addJumpTableIndex(JTI);
+}
+
+void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
+ const LLT &Op1) {
+ assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
+ assert((Res == Op0 && Res == Op1) && "type mismatch");
+}
+
+void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
+ const LLT &Op1) {
+ assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
+ assert((Res == Op0) && "type mismatch");
+}
+
+MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res,
+ const SrcOp &Op0,
+ const SrcOp &Op1) {
+ assert(Res.getLLTTy(*getMRI()).isPointer() &&
+ Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
+ assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");
+
+ return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1});
+}
+
+Optional<MachineInstrBuilder>
+MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
+ const LLT &ValueTy, uint64_t Value) {
+ assert(Res == 0 && "Res is a result argument");
+ assert(ValueTy.isScalar() && "invalid offset type");
+
+ if (Value == 0) {
+ Res = Op0;
+ return None;
+ }
+
+ Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
+ auto Cst = buildConstant(ValueTy, Value);
+ return buildGEP(Res, Op0, Cst.getReg(0));
+}
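+
+// Caller-side sketch (editorial note, not part of the upstream file; MIB,
+// Addr, BasePtr and Offset are placeholder names):
+//   Register Addr; // must stay 0 on entry, it is purely a result argument
+//   auto GEPInst = MIB.materializeGEP(Addr, BasePtr, LLT::scalar(64), Offset);
+// If Offset is zero, GEPInst is None and Addr simply aliases BasePtr;
+// otherwise a G_CONSTANT plus G_GEP pair is built and Addr holds the result.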
+
+MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res,
+ const SrcOp &Op0,
+ uint32_t NumBits) {
+ assert(Res.getLLTTy(*getMRI()).isPointer() &&
+ Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
+
+ auto MIB = buildInstr(TargetOpcode::G_PTR_MASK);
+ Res.addDefToMIB(*getMRI(), MIB);
+ Op0.addSrcToMIB(MIB);
+ MIB.addImm(NumBits);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
+ return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
+ assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
+ return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
+ unsigned JTI,
+ Register IndexReg) {
+ assert(getMRI()->getType(TablePtr).isPointer() &&
+ "Table reg must be a pointer");
+ return buildInstr(TargetOpcode::G_BRJT)
+ .addUse(TablePtr)
+ .addJumpTableIndex(JTI)
+ .addUse(IndexReg);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildInstr(TargetOpcode::COPY, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
+ const ConstantInt &Val) {
+ LLT Ty = Res.getLLTTy(*getMRI());
+ LLT EltTy = Ty.getScalarType();
+ assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
+ "creating constant with the wrong size");
+
+ if (Ty.isVector()) {
+ auto Const = buildInstr(TargetOpcode::G_CONSTANT)
+ .addDef(getMRI()->createGenericVirtualRegister(EltTy))
+ .addCImm(&Val);
+ return buildSplatVector(Res, Const);
+ }
+
+ auto Const = buildInstr(TargetOpcode::G_CONSTANT);
+ Res.addDefToMIB(*getMRI(), Const);
+ Const.addCImm(&Val);
+ return Const;
+}
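+
+// Behavior sketch (editorial note, not part of the upstream file): for a
+// vector destination such as <4 x s32> the constant is materialized once and
+// then splatted, roughly
+//   %c:_(s32) = G_CONSTANT i32 7
+//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
+// while a scalar destination gets a single G_CONSTANT.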
+
+MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
+ int64_t Val) {
+ auto IntN = IntegerType::get(getMF().getFunction().getContext(),
+ Res.getLLTTy(*getMRI()).getScalarSizeInBits());
+ ConstantInt *CI = ConstantInt::get(IntN, Val, true);
+ return buildConstant(Res, *CI);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
+ const ConstantFP &Val) {
+ LLT Ty = Res.getLLTTy(*getMRI());
+ LLT EltTy = Ty.getScalarType();
+
+ assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
+ == EltTy.getSizeInBits() &&
+ "creating fconstant with the wrong size");
+
+ assert(!Ty.isPointer() && "invalid operand type");
+
+ if (Ty.isVector()) {
+ auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
+ .addDef(getMRI()->createGenericVirtualRegister(EltTy))
+ .addFPImm(&Val);
+
+ return buildSplatVector(Res, Const);
+ }
+
+ auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
+ Res.addDefToMIB(*getMRI(), Const);
+ Const.addFPImm(&Val);
+ return Const;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
+ const APInt &Val) {
+ ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
+ return buildConstant(Res, *CI);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
+ double Val) {
+ LLT DstTy = Res.getLLTTy(*getMRI());
+ auto &Ctx = getMF().getFunction().getContext();
+ auto *CFP =
+ ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
+ return buildFConstant(Res, *CFP);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
+ const APFloat &Val) {
+ auto &Ctx = getMF().getFunction().getContext();
+ auto *CFP = ConstantFP::get(Ctx, Val);
+ return buildFConstant(Res, *CFP);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst,
+ MachineBasicBlock &Dest) {
+ assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");
+
+ return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res,
+ const SrcOp &Addr,
+ MachineMemOperand &MMO) {
+ return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
+ const DstOp &Res,
+ const SrcOp &Addr,
+ MachineMemOperand &MMO) {
+ assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
+ assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
+
+ auto MIB = buildInstr(Opcode);
+ Res.addDefToMIB(*getMRI(), MIB);
+ Addr.addSrcToMIB(MIB);
+ MIB.addMemOperand(&MMO);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
+ const SrcOp &Addr,
+ MachineMemOperand &MMO) {
+ assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
+ assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
+
+ auto MIB = buildInstr(TargetOpcode::G_STORE);
+ Val.addSrcToMIB(MIB);
+ Addr.addSrcToMIB(MIB);
+ MIB.addMemOperand(&MMO);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUAddo(const DstOp &Res,
+ const DstOp &CarryOut,
+ const SrcOp &Op0,
+ const SrcOp &Op1) {
+ return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res,
+ const DstOp &CarryOut,
+ const SrcOp &Op0,
+ const SrcOp &Op1,
+ const SrcOp &CarryIn) {
+ return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
+ {Op0, Op1, CarryIn});
+}
+
+MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildInstr(TargetOpcode::G_SEXT, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
+}
+
+unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
+ const auto *TLI = getMF().getSubtarget().getTargetLowering();
+ switch (TLI->getBooleanContents(IsVec, IsFP)) {
+ case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
+ return TargetOpcode::G_SEXT;
+ case TargetLoweringBase::ZeroOrOneBooleanContent:
+ return TargetOpcode::G_ZEXT;
+ default:
+ return TargetOpcode::G_ANYEXT;
+ }
+}
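+
+// Behavior sketch (editorial note, not part of the upstream file): a target
+// reporting ZeroOrNegativeOneBooleanContent has its i1 values widened with
+// G_SEXT, ZeroOrOneBooleanContent with G_ZEXT, and anything else with
+// G_ANYEXT; buildBoolExt below simply forwards to whichever opcode this
+// returns.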
+
+MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
+ const SrcOp &Op,
+ bool IsFP) {
+ unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
+ return buildInstr(ExtOp, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
+ const DstOp &Res,
+ const SrcOp &Op) {
+ assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
+ TargetOpcode::G_SEXT == ExtOpc) &&
+ "Expecting Extending Opc");
+ assert(Res.getLLTTy(*getMRI()).isScalar() ||
+ Res.getLLTTy(*getMRI()).isVector());
+ assert(Res.getLLTTy(*getMRI()).isScalar() ==
+ Op.getLLTTy(*getMRI()).isScalar());
+
+ unsigned Opcode = TargetOpcode::COPY;
+ if (Res.getLLTTy(*getMRI()).getSizeInBits() >
+ Op.getLLTTy(*getMRI()).getSizeInBits())
+ Opcode = ExtOpc;
+ else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
+ Op.getLLTTy(*getMRI()).getSizeInBits())
+ Opcode = TargetOpcode::G_TRUNC;
+ else
+ assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
+
+ return buildInstr(Opcode, Res, Op);
+}
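+
+// Behavior sketch (editorial note, not part of the upstream file; Dst_* and
+// Src_* are placeholder registers of the named scalar types): the opcode is
+// chosen purely from the relative bit widths, e.g.
+//   MIB.buildSExtOrTrunc(Dst_s64, Src_s32); // -> G_SEXT
+//   MIB.buildSExtOrTrunc(Dst_s16, Src_s32); // -> G_TRUNC
+//   MIB.buildSExtOrTrunc(Dst_s32, Src_s32); // -> COPY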
+
+MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
+ const SrcOp &Src) {
+ LLT SrcTy = Src.getLLTTy(*getMRI());
+ LLT DstTy = Dst.getLLTTy(*getMRI());
+ if (SrcTy == DstTy)
+ return buildCopy(Dst, Src);
+
+ unsigned Opcode;
+ if (SrcTy.isPointer() && DstTy.isScalar())
+ Opcode = TargetOpcode::G_PTRTOINT;
+ else if (DstTy.isPointer() && SrcTy.isScalar())
+ Opcode = TargetOpcode::G_INTTOPTR;
+ else {
+    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
+ Opcode = TargetOpcode::G_BITCAST;
+ }
+
+ return buildInstr(Opcode, Dst, Src);
+}
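+
+// Behavior sketch (editorial note, not part of the upstream file): a cast
+// from p0 to s64 becomes G_PTRTOINT, s64 to p0 becomes G_INTTOPTR, two
+// distinct non-pointer types of the same size become G_BITCAST, and
+// identical types degrade to a plain COPY.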
+
+MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
+ const SrcOp &Src,
+ uint64_t Index) {
+ LLT SrcTy = Src.getLLTTy(*getMRI());
+ LLT DstTy = Dst.getLLTTy(*getMRI());
+
+#ifndef NDEBUG
+ assert(SrcTy.isValid() && "invalid operand type");
+ assert(DstTy.isValid() && "invalid operand type");
+ assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
+ "extracting off end of register");
+#endif
+
+ if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
+    assert(Index == 0 && "extraction past the end of a register");
+ return buildCast(Dst, Src);
+ }
+
+ auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
+ Dst.addDefToMIB(*getMRI(), Extract);
+ Src.addSrcToMIB(Extract);
+ Extract.addImm(Index);
+ return Extract;
+}
+
+void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
+ ArrayRef<uint64_t> Indices) {
+#ifndef NDEBUG
+ assert(Ops.size() == Indices.size() && "incompatible args");
+ assert(!Ops.empty() && "invalid trivial sequence");
+ assert(std::is_sorted(Indices.begin(), Indices.end()) &&
+ "sequence offsets must be in ascending order");
+
+ assert(getMRI()->getType(Res).isValid() && "invalid operand type");
+ for (auto Op : Ops)
+ assert(getMRI()->getType(Op).isValid() && "invalid operand type");
+#endif
+
+ LLT ResTy = getMRI()->getType(Res);
+ LLT OpTy = getMRI()->getType(Ops[0]);
+ unsigned OpSize = OpTy.getSizeInBits();
+ bool MaybeMerge = true;
+ for (unsigned i = 0; i < Ops.size(); ++i) {
+ if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
+ MaybeMerge = false;
+ break;
+ }
+ }
+
+ if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
+ buildMerge(Res, Ops);
+ return;
+ }
+
+ Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
+ buildUndef(ResIn);
+
+ for (unsigned i = 0; i < Ops.size(); ++i) {
+ Register ResOut = i + 1 == Ops.size()
+ ? Res
+ : getMRI()->createGenericVirtualRegister(ResTy);
+ buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
+ ResIn = ResOut;
+ }
+}
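+
+// Behavior sketch (editorial note, not part of the upstream file): when the
+// pieces are homogeneous, contiguous and exactly cover the result, the
+// sequence collapses to a single merge, e.g. two s32 values at offsets 0 and
+// 32 of an s64 result become
+//   %res:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)
+// otherwise an undef result is built up with a chain of G_INSERTs.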
+
+MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
+ return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
+}
+
+MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
+ ArrayRef<Register> Ops) {
+  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+  // we need some temporary storage for the SrcOp objects. Here we use a
+  // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ assert(TmpVec.size() > 1);
+ return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
+ const SrcOp &Op) {
+ // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
+ // we need some temporary storage for the DstOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
+ assert(TmpVec.size() > 1);
+ return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
+ const SrcOp &Op) {
+ unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
+ SmallVector<Register, 8> TmpVec;
+ for (unsigned I = 0; I != NumReg; ++I)
+ TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
+ return buildUnmerge(TmpVec, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
+ const SrcOp &Op) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
+ // we need some temporary storage for the DstOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
+ assert(TmpVec.size() > 1);
+ return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
+ ArrayRef<Register> Ops) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+  // we need some temporary storage for the SrcOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
+ const SrcOp &Src) {
+ SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
+ ArrayRef<Register> Ops) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+  // we need some temporary storage for the SrcOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+  // we need some temporary storage for the SrcOp objects. Here we use a
+ // sufficiently large SmallVector to not go through the heap.
+ SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
+ return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
+ Register Op, unsigned Index) {
+ assert(Index + getMRI()->getType(Op).getSizeInBits() <=
+ getMRI()->getType(Res).getSizeInBits() &&
+ "insertion past the end of a register");
+
+ if (getMRI()->getType(Res).getSizeInBits() ==
+ getMRI()->getType(Op).getSizeInBits()) {
+ return buildCast(Res, Op);
+ }
+
+ return buildInstr(TargetOpcode::G_INSERT)
+ .addDef(Res)
+ .addUse(Src)
+ .addUse(Op)
+ .addImm(Index);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
+ ArrayRef<Register> ResultRegs,
+ bool HasSideEffects) {
+ auto MIB =
+ buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
+ : TargetOpcode::G_INTRINSIC);
+ for (unsigned ResultReg : ResultRegs)
+ MIB.addDef(ResultReg);
+ MIB.addIntrinsicID(ID);
+ return MIB;
+}
+
+MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
+ ArrayRef<DstOp> Results,
+ bool HasSideEffects) {
+ auto MIB =
+ buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
+ : TargetOpcode::G_INTRINSIC);
+ for (DstOp Result : Results)
+ Result.addDefToMIB(*getMRI(), MIB);
+ MIB.addIntrinsicID(ID);
+ return MIB;
+}
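+
+// Usage sketch (editorial note, not part of the upstream file; ID, Arg0 and
+// Arg1 are placeholder names): result operands are passed up front and the
+// explicit arguments are appended by the caller, e.g.
+//   MIB.buildIntrinsic(ID, ArrayRef<Register>(), /*HasSideEffects=*/true)
+//       .addUse(Arg0)
+//       .addUse(Arg1);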
+
+MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
+ const SrcOp &Op) {
+ return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
+ const DstOp &Res,
+ const SrcOp &Op0,
+ const SrcOp &Op1) {
+ return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
+}
+
+MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
+ const DstOp &Res,
+ const SrcOp &Op0,
+ const SrcOp &Op1,
+ Optional<unsigned> Flags) {
+
+ return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
+ const SrcOp &Tst,
+ const SrcOp &Op0,
+ const SrcOp &Op1,
+ Optional<unsigned> Flags) {
+
+ return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
+ const SrcOp &Elt, const SrcOp &Idx) {
+ return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
+ const SrcOp &Idx) {
+ return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
+}
+
+MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
+ Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
+ Register NewVal, MachineMemOperand &MMO) {
+#ifndef NDEBUG
+ LLT OldValResTy = getMRI()->getType(OldValRes);
+ LLT SuccessResTy = getMRI()->getType(SuccessRes);
+ LLT AddrTy = getMRI()->getType(Addr);
+ LLT CmpValTy = getMRI()->getType(CmpVal);
+ LLT NewValTy = getMRI()->getType(NewVal);
+ assert(OldValResTy.isScalar() && "invalid operand type");
+ assert(SuccessResTy.isScalar() && "invalid operand type");
+ assert(AddrTy.isPointer() && "invalid operand type");
+ assert(CmpValTy.isValid() && "invalid operand type");
+ assert(NewValTy.isValid() && "invalid operand type");
+ assert(OldValResTy == CmpValTy && "type mismatch");
+ assert(OldValResTy == NewValTy && "type mismatch");
+#endif
+
+ return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
+ .addDef(OldValRes)
+ .addDef(SuccessRes)
+ .addUse(Addr)
+ .addUse(CmpVal)
+ .addUse(NewVal)
+ .addMemOperand(&MMO);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
+ Register CmpVal, Register NewVal,
+ MachineMemOperand &MMO) {
+#ifndef NDEBUG
+ LLT OldValResTy = getMRI()->getType(OldValRes);
+ LLT AddrTy = getMRI()->getType(Addr);
+ LLT CmpValTy = getMRI()->getType(CmpVal);
+ LLT NewValTy = getMRI()->getType(NewVal);
+ assert(OldValResTy.isScalar() && "invalid operand type");
+ assert(AddrTy.isPointer() && "invalid operand type");
+ assert(CmpValTy.isValid() && "invalid operand type");
+ assert(NewValTy.isValid() && "invalid operand type");
+ assert(OldValResTy == CmpValTy && "type mismatch");
+ assert(OldValResTy == NewValTy && "type mismatch");
+#endif
+
+ return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
+ .addDef(OldValRes)
+ .addUse(Addr)
+ .addUse(CmpVal)
+ .addUse(NewVal)
+ .addMemOperand(&MMO);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
+ unsigned Opcode, const DstOp &OldValRes,
+ const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO) {
+
+#ifndef NDEBUG
+ LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
+ LLT AddrTy = Addr.getLLTTy(*getMRI());
+ LLT ValTy = Val.getLLTTy(*getMRI());
+ assert(OldValResTy.isScalar() && "invalid operand type");
+ assert(AddrTy.isPointer() && "invalid operand type");
+ assert(ValTy.isValid() && "invalid operand type");
+ assert(OldValResTy == ValTy && "type mismatch");
+ assert(MMO.isAtomic() && "not atomic mem operand");
+#endif
+
+ auto MIB = buildInstr(Opcode);
+ OldValRes.addDefToMIB(*getMRI(), MIB);
+ Addr.addSrcToMIB(MIB);
+ Val.addSrcToMIB(MIB);
+ MIB.addMemOperand(&MMO);
+ return MIB;
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
+ Register Addr,
+ Register Val,
+ MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
+ Register Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
+ MMO);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWFAdd(
+ const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
+ MMO);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
+                                     const SrcOp &Val,
+ MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
+ MMO);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
+ return buildInstr(TargetOpcode::G_FENCE)
+ .addImm(Ordering)
+ .addImm(Scope);
+}
+
+MachineInstrBuilder
+MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
+#ifndef NDEBUG
+ assert(getMRI()->getType(Res).isPointer() && "invalid res type");
+#endif
+
+ return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
+}
+
+void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
+ bool IsExtend) {
+#ifndef NDEBUG
+ if (DstTy.isVector()) {
+ assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
+ assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
+ "different number of elements in a trunc/ext");
+ } else
+ assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
+
+ if (IsExtend)
+ assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
+ "invalid narrowing extend");
+ else
+ assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
+ "invalid widening trunc");
+#endif
+}
+
+void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
+ const LLT &Op0Ty, const LLT &Op1Ty) {
+#ifndef NDEBUG
+ assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
+ "invalid operand type");
+ assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
+ if (ResTy.isScalar() || ResTy.isPointer())
+ assert(TstTy.isScalar() && "type mismatch");
+ else
+ assert((TstTy.isScalar() ||
+ (TstTy.isVector() &&
+ TstTy.getNumElements() == Op0Ty.getNumElements())) &&
+ "type mismatch");
+#endif
+}
+
+MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
+ ArrayRef<DstOp> DstOps,
+ ArrayRef<SrcOp> SrcOps,
+ Optional<unsigned> Flags) {
+ switch (Opc) {
+ default:
+ break;
+ case TargetOpcode::G_SELECT: {
+ assert(DstOps.size() == 1 && "Invalid select");
+ assert(SrcOps.size() == 3 && "Invalid select");
+ validateSelectOp(
+ DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
+ SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
+ break;
+ }
+ case TargetOpcode::G_ADD:
+ case TargetOpcode::G_AND:
+ case TargetOpcode::G_MUL:
+ case TargetOpcode::G_OR:
+ case TargetOpcode::G_SUB:
+ case TargetOpcode::G_XOR:
+ case TargetOpcode::G_UDIV:
+ case TargetOpcode::G_SDIV:
+ case TargetOpcode::G_UREM:
+ case TargetOpcode::G_SREM:
+ case TargetOpcode::G_SMIN:
+ case TargetOpcode::G_SMAX:
+ case TargetOpcode::G_UMIN:
+ case TargetOpcode::G_UMAX: {
+ // All these are binary ops.
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ assert(SrcOps.size() == 2 && "Invalid Srcs");
+ validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
+ SrcOps[0].getLLTTy(*getMRI()),
+ SrcOps[1].getLLTTy(*getMRI()));
+ break;
+ }
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR: {
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ assert(SrcOps.size() == 2 && "Invalid Srcs");
+ validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
+ SrcOps[0].getLLTTy(*getMRI()),
+ SrcOps[1].getLLTTy(*getMRI()));
+ break;
+ }
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_ANYEXT:
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ assert(SrcOps.size() == 1 && "Invalid Srcs");
+ validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
+ SrcOps[0].getLLTTy(*getMRI()), true);
+ break;
+ case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_FPTRUNC: {
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ assert(SrcOps.size() == 1 && "Invalid Srcs");
+ validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
+ SrcOps[0].getLLTTy(*getMRI()), false);
+ break;
+ }
+ case TargetOpcode::COPY:
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ // If the caller wants to add a subreg source it has to be done separately
+ // so we may not have any SrcOps at this point yet.
+ break;
+ case TargetOpcode::G_FCMP:
+ case TargetOpcode::G_ICMP: {
+ assert(DstOps.size() == 1 && "Invalid Dst Operands");
+ assert(SrcOps.size() == 3 && "Invalid Src Operands");
+ // For F/ICMP, the first src operand is the predicate, followed by
+ // the two comparands.
+ assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
+ "Expecting predicate");
+ assert([&]() -> bool {
+ CmpInst::Predicate Pred = SrcOps[0].getPredicate();
+ return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
+ : CmpInst::isFPPredicate(Pred);
+ }() && "Invalid predicate");
+ assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
+ "Type mismatch");
+ assert([&]() -> bool {
+ LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
+ LLT DstTy = DstOps[0].getLLTTy(*getMRI());
+ if (Op0Ty.isScalar() || Op0Ty.isPointer())
+ return DstTy.isScalar();
+ else
+ return DstTy.isVector() &&
+ DstTy.getNumElements() == Op0Ty.getNumElements();
+ }() && "Type Mismatch");
+ break;
+ }
+ case TargetOpcode::G_UNMERGE_VALUES: {
+ assert(!DstOps.empty() && "Invalid trivial sequence");
+ assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
+ assert(std::all_of(DstOps.begin(), DstOps.end(),
+ [&, this](const DstOp &Op) {
+ return Op.getLLTTy(*getMRI()) ==
+ DstOps[0].getLLTTy(*getMRI());
+ }) &&
+ "type mismatch in output list");
+ assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
+ "input operands do not cover output register");
+ break;
+ }
+ case TargetOpcode::G_MERGE_VALUES: {
+ assert(!SrcOps.empty() && "invalid trivial sequence");
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ assert(std::all_of(SrcOps.begin(), SrcOps.end(),
+ [&, this](const SrcOp &Op) {
+ return Op.getLLTTy(*getMRI()) ==
+ SrcOps[0].getLLTTy(*getMRI());
+ }) &&
+ "type mismatch in input list");
+ assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
+ "input operands do not cover output register");
+ if (SrcOps.size() == 1)
+ return buildCast(DstOps[0], SrcOps[0]);
+ if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
+ if (SrcOps[0].getLLTTy(*getMRI()).isVector())
+ return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
+ }
+ break;
+ }
+ case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
+ assert(DstOps.size() == 1 && "Invalid Dst size");
+ assert(SrcOps.size() == 2 && "Invalid Src size");
+ assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
+ assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
+ DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
+ "Invalid operand type");
+ assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
+ assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
+ DstOps[0].getLLTTy(*getMRI()) &&
+ "Type mismatch");
+ break;
+ }
+ case TargetOpcode::G_INSERT_VECTOR_ELT: {
+ assert(DstOps.size() == 1 && "Invalid dst size");
+ assert(SrcOps.size() == 3 && "Invalid src size");
+ assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
+ SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
+ assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
+ SrcOps[1].getLLTTy(*getMRI()) &&
+ "Type mismatch");
+ assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
+ assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
+ SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
+ "Type mismatch");
+ break;
+ }
+ case TargetOpcode::G_BUILD_VECTOR: {
+    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
+ assert(DstOps.size() == 1 && "Invalid DstOps");
+ assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
+ "Res type must be a vector");
+ assert(std::all_of(SrcOps.begin(), SrcOps.end(),
+ [&, this](const SrcOp &Op) {
+ return Op.getLLTTy(*getMRI()) ==
+ SrcOps[0].getLLTTy(*getMRI());
+ }) &&
+ "type mismatch in input list");
+ assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
+ "input scalars do not exactly cover the output vector register");
+ break;
+ }
+ case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
+    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
+ assert(DstOps.size() == 1 && "Invalid DstOps");
+ assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
+ "Res type must be a vector");
+ assert(std::all_of(SrcOps.begin(), SrcOps.end(),
+ [&, this](const SrcOp &Op) {
+ return Op.getLLTTy(*getMRI()) ==
+ SrcOps[0].getLLTTy(*getMRI());
+ }) &&
+ "type mismatch in input list");
+ if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
+ return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
+ break;
+ }
+ case TargetOpcode::G_CONCAT_VECTORS: {
+ assert(DstOps.size() == 1 && "Invalid DstOps");
+    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
+ assert(std::all_of(SrcOps.begin(), SrcOps.end(),
+ [&, this](const SrcOp &Op) {
+ return (Op.getLLTTy(*getMRI()).isVector() &&
+ Op.getLLTTy(*getMRI()) ==
+ SrcOps[0].getLLTTy(*getMRI()));
+ }) &&
+ "type mismatch in input list");
+ assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
+ DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
+ "input vectors do not exactly cover the output vector register");
+ break;
+ }
+ case TargetOpcode::G_UADDE: {
+ assert(DstOps.size() == 2 && "Invalid no of dst operands");
+ assert(SrcOps.size() == 3 && "Invalid no of src operands");
+ assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
+ assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
+ (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
+ "Invalid operand");
+ assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
+ assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
+ "type mismatch");
+ break;
+ }
+ }
+
+ auto MIB = buildInstr(Opc);
+ for (const DstOp &Op : DstOps)
+ Op.addDefToMIB(*getMRI(), MIB);
+ for (const SrcOp &Op : SrcOps)
+ Op.addSrcToMIB(MIB);
+ if (Flags)
+ MIB->setFlags(*Flags);
+ return MIB;
+}