summaryrefslogtreecommitdiff
path: root/include/llvm/CodeGen/GlobalISel
diff options
context:
space:
mode:
Diffstat (limited to 'include/llvm/CodeGen/GlobalISel')
-rw-r--r--include/llvm/CodeGen/GlobalISel/CallLowering.h2
-rw-r--r--include/llvm/CodeGen/GlobalISel/GISelAccessor.h39
-rw-r--r--include/llvm/CodeGen/GlobalISel/GISelWorkList.h69
-rw-r--r--include/llvm/CodeGen/GlobalISel/InstructionSelector.h124
-rw-r--r--include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h728
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h287
-rw-r--r--include/llvm/CodeGen/GlobalISel/Legalizer.h3
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizerHelper.h3
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizerInfo.h378
-rw-r--r--include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h146
-rw-r--r--include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h13
-rw-r--r--include/llvm/CodeGen/GlobalISel/Utils.h6
12 files changed, 1477 insertions, 321 deletions
diff --git a/include/llvm/CodeGen/GlobalISel/CallLowering.h b/include/llvm/CodeGen/GlobalISel/CallLowering.h
index e7ce1946889e..ba84d76de164 100644
--- a/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -18,10 +18,10 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetCallingConv.h"
#include <cstdint>
#include <functional>
diff --git a/include/llvm/CodeGen/GlobalISel/GISelAccessor.h b/include/llvm/CodeGen/GlobalISel/GISelAccessor.h
deleted file mode 100644
index 8dea38059ea4..000000000000
--- a/include/llvm/CodeGen/GlobalISel/GISelAccessor.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===-- GISelAccessor.h - GISel Accessor ------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// This file declares the API to access the various APIs related
-/// to GlobalISel.
-//
-//===----------------------------------------------------------------------===/
-
-#ifndef LLVM_CODEGEN_GLOBALISEL_GISELACCESSOR_H
-#define LLVM_CODEGEN_GLOBALISEL_GISELACCESSOR_H
-
-namespace llvm {
-class CallLowering;
-class InstructionSelector;
-class LegalizerInfo;
-class RegisterBankInfo;
-
-/// The goal of this helper class is to gather the accessor to all
-/// the APIs related to GlobalISel.
-/// It should be derived to feature an actual accessor to the GISel APIs.
-/// The reason why this is not simply done into the subtarget is to avoid
-/// spreading ifdefs around.
-struct GISelAccessor {
- virtual ~GISelAccessor() {}
- virtual const CallLowering *getCallLowering() const { return nullptr;}
- virtual const InstructionSelector *getInstructionSelector() const {
- return nullptr;
- }
- virtual const LegalizerInfo *getLegalizerInfo() const { return nullptr; }
- virtual const RegisterBankInfo *getRegBankInfo() const { return nullptr;}
-};
-} // End namespace llvm;
-#endif
diff --git a/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
new file mode 100644
index 000000000000..167905dc9aa1
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -0,0 +1,69 @@
+//===- GISelWorkList.h - Worklist for GISel passes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_GISEL_WORKLIST_H
+#define LLVM_GISEL_WORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+
+class MachineInstr;
+
+// Worklist which mostly works similar to InstCombineWorkList, but on MachineInstrs.
+// The main difference with something like a SetVector is that erasing an element doesn't
+// move all elements over one place - instead just nulls out the element of the vector.
+// FIXME: Does it make sense to factor out common code with the InstCombineWorkList?
+template<unsigned N>
+class GISelWorkList {
+ SmallVector<MachineInstr*, N> Worklist;
+ DenseMap<MachineInstr*, unsigned> WorklistMap;
+
+public:
+ GISelWorkList() = default;
+
+ bool empty() const { return WorklistMap.empty(); }
+
+ unsigned size() const { return WorklistMap.size(); }
+
+ /// Add - Add the specified instruction to the worklist if it isn't already
+ /// in it.
+ void insert(MachineInstr *I) {
+ if (WorklistMap.try_emplace(I, Worklist.size()).second) {
+ Worklist.push_back(I);
+ }
+ }
+
+ /// Remove - remove I from the worklist if it exists.
+ void remove(MachineInstr *I) {
+ auto It = WorklistMap.find(I);
+ if (It == WorklistMap.end()) return; // Not in worklist.
+
+ // Don't bother moving everything down, just null out the slot.
+ Worklist[It->second] = nullptr;
+
+ WorklistMap.erase(It);
+ }
+
+ MachineInstr *pop_back_val() {
+ MachineInstr *I;
+ do {
+ I = Worklist.pop_back_val();
+ } while(!I);
+ assert(I && "Pop back on empty worklist");
+ WorklistMap.erase(I);
+ return I;
+ }
+};
+
+} // end namespace llvm.
+
+#endif
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index 1060d8fd667e..e599a1b179ec 100644
--- a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -16,7 +16,10 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CodeGenCoverage.h"
#include <bitset>
#include <cstddef>
#include <cstdint>
@@ -26,9 +29,12 @@
namespace llvm {
+class APInt;
+class APFloat;
class LLT;
class MachineInstr;
class MachineInstrBuilder;
+class MachineFunction;
class MachineOperand;
class MachineRegisterInfo;
class RegisterBankInfo;
@@ -63,6 +69,18 @@ public:
};
enum {
+ /// Begin a try-block to attempt a match and jump to OnFail if it is
+ /// unsuccessful.
+ /// - OnFail - The MatchTable entry at which to resume if the match fails.
+ ///
+ /// FIXME: This ought to take an argument indicating the number of try-blocks
+ /// to exit on failure. It's usually one but the last match attempt of
+ /// a block will need more. The (implemented) alternative is to tack a
+ /// GIM_Reject on the end of each try-block which is simpler but
+ /// requires an extra opcode and iteration in the interpreter on each
+ /// failed match.
+ GIM_Try,
+
/// Record the specified instruction
/// - NewInsnID - Instruction ID to define
/// - InsnID - Instruction ID
@@ -81,12 +99,35 @@ enum {
/// - InsnID - Instruction ID
/// - Expected number of operands
GIM_CheckNumOperands,
+ /// Check an immediate predicate on the specified instruction
+ /// - InsnID - Instruction ID
+ /// - The predicate to test
+ GIM_CheckI64ImmPredicate,
+ /// Check an immediate predicate on the specified instruction via an APInt.
+ /// - InsnID - Instruction ID
+ /// - The predicate to test
+ GIM_CheckAPIntImmPredicate,
+ /// Check a floating point immediate predicate on the specified instruction.
+ /// - InsnID - Instruction ID
+ /// - The predicate to test
+ GIM_CheckAPFloatImmPredicate,
+ /// Check a memory operation has the specified atomic ordering.
+ /// - InsnID - Instruction ID
+ /// - Ordering - The AtomicOrdering value
+ GIM_CheckAtomicOrdering,
+ GIM_CheckAtomicOrderingOrStrongerThan,
+ GIM_CheckAtomicOrderingWeakerThan,
/// Check the type for the specified operand
/// - InsnID - Instruction ID
/// - OpIdx - Operand index
/// - Expected type
GIM_CheckType,
+ /// Check the type of a pointer to any address space.
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - SizeInBits - The size of the pointer value in bits.
+ GIM_CheckPointerToAny,
/// Check the register bank for the specified operand
/// - InsnID - Instruction ID
/// - OpIdx - Operand index
@@ -124,6 +165,17 @@ enum {
/// - InsnID - Instruction ID
GIM_CheckIsSafeToFold,
+ /// Check the specified operands are identical.
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - OtherInsnID - Other instruction ID
+ /// - OtherOpIdx - Other operand index
+ GIM_CheckIsSameOperand,
+
+ /// Fail the current try-block, or completely fail to match if there is no
+ /// current try-block.
+ GIM_Reject,
+
//=== Renderers ===
/// Mutate an instruction
@@ -141,6 +193,13 @@ enum {
/// - OldInsnID - Instruction ID to copy from
/// - OpIdx - The operand to copy
GIR_Copy,
+ /// Copy an operand to the specified instruction or add a zero register if the
+ /// operand is a zero immediate.
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ /// - ZeroReg - The zero register to use
+ GIR_CopyOrAddZeroReg,
/// Copy an operand to the specified instruction
/// - NewInsnID - Instruction ID to modify
/// - OldInsnID - Instruction ID to copy from
@@ -159,6 +218,10 @@ enum {
/// - InsnID - Instruction ID to modify
/// - RegNum - The register to add
GIR_AddRegister,
+ /// Add a temporary register to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - TempRegID - The temporary register ID to add
+ GIR_AddTempRegister,
/// Add an immediate to the specified instruction
/// - InsnID - Instruction ID to modify
/// - Imm - The immediate to add
@@ -167,6 +230,17 @@ enum {
/// - InsnID - Instruction ID to modify
/// - RendererID - The renderer to call
GIR_ComplexRenderer,
+ /// Render sub-operands of complex operands to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RendererID - The renderer to call
+ /// - RenderOpID - The suboperand to render.
+ GIR_ComplexSubOperandRenderer,
+
+ /// Render a G_CONSTANT operator as a sign-extended immediate.
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// The operand index is implicitly 1.
+ GIR_CopyConstantAsSImm,
/// Constrain an instruction operand to a register class.
/// - InsnID - Instruction ID to modify
@@ -179,18 +253,39 @@ enum {
GIR_ConstrainSelectedInstOperands,
/// Merge all memory operands into instruction.
/// - InsnID - Instruction ID to modify
+ /// - MergeInsnID... - One or more Instruction ID to merge into the result.
+ /// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
+ /// merge.
GIR_MergeMemOperands,
/// Erase from parent.
/// - InsnID - Instruction ID to erase
GIR_EraseFromParent,
+ /// Create a new temporary register that's not constrained.
+ /// - TempRegID - The temporary register ID to initialize.
+ /// - Expected type
+ GIR_MakeTempReg,
/// A successful emission
GIR_Done,
+
+ /// Increment the rule coverage counter.
+ /// - RuleID - The ID of the rule that was covered.
+ GIR_Coverage,
+};
+
+enum {
+ /// Indicates the end of the variable-length MergeInsnID list in a
+ /// GIR_MergeMemOperands opcode.
+ GIU_MergeMemOperands_EndOfList = -1,
};
/// Provides the logic to select generic machine instructions.
class InstructionSelector {
public:
+ using I64ImmediatePredicateFn = bool (*)(int64_t);
+ using APIntImmediatePredicateFn = bool (*)(const APInt &);
+ using APFloatImmediatePredicateFn = bool (*)(const APFloat &);
+
virtual ~InstructionSelector() = default;
/// Select the (possibly generic) instruction \p I to only use target-specific
@@ -203,17 +298,18 @@ public:
/// if returns true:
/// for I in all mutated/inserted instructions:
/// !isPreISelGenericOpcode(I.getOpcode())
- ///
- virtual bool select(MachineInstr &I) const = 0;
+ virtual bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const = 0;
protected:
- using ComplexRendererFn = std::function<void(MachineInstrBuilder &)>;
+ using ComplexRendererFns =
+ Optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
using RecordedMIVector = SmallVector<MachineInstr *, 4>;
using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
struct MatcherState {
- std::vector<ComplexRendererFn> Renderers;
+ std::vector<ComplexRendererFns::value_type> Renderers;
RecordedMIVector MIs;
+ DenseMap<unsigned, unsigned> TempRegisters;
MatcherState(unsigned MaxRenderers);
};
@@ -223,7 +319,10 @@ public:
struct MatcherInfoTy {
const LLT *TypeObjects;
const PredicateBitset *FeatureBitsets;
- const std::vector<ComplexMatcherMemFn> ComplexPredicates;
+ const I64ImmediatePredicateFn *I64ImmPredicateFns;
+ const APIntImmediatePredicateFn *APIntImmPredicateFns;
+ const APFloatImmediatePredicateFn *APFloatImmPredicateFns;
+ const ComplexMatcherMemFn *ComplexPredicates;
};
protected:
@@ -238,8 +337,8 @@ protected:
const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
const int64_t *MatchTable, const TargetInstrInfo &TII,
MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI,
- const PredicateBitset &AvailableFeatures) const;
+ const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+ CodeGenCoverage &CoverageInfo) const;
/// Constrain a register operand of an instruction \p I to a specified
/// register class. This could involve inserting COPYs before (for uses) or
@@ -268,7 +367,16 @@ protected:
bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
const MachineRegisterInfo &MRI) const;
- bool isObviouslySafeToFold(MachineInstr &MI) const;
+ /// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
+ /// right-hand side. GlobalISel's separation of pointer and integer types
+ /// means that we don't need to worry about G_OR with equivalent semantics.
+ bool isBaseWithConstantOffset(const MachineOperand &Root,
+ const MachineRegisterInfo &MRI) const;
+
+ /// Return true if MI can obviously be folded into IntoMI.
+ /// MI and IntoMI do not need to be in the same basic blocks, but MI must
+ /// precede IntoMI.
+ bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
};
} // end namespace llvm
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index 98b6b859b9e2..ac2c055ab145 100644
--- a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -1,4 +1,4 @@
-//==-- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h ---------*- C++ -*-==//
+//===- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,7 +16,32 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
namespace llvm {
+
+/// GlobalISel PatFrag Predicates
+enum {
+ GIPFP_I64_Invalid = 0,
+ GIPFP_APInt_Invalid = 0,
+ GIPFP_APFloat_Invalid = 0,
+};
+
template <class TgtInstructionSelector, class PredicateBitset,
class ComplexMatcherMemFn>
bool InstructionSelector::executeMatchTable(
@@ -24,306 +49,687 @@ bool InstructionSelector::executeMatchTable(
const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
const int64_t *MatchTable, const TargetInstrInfo &TII,
MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI,
- const PredicateBitset &AvailableFeatures) const {
- const int64_t *Command = MatchTable;
+ const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+ CodeGenCoverage &CoverageInfo) const {
+ uint64_t CurrentIdx = 0;
+ SmallVector<uint64_t, 8> OnFailResumeAt;
+
+ enum RejectAction { RejectAndGiveUp, RejectAndResume };
+ auto handleReject = [&]() -> RejectAction {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Rejected\n");
+ if (OnFailResumeAt.empty())
+ return RejectAndGiveUp;
+ CurrentIdx = OnFailResumeAt.back();
+ OnFailResumeAt.pop_back();
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
+ << OnFailResumeAt.size() << " try-blocks remain)\n");
+ return RejectAndResume;
+ };
+
while (true) {
- switch (*Command++) {
+ assert(CurrentIdx != ~0u && "Invalid MatchTable index");
+ switch (MatchTable[CurrentIdx++]) {
+ case GIM_Try: {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Begin try-block\n");
+ OnFailResumeAt.push_back(MatchTable[CurrentIdx++]);
+ break;
+ }
+
case GIM_RecordInsn: {
- int64_t NewInsnID = *Command++;
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
// As an optimisation we require that MIs[0] is always the root. Refuse
// any attempt to modify it.
assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
- (void)NewInsnID;
MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
if (!MO.isReg()) {
- DEBUG(dbgs() << "Rejected (not a register)\n");
- return false;
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Not a register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
}
if (TRI.isPhysicalRegister(MO.getReg())) {
- DEBUG(dbgs() << "Rejected (is a physical register)\n");
- return false;
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": Is a physical register\n");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
}
- assert((size_t)NewInsnID == State.MIs.size() &&
- "Expected to store MIs in order");
- State.MIs.push_back(MRI.getVRegDef(MO.getReg()));
- DEBUG(dbgs() << "MIs[" << NewInsnID << "] = GIM_RecordInsn(" << InsnID
- << ", " << OpIdx << ")\n");
+ MachineInstr *NewMI = MRI.getVRegDef(MO.getReg());
+ if ((size_t)NewInsnID < State.MIs.size())
+ State.MIs[NewInsnID] = NewMI;
+ else {
+ assert((size_t)NewInsnID == State.MIs.size() &&
+ "Expected to store MIs in order");
+ State.MIs.push_back(NewMI);
+ }
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": MIs[" << NewInsnID
+ << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
+ << ")\n");
break;
}
case GIM_CheckFeatures: {
- int64_t ExpectedBitsetID = *Command++;
- DEBUG(dbgs() << "GIM_CheckFeatures(ExpectedBitsetID=" << ExpectedBitsetID
- << ")\n");
+ int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckFeatures(ExpectedBitsetID="
+ << ExpectedBitsetID << ")\n");
if ((AvailableFeatures & MatcherInfo.FeatureBitsets[ExpectedBitsetID]) !=
MatcherInfo.FeatureBitsets[ExpectedBitsetID]) {
- DEBUG(dbgs() << "Rejected\n");
- return false;
+ if (handleReject() == RejectAndGiveUp)
+ return false;
}
break;
}
case GIM_CheckOpcode: {
- int64_t InsnID = *Command++;
- int64_t Expected = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Expected = MatchTable[CurrentIdx++];
unsigned Opcode = State.MIs[InsnID]->getOpcode();
- DEBUG(dbgs() << "GIM_CheckOpcode(MIs[" << InsnID << "], ExpectedOpcode="
- << Expected << ") // Got=" << Opcode << "\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+ << "], ExpectedOpcode=" << Expected
+ << ") // Got=" << Opcode << "\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (Opcode != Expected)
- return false;
+ if (Opcode != Expected) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
+
case GIM_CheckNumOperands: {
- int64_t InsnID = *Command++;
- int64_t Expected = *Command++;
- DEBUG(dbgs() << "GIM_CheckNumOperands(MIs[" << InsnID
- << "], Expected=" << Expected << ")\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Expected = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
+ << InsnID << "], Expected=" << Expected << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (State.MIs[InsnID]->getNumOperands() != Expected)
- return false;
+ if (State.MIs[InsnID]->getNumOperands() != Expected) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckI64ImmPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckI64ImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+ "Expected G_CONSTANT");
+ assert(Predicate > GIPFP_I64_Invalid && "Expected a valid predicate");
+ int64_t Value = 0;
+ if (State.MIs[InsnID]->getOperand(1).isCImm())
+ Value = State.MIs[InsnID]->getOperand(1).getCImm()->getSExtValue();
+ else if (State.MIs[InsnID]->getOperand(1).isImm())
+ Value = State.MIs[InsnID]->getOperand(1).getImm();
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+
+ if (!MatcherInfo.I64ImmPredicateFns[Predicate](Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
break;
}
+ case GIM_CheckAPIntImmPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() && "Expected G_CONSTANT");
+ assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
+ APInt Value;
+ if (State.MIs[InsnID]->getOperand(1).isCImm())
+ Value = State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+
+ if (!MatcherInfo.APIntImmPredicateFns[Predicate](Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAPFloatImmPredicate: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Predicate = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
+ << InsnID << "], Predicate=" << Predicate << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+ "Expected G_FCONSTANT");
+ assert(State.MIs[InsnID]->getOperand(1).isFPImm() && "Expected FPImm operand");
+ assert(Predicate > GIPFP_APFloat_Invalid && "Expected a valid predicate");
+ APFloat Value = State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();
+
+ if (!MatcherInfo.APFloatImmPredicateFns[Predicate](Value))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrdering: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (MMO->getOrdering() != Ordering)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrderingOrStrongerThan: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (!isAtLeastOrStrongerThan(MMO->getOrdering(), Ordering))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
+ case GIM_CheckAtomicOrderingWeakerThan: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ if (!State.MIs[InsnID]->hasOneMemOperand())
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+
+ for (const auto &MMO : State.MIs[InsnID]->memoperands())
+ if (!isStrongerThan(Ordering, MMO->getOrdering()))
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
+ }
case GIM_CheckType: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t TypeID = *Command++;
- DEBUG(dbgs() << "GIM_CheckType(MIs[" << InsnID << "]->getOperand("
- << OpIdx << "), TypeID=" << TypeID << ")\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t TypeID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), TypeID=" << TypeID << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
if (MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg()) !=
- MatcherInfo.TypeObjects[TypeID])
- return false;
+ MatcherInfo.TypeObjects[TypeID]) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckPointerToAny: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t SizeInBits = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), SizeInBits=" << SizeInBits << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ // iPTR must be looked up in the target.
+ if (SizeInBits == 0) {
+ MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
+ SizeInBits = MF->getDataLayout().getPointerSizeInBits(0);
+ }
+
+ assert(SizeInBits != 0 && "Pointer size must be known");
+
+ const LLT &Ty = MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg());
+ if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
case GIM_CheckRegBankForClass: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t RCEnum = *Command++;
- DEBUG(dbgs() << "GIM_CheckRegBankForClass(MIs[" << InsnID
- << "]->getOperand(" << OpIdx << "), RCEnum=" << RCEnum
- << ")\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t RCEnum = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), RCEnum=" << RCEnum << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
if (&RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
- RBI.getRegBank(State.MIs[InsnID]->getOperand(OpIdx).getReg(), MRI, TRI))
- return false;
+ RBI.getRegBank(State.MIs[InsnID]->getOperand(OpIdx).getReg(), MRI,
+ TRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
+
case GIM_CheckComplexPattern: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t RendererID = *Command++;
- int64_t ComplexPredicateID = *Command++;
- DEBUG(dbgs() << "State.Renderers[" << RendererID
- << "] = GIM_CheckComplexPattern(MIs[" << InsnID
- << "]->getOperand(" << OpIdx
- << "), ComplexPredicateID=" << ComplexPredicateID << ")\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t RendererID = MatchTable[CurrentIdx++];
+ int64_t ComplexPredicateID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
+ << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), ComplexPredicateID=" << ComplexPredicateID
+ << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
// FIXME: Use std::invoke() when it's available.
- if (!(State.Renderers[RendererID] =
- (ISel.*MatcherInfo.ComplexPredicates[ComplexPredicateID])(
- State.MIs[InsnID]->getOperand(OpIdx))))
- return false;
+ ComplexRendererFns Renderer =
+ (ISel.*MatcherInfo.ComplexPredicates[ComplexPredicateID])(
+ State.MIs[InsnID]->getOperand(OpIdx));
+ if (Renderer.hasValue())
+ State.Renderers[RendererID] = Renderer.getValue();
+ else
+ if (handleReject() == RejectAndGiveUp)
+ return false;
break;
}
+
case GIM_CheckConstantInt: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t Value = *Command++;
- DEBUG(dbgs() << "GIM_CheckConstantInt(MIs[" << InsnID << "]->getOperand("
- << OpIdx << "), Value=" << Value << ")\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t Value = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!isOperandImmEqual(State.MIs[InsnID]->getOperand(OpIdx), Value, MRI))
- return false;
+
+ // isOperandImmEqual() will sign-extend to 64-bits, so should we.
+ LLT Ty = MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg());
+ Value = SignExtend64(Value, Ty.getSizeInBits());
+
+ if (!isOperandImmEqual(State.MIs[InsnID]->getOperand(OpIdx), Value,
+ MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
+
case GIM_CheckLiteralInt: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t Value = *Command++;
- DEBUG(dbgs() << "GIM_CheckLiteralInt(MIs[" << InsnID << "]->getOperand(" << OpIdx
- << "), Value=" << Value << ")\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t Value = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
- if (!OM.isCImm() || !OM.getCImm()->equalsInt(Value))
- return false;
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isCImm() || !MO.getCImm()->equalsInt(Value)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
+
case GIM_CheckIntrinsicID: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t Value = *Command++;
- DEBUG(dbgs() << "GIM_CheckIntrinsicID(MIs[" << InsnID << "]->getOperand(" << OpIdx
- << "), Value=" << Value << ")\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t Value = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
- if (!OM.isIntrinsicID() || OM.getIntrinsicID() != Value)
- return false;
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
+ if (handleReject() == RejectAndGiveUp)
+ return false;
break;
}
+
case GIM_CheckIsMBB: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- DEBUG(dbgs() << "GIM_CheckIsMBB(MIs[" << InsnID << "]->getOperand("
- << OpIdx << "))\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "))\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB())
- return false;
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
case GIM_CheckIsSafeToFold: {
- int64_t InsnID = *Command++;
- DEBUG(dbgs() << "GIM_CheckIsSafeToFold(MIs[" << InsnID << "])\n");
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs["
+ << InsnID << "])\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- if (!isObviouslySafeToFold(*State.MIs[InsnID]))
- return false;
+ if (!isObviouslySafeToFold(*State.MIs[InsnID], *State.MIs[0])) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ break;
+ }
+ case GIM_CheckIsSameOperand: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t OtherInsnID = MatchTable[CurrentIdx++];
+ int64_t OtherOpIdx = MatchTable[CurrentIdx++];
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
+ << InsnID << "][" << OpIdx << "], MIs["
+ << OtherInsnID << "][" << OtherOpIdx << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isIdenticalTo(
+ State.MIs[OtherInsnID]->getOperand(OtherOpIdx))) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
+ case GIM_Reject:
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_Reject");
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ break;
case GIR_MutateOpcode: {
- int64_t OldInsnID = *Command++;
- int64_t NewInsnID = *Command++;
- int64_t NewOpcode = *Command++;
- assert((size_t)NewInsnID == OutMIs.size() &&
- "Expected to store MIs in order");
- OutMIs.push_back(
- MachineInstrBuilder(*State.MIs[OldInsnID]->getParent()->getParent(),
- State.MIs[OldInsnID]));
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ uint64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t NewOpcode = MatchTable[CurrentIdx++];
+ if (NewInsnID >= OutMIs.size())
+ OutMIs.resize(NewInsnID + 1);
+
+ OutMIs[NewInsnID] = MachineInstrBuilder(*State.MIs[OldInsnID]->getMF(),
+ State.MIs[OldInsnID]);
OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
- DEBUG(dbgs() << "GIR_MutateOpcode(OutMIs[" << NewInsnID << "], MIs["
- << OldInsnID << "], " << NewOpcode << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << NewOpcode << ")\n");
break;
}
+
case GIR_BuildMI: {
- int64_t InsnID = *Command++;
- int64_t Opcode = *Command++;
- assert((size_t)InsnID == OutMIs.size() &&
- "Expected to store MIs in order");
- (void)InsnID;
- OutMIs.push_back(BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
- State.MIs[0]->getDebugLoc(), TII.get(Opcode)));
- DEBUG(dbgs() << "GIR_BuildMI(OutMIs[" << InsnID << "], " << Opcode
- << ")\n");
+ uint64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t Opcode = MatchTable[CurrentIdx++];
+ if (NewInsnID >= OutMIs.size())
+ OutMIs.resize(NewInsnID + 1);
+
+ OutMIs[NewInsnID] = BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
+ State.MIs[0]->getDebugLoc(), TII.get(Opcode));
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
+ << NewInsnID << "], " << Opcode << ")\n");
break;
}
case GIR_Copy: {
- int64_t NewInsnID = *Command++;
- int64_t OldInsnID = *Command++;
- int64_t OpIdx = *Command++;
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
- DEBUG(dbgs() << "GIR_Copy(OutMIs[" << NewInsnID << "], MIs[" << OldInsnID
- << "], " << OpIdx << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
+ << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
break;
}
+
+ case GIR_CopyOrAddZeroReg: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t ZeroReg = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
+ if (isOperandImmEqual(MO, 0, MRI))
+ OutMIs[NewInsnID].addReg(ZeroReg);
+ else
+ OutMIs[NewInsnID].add(MO);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << OpIdx << ", " << ZeroReg << ")\n");
+ break;
+ }
+
case GIR_CopySubReg: {
- int64_t NewInsnID = *Command++;
- int64_t OldInsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t SubRegIdx = *Command++;
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t SubRegIdx = MatchTable[CurrentIdx++];
assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
0, SubRegIdx);
- DEBUG(dbgs() << "GIR_CopySubReg(OutMIs[" << NewInsnID << "], MIs["
- << OldInsnID << "], " << OpIdx << ", " << SubRegIdx
- << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "], "
+ << OpIdx << ", " << SubRegIdx << ")\n");
break;
}
+
case GIR_AddImplicitDef: {
- int64_t InsnID = *Command++;
- int64_t RegNum = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RegNum = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
- DEBUG(dbgs() << "GIR_AddImplicitDef(OutMIs[" << InsnID << "], " << RegNum
- << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
+ << InsnID << "], " << RegNum << ")\n");
break;
}
+
case GIR_AddImplicitUse: {
- int64_t InsnID = *Command++;
- int64_t RegNum = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RegNum = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
- DEBUG(dbgs() << "GIR_AddImplicitUse(OutMIs[" << InsnID << "], " << RegNum
- << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
+ << InsnID << "], " << RegNum << ")\n");
break;
}
+
case GIR_AddRegister: {
- int64_t InsnID = *Command++;
- int64_t RegNum = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RegNum = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
OutMIs[InsnID].addReg(RegNum);
- DEBUG(dbgs() << "GIR_AddRegister(OutMIs[" << InsnID << "], " << RegNum
- << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
+ << InsnID << "], " << RegNum << ")\n");
+ break;
+ }
+
+ case GIR_AddTempRegister: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t TempRegID = MatchTable[CurrentIdx++];
+ uint64_t TempRegFlags = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs["
+ << InsnID << "], TempRegisters[" << TempRegID
+ << "], " << TempRegFlags << ")\n");
break;
}
+
case GIR_AddImm: {
- int64_t InsnID = *Command++;
- int64_t Imm = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t Imm = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
OutMIs[InsnID].addImm(Imm);
- DEBUG(dbgs() << "GIR_AddImm(OutMIs[" << InsnID << "], " << Imm << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
+ << "], " << Imm << ")\n");
break;
}
+
case GIR_ComplexRenderer: {
- int64_t InsnID = *Command++;
- int64_t RendererID = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RendererID = MatchTable[CurrentIdx++];
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ for (const auto &RenderOpFn : State.Renderers[RendererID])
+ RenderOpFn(OutMIs[InsnID]);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
+ << InsnID << "], " << RendererID << ")\n");
+ break;
+ }
+ case GIR_ComplexSubOperandRenderer: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t RendererID = MatchTable[CurrentIdx++];
+ int64_t RenderOpID = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- State.Renderers[RendererID](OutMIs[InsnID]);
- DEBUG(dbgs() << "GIR_ComplexRenderer(OutMIs[" << InsnID << "], "
- << RendererID << ")\n");
+ State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_ComplexSubOperandRenderer(OutMIs["
+ << InsnID << "], " << RendererID << ", "
+ << RenderOpID << ")\n");
+ break;
+ }
+
+ case GIR_CopyConstantAsSImm: {
+ int64_t NewInsnID = MatchTable[CurrentIdx++];
+ int64_t OldInsnID = MatchTable[CurrentIdx++];
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
+ if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
+ OutMIs[NewInsnID].addImm(
+ State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
+ } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
+ OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
+ else
+ llvm_unreachable("Expected Imm or CImm operand");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
+ << NewInsnID << "], MIs[" << OldInsnID << "])\n");
break;
}
case GIR_ConstrainOperandRC: {
- int64_t InsnID = *Command++;
- int64_t OpIdx = *Command++;
- int64_t RCEnum = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ int64_t RCEnum = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
constrainOperandRegToRegClass(*OutMIs[InsnID].getInstr(), OpIdx,
*TRI.getRegClass(RCEnum), TII, TRI, RBI);
- DEBUG(dbgs() << "GIR_ConstrainOperandRC(OutMIs[" << InsnID << "], "
- << OpIdx << ", " << RCEnum << ")\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
+ << InsnID << "], " << OpIdx << ", " << RCEnum
+ << ")\n");
break;
}
+
case GIR_ConstrainSelectedInstOperands: {
- int64_t InsnID = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
RBI);
- DEBUG(dbgs() << "GIR_ConstrainSelectedInstOperands(OutMIs[" << InsnID
- << "])\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIR_ConstrainSelectedInstOperands(OutMIs["
+ << InsnID << "])\n");
break;
}
+
case GIR_MergeMemOperands: {
- int64_t InsnID = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- for (const auto *FromMI : State.MIs)
- for (const auto &MMO : FromMI->memoperands())
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
+ << InsnID << "]");
+ int64_t MergeInsnID = GIU_MergeMemOperands_EndOfList;
+ while ((MergeInsnID = MatchTable[CurrentIdx++]) !=
+ GIU_MergeMemOperands_EndOfList) {
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << ", MIs[" << MergeInsnID << "]");
+ for (const auto &MMO : State.MIs[MergeInsnID]->memoperands())
OutMIs[InsnID].addMemOperand(MMO);
- DEBUG(dbgs() << "GIR_MergeMemOperands(OutMIs[" << InsnID << "])\n");
+ }
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), dbgs() << ")\n");
break;
}
+
case GIR_EraseFromParent: {
- int64_t InsnID = *Command++;
+ int64_t InsnID = MatchTable[CurrentIdx++];
assert(State.MIs[InsnID] &&
"Attempted to erase an undefined instruction");
State.MIs[InsnID]->eraseFromParent();
- DEBUG(dbgs() << "GIR_EraseFromParent(MIs[" << InsnID << "])\n");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
+ << InsnID << "])\n");
+ break;
+ }
+
+ case GIR_MakeTempReg: {
+ int64_t TempRegID = MatchTable[CurrentIdx++];
+ int64_t TypeID = MatchTable[CurrentIdx++];
+
+ State.TempRegisters[TempRegID] =
+ MRI.createGenericVirtualRegister(MatcherInfo.TypeObjects[TypeID]);
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
+ << "] = GIR_MakeTempReg(" << TypeID << ")\n");
+ break;
+ }
+
+ case GIR_Coverage: {
+ int64_t RuleID = MatchTable[CurrentIdx++];
+ CoverageInfo.setCovered(RuleID);
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs()
+ << CurrentIdx << ": GIR_Coverage(" << RuleID << ")");
break;
}
case GIR_Done:
- DEBUG(dbgs() << "GIR_Done");
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIR_Done");
return true;
default:
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
new file mode 100644
index 000000000000..e7945ff5bf4f
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -0,0 +1,287 @@
+//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h --===========//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file contains some helper functions which try to cleanup artifacts
+// such as G_TRUNCs/G_[ZSA]EXTENDS that were created during legalization to make
+// the types match. This file also contains some combines of merges that happens
+// at the end of the legalization.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "legalizer"
+
+namespace llvm {
+class LegalizationArtifactCombiner {
+ MachineIRBuilder &Builder;
+ MachineRegisterInfo &MRI;
+ const LegalizerInfo &LI;
+
+public:
+ LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+ const LegalizerInfo &LI)
+ : Builder(B), MRI(MRI), LI(LI) {}
+
+ bool tryCombineAnyExt(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts) {
+ if (MI.getOpcode() != TargetOpcode::G_ANYEXT)
+ return false;
+ if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+ MI.getOperand(1).getReg(), MRI)) {
+ DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = DefMI->getOperand(1).getReg();
+ Builder.setInstr(MI);
+ // We get a copy/trunc/extend depending on the sizes
+ Builder.buildAnyExtOrTrunc(DstReg, SrcReg);
+ markInstAndDefDead(MI, *DefMI, DeadInsts);
+ return true;
+ }
+ return tryFoldImplicitDef(MI, DeadInsts);
+ }
+
+ bool tryCombineZExt(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+ if (MI.getOpcode() != TargetOpcode::G_ZEXT)
+ return false;
+ if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+ MI.getOperand(1).getReg(), MRI)) {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ if (isInstUnsupported(TargetOpcode::G_AND, DstTy) ||
+ isInstUnsupported(TargetOpcode::G_CONSTANT, DstTy))
+ return false;
+ DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ Builder.setInstr(MI);
+ unsigned ZExtSrc = MI.getOperand(1).getReg();
+ LLT ZExtSrcTy = MRI.getType(ZExtSrc);
+ APInt Mask = APInt::getAllOnesValue(ZExtSrcTy.getSizeInBits());
+ auto MaskCstMIB = Builder.buildConstant(DstTy, Mask.getZExtValue());
+ unsigned TruncSrc = DefMI->getOperand(1).getReg();
+ // We get a copy/trunc/extend depending on the sizes
+ auto SrcCopyOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+ Builder.buildAnd(DstReg, SrcCopyOrTrunc, MaskCstMIB);
+ markInstAndDefDead(MI, *DefMI, DeadInsts);
+ return true;
+ }
+ return tryFoldImplicitDef(MI, DeadInsts);
+ }
+
+ bool tryCombineSExt(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+ if (MI.getOpcode() != TargetOpcode::G_SEXT)
+ return false;
+ if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+ MI.getOperand(1).getReg(), MRI)) {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ if (isInstUnsupported(TargetOpcode::G_SHL, DstTy) ||
+ isInstUnsupported(TargetOpcode::G_ASHR, DstTy) ||
+ isInstUnsupported(TargetOpcode::G_CONSTANT, DstTy))
+ return false;
+ DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ Builder.setInstr(MI);
+ unsigned SExtSrc = MI.getOperand(1).getReg();
+ LLT SExtSrcTy = MRI.getType(SExtSrc);
+ unsigned SizeDiff = DstTy.getSizeInBits() - SExtSrcTy.getSizeInBits();
+ auto SizeDiffMIB = Builder.buildConstant(DstTy, SizeDiff);
+ unsigned TruncSrcReg = DefMI->getOperand(1).getReg();
+ // We get a copy/trunc/extend depending on the sizes
+ auto SrcCopyExtOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrcReg);
+ auto ShlMIB = Builder.buildInstr(TargetOpcode::G_SHL, DstTy,
+ SrcCopyExtOrTrunc, SizeDiffMIB);
+ Builder.buildInstr(TargetOpcode::G_ASHR, DstReg, ShlMIB, SizeDiffMIB);
+ markInstAndDefDead(MI, *DefMI, DeadInsts);
+ return true;
+ }
+ return tryFoldImplicitDef(MI, DeadInsts);
+ }
+
+ /// Try to fold sb = EXTEND (G_IMPLICIT_DEF sa) -> sb = G_IMPLICIT_DEF
+ bool tryFoldImplicitDef(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts) {
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode != TargetOpcode::G_ANYEXT && Opcode != TargetOpcode::G_ZEXT &&
+ Opcode != TargetOpcode::G_SEXT)
+ return false;
+
+ if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
+ MI.getOperand(1).getReg(), MRI)) {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ if (isInstUnsupported(TargetOpcode::G_IMPLICIT_DEF, DstTy))
+ return false;
+ DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
+ Builder.setInstr(MI);
+ Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
+ markInstAndDefDead(MI, *DefMI, DeadInsts);
+ return true;
+ }
+ return false;
+ }
+
+ bool tryCombineMerges(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+ if (MI.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+ return false;
+
+ unsigned NumDefs = MI.getNumOperands() - 1;
+ unsigned SrcReg = MI.getOperand(NumDefs).getReg();
+ MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
+ if (!MergeI || (MergeI->getOpcode() != TargetOpcode::G_MERGE_VALUES))
+ return false;
+
+ const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
+
+ if (NumMergeRegs < NumDefs) {
+ if (NumDefs % NumMergeRegs != 0)
+ return false;
+
+ Builder.setInstr(MI);
+ // Transform to UNMERGEs, for example
+ // %1 = G_MERGE_VALUES %4, %5
+ // %9, %10, %11, %12 = G_UNMERGE_VALUES %1
+ // to
+ // %9, %10 = G_UNMERGE_VALUES %4
+ // %11, %12 = G_UNMERGE_VALUES %5
+
+ const unsigned NewNumDefs = NumDefs / NumMergeRegs;
+ for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
+ SmallVector<unsigned, 2> DstRegs;
+ for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
+ ++j, ++DefIdx)
+ DstRegs.push_back(MI.getOperand(DefIdx).getReg());
+
+ Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+ }
+
+ } else if (NumMergeRegs > NumDefs) {
+ if (NumMergeRegs % NumDefs != 0)
+ return false;
+
+ Builder.setInstr(MI);
+ // Transform to MERGEs
+ // %6 = G_MERGE_VALUES %17, %18, %19, %20
+ // %7, %8 = G_UNMERGE_VALUES %6
+ // to
+ // %7 = G_MERGE_VALUES %17, %18
+ // %8 = G_MERGE_VALUES %19, %20
+
+ const unsigned NumRegs = NumMergeRegs / NumDefs;
+ for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
+ SmallVector<unsigned, 2> Regs;
+ for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
+ ++j, ++Idx)
+ Regs.push_back(MergeI->getOperand(Idx).getReg());
+
+ Builder.buildMerge(MI.getOperand(DefIdx).getReg(), Regs);
+ }
+
+ } else {
+ // FIXME: is a COPY appropriate if the types mismatch? We know both
+ // registers are allocatable by now.
+ if (MRI.getType(MI.getOperand(0).getReg()) !=
+ MRI.getType(MergeI->getOperand(1).getReg()))
+ return false;
+
+ for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
+ MRI.replaceRegWith(MI.getOperand(Idx).getReg(),
+ MergeI->getOperand(Idx + 1).getReg());
+ }
+
+ markInstAndDefDead(MI, *MergeI, DeadInsts);
+ return true;
+ }
+
+ /// Try to combine away MI.
+ /// Returns true if it combined away the MI.
+ /// Adds instructions that are dead as a result of the combine
+ /// into DeadInsts, which can include MI.
+ bool tryCombineInstruction(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts) {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case TargetOpcode::G_ANYEXT:
+ return tryCombineAnyExt(MI, DeadInsts);
+ case TargetOpcode::G_ZEXT:
+ return tryCombineZExt(MI, DeadInsts);
+ case TargetOpcode::G_SEXT:
+ return tryCombineSExt(MI, DeadInsts);
+ case TargetOpcode::G_UNMERGE_VALUES:
+ return tryCombineMerges(MI, DeadInsts);
+ case TargetOpcode::G_TRUNC: {
+ bool Changed = false;
+ for (auto &Use : MRI.use_instructions(MI.getOperand(0).getReg()))
+ Changed |= tryCombineInstruction(Use, DeadInsts);
+ return Changed;
+ }
+ }
+ }
+
+private:
+ /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
+ /// dead due to MI being killed, then mark DefMI as dead too.
+ /// Some of the combines (extends(trunc)), try to walk through redundant
+ /// copies in between the extends and the truncs, and this attempts to collect
+ /// the in between copies if they're dead.
+ void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts) {
+ DeadInsts.push_back(&MI);
+
+ // Collect all the copy instructions that are made dead, due to deleting
+ // this instruction. Collect all of them until the Trunc(DefMI).
+ // Eg,
+ // %1(s1) = G_TRUNC %0(s32)
+ // %2(s1) = COPY %1(s1)
+ // %3(s1) = COPY %2(s1)
+ // %4(s32) = G_ANYEXT %3(s1)
+ // In this case, we would have replaced %4 with a copy of %0,
+ // and as a result, %3, %2, %1 are dead.
+ MachineInstr *PrevMI = &MI;
+ while (PrevMI != &DefMI) {
+ // If we're dealing with G_UNMERGE_VALUES, tryCombineMerges doesn't really try
+ // to fold copies in between and we can ignore them here.
+ if (PrevMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES)
+ break;
+ unsigned PrevRegSrc = PrevMI->getOperand(1).getReg();
+ MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
+ if (MRI.hasOneUse(PrevRegSrc)) {
+ if (TmpDef != &DefMI) {
+ assert(TmpDef->getOpcode() == TargetOpcode::COPY &&
+ "Expecting copy here");
+ DeadInsts.push_back(TmpDef);
+ }
+ } else
+ break;
+ PrevMI = TmpDef;
+ }
+ if ((PrevMI == &DefMI ||
+ DefMI.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
+ MRI.hasOneUse(DefMI.getOperand(0).getReg()))
+ DeadInsts.push_back(&DefMI);
+ }
+
+ /// Checks if the target legalizer info has specified anything about the
+ /// instruction, or if unsupported.
+ bool isInstUnsupported(unsigned Opcode, const LLT &DstTy) const {
+ auto Action = LI.getAction({Opcode, 0, DstTy});
+ return Action.first == LegalizerInfo::LegalizeAction::Unsupported ||
+ Action.first == LegalizerInfo::LegalizeAction::NotFound;
+ }
+};
+
+} // namespace llvm
diff --git a/include/llvm/CodeGen/GlobalISel/Legalizer.h b/include/llvm/CodeGen/GlobalISel/Legalizer.h
index 9b9b8b563a30..8284ab6dac65 100644
--- a/include/llvm/CodeGen/GlobalISel/Legalizer.h
+++ b/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -58,9 +58,6 @@ public:
bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
const TargetInstrInfo &TII);
- bool combineMerges(MachineInstr &MI, MachineRegisterInfo &MRI,
- const TargetInstrInfo &TII, MachineIRBuilder &MIRBuilder);
-
bool runOnMachineFunction(MachineFunction &MF) override;
};
} // End namespace llvm.
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 1fd45b52e3ac..8bd8a9dcd0e2 100644
--- a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -89,6 +89,9 @@ public:
/// functions
MachineIRBuilder MIRBuilder;
+ /// Expose LegalizerInfo so the clients can re-use.
+ const LegalizerInfo &getLegalizerInfo() const { return LI; }
+
private:
/// Helper function to split a wide generic register into bitwise blocks with
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index c259e93fdd36..b6735d538b37 100644
--- a/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -20,11 +20,12 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/Support/LowLevelTypeImpl.h"
-#include "llvm/Target/TargetOpcodes.h"
-#include <cstdint>
#include <cassert>
+#include <cstdint>
#include <tuple>
+#include <unordered_map>
#include <utility>
namespace llvm {
@@ -120,27 +121,144 @@ public:
}
}
+ typedef std::pair<uint16_t, LegalizeAction> SizeAndAction;
+ typedef std::vector<SizeAndAction> SizeAndActionsVec;
+ using SizeChangeStrategy =
+ std::function<SizeAndActionsVec(const SizeAndActionsVec &v)>;
+
/// More friendly way to set an action for common types that have an LLT
/// representation.
+ /// The LegalizeAction must be one for which NeedsLegalizingToDifferentSize
+ /// returns false.
void setAction(const InstrAspect &Aspect, LegalizeAction Action) {
+ assert(!needsLegalizingToDifferentSize(Action));
TablesInitialized = false;
- unsigned Opcode = Aspect.Opcode - FirstOp;
- if (Actions[Opcode].size() <= Aspect.Idx)
- Actions[Opcode].resize(Aspect.Idx + 1);
- Actions[Aspect.Opcode - FirstOp][Aspect.Idx][Aspect.Type] = Action;
+ const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
+ if (SpecifiedActions[OpcodeIdx].size() <= Aspect.Idx)
+ SpecifiedActions[OpcodeIdx].resize(Aspect.Idx + 1);
+ SpecifiedActions[OpcodeIdx][Aspect.Idx][Aspect.Type] = Action;
}
- /// If an operation on a given vector type (say <M x iN>) isn't explicitly
- /// specified, we proceed in 2 stages. First we legalize the underlying scalar
- /// (so that there's at least one legal vector with that scalar), then we
- /// adjust the number of elements in the vector so that it is legal. The
- /// desired action in the first step is controlled by this function.
- void setScalarInVectorAction(unsigned Opcode, LLT ScalarTy,
- LegalizeAction Action) {
- assert(!ScalarTy.isVector());
- ScalarInVectorActions[std::make_pair(Opcode, ScalarTy)] = Action;
+ /// The setAction calls record the non-size-changing legalization actions
+ /// to take on specifically-sized types. The SizeChangeStrategy defines what
+ /// to do when the size of the type needs to be changed to reach a legally
+ /// sized type (i.e., one that was defined through a setAction call).
+ /// e.g.
+ /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
+ /// setLegalizeScalarToDifferentSizeStrategy(
+ /// G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
+ /// will end up defining getAction({G_ADD, 0, T}) to return the following
+ /// actions for different scalar types T:
+ /// LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
+ /// LLT::scalar(32): {Legal, 0, LLT::scalar(32)}
+ /// LLT::scalar(33)..: {NarrowScalar, 0, LLT::scalar(32)}
+ ///
+ /// If no SizeChangeAction gets defined through this function,
+ /// the default is unsupportedForDifferentSizes.
+ void setLegalizeScalarToDifferentSizeStrategy(const unsigned Opcode,
+ const unsigned TypeIdx,
+ SizeChangeStrategy S) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (ScalarSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+ ScalarSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+ ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+ }
+
+ /// See also setLegalizeScalarToDifferentSizeStrategy.
+ /// This function allows to set the SizeChangeStrategy for vector elements.
+ void setLegalizeVectorElementToDifferentSizeStrategy(const unsigned Opcode,
+ const unsigned TypeIdx,
+ SizeChangeStrategy S) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (VectorElementSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+ VectorElementSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+ VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+ }
+
+ /// A SizeChangeStrategy for the common case where legalization for a
+ /// particular operation consists of only supporting a specific set of type
+ /// sizes. E.g.
+ /// setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
+ /// setAction ({G_DIV, 0, LLT::scalar(64)}, Legal);
+ /// setLegalizeScalarToDifferentSizeStrategy(
+ /// G_DIV, 0, unsupportedForDifferentSizes);
+ /// will result in getAction({G_DIV, 0, T}) to return Legal for s32 and s64,
+ /// and Unsupported for all other scalar types T.
+ static SizeAndActionsVec
+ unsupportedForDifferentSizes(const SizeAndActionsVec &v) {
+ return increaseToLargerTypesAndDecreaseToLargest(v, Unsupported,
+ Unsupported);
}
+ /// A SizeChangeStrategy for the common case where legalization for a
+ /// particular operation consists of widening the type to a large legal type,
+ /// unless there is no such type and then instead it should be narrowed to the
+ /// largest legal type.
+ static SizeAndActionsVec
+ widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v) {
+ assert(v.size() > 0 &&
+ "At least one size that can be legalized towards is needed"
+ " for this SizeChangeStrategy");
+ return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+ NarrowScalar);
+ }
+
+ static SizeAndActionsVec
+ widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v) {
+ return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+ Unsupported);
+ }
+
+ static SizeAndActionsVec
+ narrowToSmallerAndUnsupportedIfTooSmall(const SizeAndActionsVec &v) {
+ return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+ Unsupported);
+ }
+
+ static SizeAndActionsVec
+ narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v) {
+ assert(v.size() > 0 &&
+ "At least one size that can be legalized towards is needed"
+ " for this SizeChangeStrategy");
+ return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+ WidenScalar);
+ }
+
+ /// A SizeChangeStrategy for the common case where legalization for a
+ /// particular vector operation consists of having more elements in the
+ /// vector, to a type that is legal. Unless there is no such type and then
+ /// instead it should be legalized towards the widest vector that's still
+ /// legal. E.g.
+ /// setAction({G_ADD, LLT::vector(8, 8)}, Legal);
+ /// setAction({G_ADD, LLT::vector(16, 8)}, Legal);
+ /// setAction({G_ADD, LLT::vector(2, 32)}, Legal);
+ /// setAction({G_ADD, LLT::vector(4, 32)}, Legal);
+ /// setLegalizeVectorElementToDifferentSizeStrategy(
+ /// G_ADD, 0, moreToWiderTypesAndLessToWidest);
+ /// will result in the following getAction results:
+ /// * getAction({G_ADD, LLT::vector(8,8)}) returns
+ /// (Legal, vector(8,8)).
+ /// * getAction({G_ADD, LLT::vector(9,8)}) returns
+ /// (MoreElements, vector(16,8)).
+ /// * getAction({G_ADD, LLT::vector(8,32)}) returns
+ /// (FewerElements, vector(4,32)).
+ static SizeAndActionsVec
+ moreToWiderTypesAndLessToWidest(const SizeAndActionsVec &v) {
+ return increaseToLargerTypesAndDecreaseToLargest(v, MoreElements,
+ FewerElements);
+ }
+
+ /// Helper function to implement many typical SizeChangeStrategy functions.
+ static SizeAndActionsVec
+ increaseToLargerTypesAndDecreaseToLargest(const SizeAndActionsVec &v,
+ LegalizeAction IncreaseAction,
+ LegalizeAction DecreaseAction);
+ /// Helper function to implement many typical SizeChangeStrategy functions.
+ static SizeAndActionsVec
+ decreaseToSmallerTypesAndIncreaseToSmallest(const SizeAndActionsVec &v,
+ LegalizeAction DecreaseAction,
+ LegalizeAction IncreaseAction);
+
/// Determine what action should be taken to legalize the given generic
/// instruction opcode, type-index and type. Requires computeTables to have
/// been called.
@@ -158,55 +276,6 @@ public:
std::tuple<LegalizeAction, unsigned, LLT>
getAction(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
- /// Iterate the given function (typically something like doubling the width)
- /// on Ty until we find a legal type for this operation.
- Optional<LLT> findLegalizableSize(const InstrAspect &Aspect,
- function_ref<LLT(LLT)> NextType) const {
- LegalizeAction Action;
- const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx];
- LLT Ty = Aspect.Type;
- do {
- Ty = NextType(Ty);
- auto ActionIt = Map.find(Ty);
- if (ActionIt == Map.end()) {
- auto DefaultIt = DefaultActions.find(Aspect.Opcode);
- if (DefaultIt == DefaultActions.end())
- return None;
- Action = DefaultIt->second;
- } else
- Action = ActionIt->second;
- } while (needsLegalizingToDifferentSize(Action));
- return Ty;
- }
-
- /// Find what type it's actually OK to perform the given operation on, given
- /// the general approach we've decided to take.
- Optional<LLT> findLegalType(const InstrAspect &Aspect, LegalizeAction Action) const;
-
- std::pair<LegalizeAction, LLT> findLegalAction(const InstrAspect &Aspect,
- LegalizeAction Action) const {
- auto LegalType = findLegalType(Aspect, Action);
- if (!LegalType)
- return std::make_pair(LegalizeAction::Unsupported, LLT());
- return std::make_pair(Action, *LegalType);
- }
-
- /// Find the specified \p Aspect in the primary (explicitly set) Actions
- /// table. Returns either the action the target requested or NotFound if there
- /// was no setAction call.
- LegalizeAction findInActions(const InstrAspect &Aspect) const {
- if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp)
- return NotFound;
- if (Aspect.Idx >= Actions[Aspect.Opcode - FirstOp].size())
- return NotFound;
- const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx];
- auto ActionIt = Map.find(Aspect.Type);
- if (ActionIt == Map.end())
- return NotFound;
-
- return ActionIt->second;
- }
-
bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
virtual bool legalizeCustom(MachineInstr &MI,
@@ -214,20 +283,181 @@ public:
MachineIRBuilder &MIRBuilder) const;
private:
- static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
- static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+ /// The SizeAndActionsVec is a representation mapping between all natural
+ /// numbers and an Action. The natural number represents the bit size of
+ /// the InstrAspect. For example, for a target with native support for 32-bit
+ /// and 64-bit additions, you'd express that as:
+ /// setScalarAction(G_ADD, 0,
+ /// {{1, WidenScalar}, // bit sizes [ 1, 32[
+ /// {32, Legal}, // bit sizes [32, 33[
+ /// {33, WidenScalar}, // bit sizes [33, 64[
+ /// {64, Legal}, // bit sizes [64, 65[
+ /// {65, NarrowScalar} // bit sizes [65, +inf[
+ /// });
+ /// It may be that only 64-bit pointers are supported on your target:
+ /// setPointerAction(G_GEP, 0, LLT:pointer(1),
+ /// {{1, Unsupported}, // bit sizes [ 1, 64[
+ /// {64, Legal}, // bit sizes [64, 65[
+ /// {65, Unsupported}, // bit sizes [65, +inf[
+ /// });
+ void setScalarAction(const unsigned Opcode, const unsigned TypeIndex,
+ const SizeAndActionsVec &SizeAndActions) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ SmallVector<SizeAndActionsVec, 1> &Actions = ScalarActions[OpcodeIdx];
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
+ void setPointerAction(const unsigned Opcode, const unsigned TypeIndex,
+ const unsigned AddressSpace,
+ const SizeAndActionsVec &SizeAndActions) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace) ==
+ AddrSpace2PointerActions[OpcodeIdx].end())
+ AddrSpace2PointerActions[OpcodeIdx][AddressSpace] = {{}};
+ SmallVector<SizeAndActionsVec, 1> &Actions =
+ AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace)->second;
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
- using TypeMap = DenseMap<LLT, LegalizeAction>;
- using SIVActionMap = DenseMap<std::pair<unsigned, LLT>, LegalizeAction>;
+ /// If an operation on a given vector type (say <M x iN>) isn't explicitly
+ /// specified, we proceed in 2 stages. First we legalize the underlying scalar
+ /// (so that there's at least one legal vector with that scalar), then we
+ /// adjust the number of elements in the vector so that it is legal. The
+ /// desired action in the first step is controlled by this function.
+ void setScalarInVectorAction(const unsigned Opcode, const unsigned TypeIndex,
+ const SizeAndActionsVec &SizeAndActions) {
+ unsigned OpcodeIdx = Opcode - FirstOp;
+ SmallVector<SizeAndActionsVec, 1> &Actions =
+ ScalarInVectorActions[OpcodeIdx];
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
+
+ /// See also setScalarInVectorAction.
+ /// This function lets you specify the number of elements in a vector that
+ /// are legal for a legal element size.
+ void setVectorNumElementAction(const unsigned Opcode,
+ const unsigned TypeIndex,
+ const unsigned ElementSize,
+ const SizeAndActionsVec &SizeAndActions) {
+ const unsigned OpcodeIdx = Opcode - FirstOp;
+ if (NumElements2Actions[OpcodeIdx].find(ElementSize) ==
+ NumElements2Actions[OpcodeIdx].end())
+ NumElements2Actions[OpcodeIdx][ElementSize] = {{}};
+ SmallVector<SizeAndActionsVec, 1> &Actions =
+ NumElements2Actions[OpcodeIdx].find(ElementSize)->second;
+ setActions(TypeIndex, Actions, SizeAndActions);
+ }
- SmallVector<TypeMap, 1> Actions[LastOp - FirstOp + 1];
- SIVActionMap ScalarInVectorActions;
- DenseMap<std::pair<unsigned, LLT>, uint16_t> MaxLegalVectorElts;
- DenseMap<unsigned, LegalizeAction> DefaultActions;
+ /// A partial SizeAndActionsVec potentially doesn't cover all bit sizes,
+ /// i.e. it's OK if it doesn't start from size 1.
+ static void checkPartialSizeAndActionsVector(const SizeAndActionsVec& v) {
+#ifndef NDEBUG
+ // The sizes should be in increasing order
+ int prev_size = -1;
+ for(auto SizeAndAction: v) {
+ assert(SizeAndAction.first > prev_size);
+ prev_size = SizeAndAction.first;
+ }
+ // - for every Widen action, there should be a larger bitsize that
+ // can be legalized towards (e.g. Legal, Lower, Libcall or Custom
+ // action).
+ // - for every Narrow action, there should be a smaller bitsize that
+ // can be legalized towards.
+ int SmallestNarrowIdx = -1;
+ int LargestWidenIdx = -1;
+ int SmallestLegalizableToSameSizeIdx = -1;
+ int LargestLegalizableToSameSizeIdx = -1;
+ for(size_t i=0; i<v.size(); ++i) {
+ switch (v[i].second) {
+ case FewerElements:
+ case NarrowScalar:
+ if (SmallestNarrowIdx == -1)
+ SmallestNarrowIdx = i;
+ break;
+ case WidenScalar:
+ case MoreElements:
+ LargestWidenIdx = i;
+ break;
+ case Unsupported:
+ break;
+ default:
+ if (SmallestLegalizableToSameSizeIdx == -1)
+ SmallestLegalizableToSameSizeIdx = i;
+ LargestLegalizableToSameSizeIdx = i;
+ }
+ }
+ if (SmallestNarrowIdx != -1) {
+ assert(SmallestLegalizableToSameSizeIdx != -1);
+ assert(SmallestNarrowIdx > SmallestLegalizableToSameSizeIdx);
+ }
+ if (LargestWidenIdx != -1)
+ assert(LargestWidenIdx < LargestLegalizableToSameSizeIdx);
+#endif
+ }
+
+ /// A full SizeAndActionsVec must cover all bit sizes, i.e. must start
+ /// from size 1.
+ static void checkFullSizeAndActionsVector(const SizeAndActionsVec& v) {
+#ifndef NDEBUG
+ // Data structure invariant: The first bit size must be size 1.
+ assert(v.size() >= 1);
+ assert(v[0].first == 1);
+ checkPartialSizeAndActionsVector(v);
+#endif
+ }
+
+ /// Sets actions for all bit sizes on a particular generic opcode, type
+ /// index and scalar or pointer type.
+ void setActions(unsigned TypeIndex,
+ SmallVector<SizeAndActionsVec, 1> &Actions,
+ const SizeAndActionsVec &SizeAndActions) {
+ checkFullSizeAndActionsVector(SizeAndActions);
+ if (Actions.size() <= TypeIndex)
+ Actions.resize(TypeIndex + 1);
+ Actions[TypeIndex] = SizeAndActions;
+ }
+
+ static SizeAndAction findAction(const SizeAndActionsVec &Vec,
+ const uint32_t Size);
+
+ /// Returns the next action needed to get the scalar or pointer type closer
+ /// to being legal
+ /// E.g. findLegalAction({G_REM, 13}) should return
+ /// (WidenScalar, 32). After that, findLegalAction({G_REM, 32}) will
+ /// probably be called, which should return (Lower, 32).
+ /// This is assuming the setScalarAction on G_REM was something like:
+ /// setScalarAction(G_REM, 0,
+ /// {{1, WidenScalar}, // bit sizes [ 1, 32[
+ /// {32, Lower}, // bit sizes [32, 33[
+ /// {33, NarrowScalar} // bit sizes [33, +inf[
+ /// });
+ std::pair<LegalizeAction, LLT>
+ findScalarLegalAction(const InstrAspect &Aspect) const;
+
+ /// Returns the next action needed towards legalizing the vector type.
+ std::pair<LegalizeAction, LLT>
+ findVectorLegalAction(const InstrAspect &Aspect) const;
+
+ static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
+ static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
- bool TablesInitialized = false;
+ // Data structures used temporarily during construction of legality data:
+ typedef DenseMap<LLT, LegalizeAction> TypeMap;
+ SmallVector<TypeMap, 1> SpecifiedActions[LastOp - FirstOp + 1];
+ SmallVector<SizeChangeStrategy, 1>
+ ScalarSizeChangeStrategies[LastOp - FirstOp + 1];
+ SmallVector<SizeChangeStrategy, 1>
+ VectorElementSizeChangeStrategies[LastOp - FirstOp + 1];
+ bool TablesInitialized;
+
+ // Data structures used by getAction:
+ SmallVector<SizeAndActionsVec, 1> ScalarActions[LastOp - FirstOp + 1];
+ SmallVector<SizeAndActionsVec, 1> ScalarInVectorActions[LastOp - FirstOp + 1];
+ std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+ AddrSpace2PointerActions[LastOp - FirstOp + 1];
+ std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+ NumElements2Actions[LastOp - FirstOp + 1];
};
-} // end namespace llvm
+} // end namespace llvm.
#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 85e6fef1f3c2..aa875c11d86f 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -70,13 +70,33 @@ class MachineIRBuilder {
return getMF().getRegInfo().createVirtualRegister(RC);
}
- unsigned getRegFromArg(unsigned Reg) { return Reg; }
+ void addUseFromArg(MachineInstrBuilder &MIB, unsigned Reg) {
+ MIB.addUse(Reg);
+ }
+ void addUseFromArg(MachineInstrBuilder &MIB, const MachineInstrBuilder &UseMIB) {
+ MIB.addUse(UseMIB->getOperand(0).getReg());
+ }
+
+ void addUsesFromArgs(MachineInstrBuilder &MIB) { }
+ template<typename UseArgTy, typename ... UseArgsTy>
+ void addUsesFromArgs(MachineInstrBuilder &MIB, UseArgTy &&Arg1, UseArgsTy &&... Args) {
+ addUseFromArg(MIB, Arg1);
+ addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+ }
+ unsigned getRegFromArg(unsigned Reg) { return Reg; }
unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
return MIB->getOperand(0).getReg();
}
public:
+ /// Some constructors for easy use.
+ MachineIRBuilder() = default;
+ MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
+ MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
+ setInstr(MI);
+ }
+
/// Getter for the function we currently build.
MachineFunction &getMF() {
assert(MF && "MachineFunction is not set");
@@ -146,9 +166,7 @@ public:
MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
UseArgsTy &&... Args) {
auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
- unsigned It[] = {(getRegFromArg(Args))...};
- for (const auto &i : It)
- MIB.addUse(i);
+ addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
return MIB;
}
@@ -168,11 +186,12 @@ public:
const MDNode *Expr);
/// Build and insert a DBG_VALUE instruction expressing the fact that the
- /// associated \p Variable lives in memory at \p Reg + \p Offset (suitably
- /// modified by \p Expr).
- MachineInstrBuilder buildIndirectDbgValue(unsigned Reg, unsigned Offset,
+ /// associated \p Variable lives in memory at \p Reg (suitably modified by \p
+ /// Expr).
+ MachineInstrBuilder buildIndirectDbgValue(unsigned Reg,
const MDNode *Variable,
const MDNode *Expr);
+
/// Build and insert a DBG_VALUE instruction expressing the fact that the
/// associated \p Variable lives in the stack slot specified by \p FI
/// (suitably modified by \p Expr).
@@ -181,11 +200,11 @@ public:
/// Build and insert a DBG_VALUE instructions specifying that \p Variable is
/// given by \p C (suitably modified by \p Expr).
- MachineInstrBuilder buildConstDbgValue(const Constant &C, unsigned Offset,
+ MachineInstrBuilder buildConstDbgValue(const Constant &C,
const MDNode *Variable,
const MDNode *Expr);
- /// Build and insert \p Res<def> = G_FRAME_INDEX \p Idx
+ /// Build and insert \p Res = G_FRAME_INDEX \p Idx
///
/// G_FRAME_INDEX materializes the address of an alloca value or other
/// stack-based object.
@@ -196,7 +215,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
- /// Build and insert \p Res<def> = G_GLOBAL_VALUE \p GV
+ /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
///
/// G_GLOBAL_VALUE materializes the address of the specified global
/// into \p Res.
@@ -208,7 +227,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
- /// Build and insert \p Res<def> = G_ADD \p Op0, \p Op1
+ /// Build and insert \p Res = G_ADD \p Op0, \p Op1
///
/// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
/// truncated to their width.
@@ -226,7 +245,7 @@ public:
return buildAdd(Res, (getRegFromArg(UseArgs))...);
}
- /// Build and insert \p Res<def> = G_SUB \p Op0, \p Op1
+ /// Build and insert \p Res = G_SUB \p Op0, \p Op1
///
/// G_SUB sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
/// truncated to their width.
@@ -239,7 +258,7 @@ public:
MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def> = G_MUL \p Op0, \p Op1
+ /// Build and insert \p Res = G_MUL \p Op0, \p Op1
///
/// G_MUL sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
/// truncated to their width.
@@ -252,7 +271,7 @@ public:
MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def> = G_GEP \p Op0, \p Op1
+ /// Build and insert \p Res = G_GEP \p Op0, \p Op1
///
/// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res.
@@ -266,7 +285,7 @@ public:
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Materialize and insert \p Res<def> = G_GEP \p Op0, (G_CONSTANT \p Value)
+ /// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
///
/// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res. If \p Value is zero then no
@@ -286,7 +305,7 @@ public:
const LLT &ValueTy,
uint64_t Value);
- /// Build and insert \p Res<def> = G_PTR_MASK \p Op0, \p NumBits
+ /// Build and insert \p Res = G_PTR_MASK \p Op0, \p NumBits
///
/// G_PTR_MASK clears the low bits of a pointer operand without destroying its
/// pointer properties. This has the effect of rounding the address *down* to
@@ -302,7 +321,7 @@ public:
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0,
uint32_t NumBits);
- /// Build and insert \p Res<def>, \p CarryOut<def> = G_UADDE \p Op0,
+ /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
/// \p Op1, \p CarryIn
///
/// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
@@ -319,7 +338,7 @@ public:
MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
unsigned Op1, unsigned CarryIn);
- /// Build and insert \p Res<def> = G_AND \p Op0, \p Op1
+ /// Build and insert \p Res = G_AND \p Op0, \p Op1
///
/// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
/// Op1.
@@ -329,10 +348,14 @@ public:
/// with the same (scalar or vector) type).
///
/// \return a MachineInstrBuilder for the newly created instruction.
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildAnd(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+ return buildAnd(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+ }
MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def> = G_OR \p Op0, \p Op1
+ /// Build and insert \p Res = G_OR \p Op0, \p Op1
///
/// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
/// Op1.
@@ -344,7 +367,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
- /// Build and insert \p Res<def> = G_ANYEXT \p Op0
+ /// Build and insert \p Res = G_ANYEXT \p Op0
///
/// G_ANYEXT produces a register of the specified width, with bits 0 to
/// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are unspecified
@@ -357,9 +380,14 @@ public:
/// \pre \p Op must be smaller than \p Res
///
/// \return The newly created instruction.
+
MachineInstrBuilder buildAnyExt(unsigned Res, unsigned Op);
+ template <typename DstType, typename ArgType>
+ MachineInstrBuilder buildAnyExt(DstType &&Res, ArgType &&Arg) {
+ return buildAnyExt(getDestFromArg(Res), getRegFromArg(Arg));
+ }
- /// Build and insert \p Res<def> = G_SEXT \p Op
+ /// Build and insert \p Res = G_SEXT \p Op
///
/// G_SEXT produces a register of the specified width, with bits 0 to
/// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are duplicated from the
@@ -373,7 +401,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_ZEXT \p Op
+ /// Build and insert \p Res = G_ZEXT \p Op
///
/// G_ZEXT produces a register of the specified width, with bits 0 to
/// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are 0. For a vector
@@ -387,7 +415,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
/// ///
/// \pre setBasicBlock or setMI must have been called.
@@ -397,7 +425,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
/// ///
/// \pre setBasicBlock or setMI must have been called.
@@ -407,6 +435,32 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op);
+ /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+ /// ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ ///
+ /// \return The newly created instruction.
+ template <typename DstTy, typename UseArgTy>
+ MachineInstrBuilder buildAnyExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+ return buildAnyExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+ }
+ MachineInstrBuilder buildAnyExtOrTrunc(unsigned Res, unsigned Op);
+
+ /// Build and insert \p Res = \p ExtOpc, \p Res = G_TRUNC \p
+ /// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
+ /// \p Op.
+ /// ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar or vector type.
+ /// \pre \p Op must be a generic virtual register with scalar or vector type.
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, unsigned Res,
+ unsigned Op);
+
/// Build and insert an appropriate cast between two registers of equal size.
MachineInstrBuilder buildCast(unsigned Dst, unsigned Src);
@@ -480,7 +534,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
- /// Build and insert \p Res<def> = COPY Op
+ /// Build and insert \p Res = COPY Op
///
/// Register-to-register COPY sets \p Res to \p Op.
///
@@ -488,8 +542,12 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildCopy(unsigned Res, unsigned Op);
+ template <typename DstType, typename SrcType>
+ MachineInstrBuilder buildCopy(DstType &&Res, SrcType &&Src) {
+ return buildCopy(getDestFromArg(Res), getRegFromArg(Src));
+ }
- /// Build and insert `Res<def> = G_LOAD Addr, MMO`.
+ /// Build and insert `Res = G_LOAD Addr, MMO`.
///
/// Loads the value stored at \p Addr. Puts the result in \p Res.
///
@@ -513,7 +571,7 @@ public:
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
MachineMemOperand &MMO);
- /// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0`.
+ /// Build and insert `Res0, ... = G_EXTRACT Src, Idx0`.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Res and \p Src must be generic virtual registers.
@@ -540,7 +598,7 @@ public:
void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
ArrayRef<uint64_t> Indices);
- /// Build and insert \p Res<def> = G_MERGE_VALUES \p Op0, ...
+ /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
///
/// G_MERGE_VALUES combines the input elements contiguously into a larger
/// register.
@@ -553,7 +611,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildMerge(unsigned Res, ArrayRef<unsigned> Ops);
- /// Build and insert \p Res0<def>, ... = G_UNMERGE_VALUES \p Op
+ /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
///
/// G_UNMERGE_VALUES splits contiguous bits of the input into multiple
///
@@ -581,7 +639,7 @@ public:
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
bool HasSideEffects);
- /// Build and insert \p Res<def> = G_FPTRUNC \p Op
+ /// Build and insert \p Res = G_FPTRUNC \p Op
///
/// G_FPTRUNC converts a floating-point value into one with a smaller type.
///
@@ -593,7 +651,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_TRUNC \p Op
+ /// Build and insert \p Res = G_TRUNC \p Op
///
/// G_TRUNC extracts the low bits of a type. For a vector type each element is
/// truncated independently before being packed into the destination.
@@ -605,6 +663,10 @@ public:
///
/// \return The newly created instruction.
MachineInstrBuilder buildTrunc(unsigned Res, unsigned Op);
+ template <typename DstType, typename SrcType>
+ MachineInstrBuilder buildTrunc(DstType &&Res, SrcType &&Src) {
+ return buildTrunc(getDestFromArg(Res), getRegFromArg(Src));
+ }
/// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
///
@@ -649,7 +711,7 @@ public:
MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
unsigned Op0, unsigned Op1);
- /// Build and insert \p Res<def> = G_INSERT_VECTOR_ELT \p Val,
+ /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
/// \p Elt, \p Idx
///
/// \pre setBasicBlock or setMI must have been called.
@@ -662,7 +724,7 @@ public:
MachineInstrBuilder buildInsertVectorElement(unsigned Res, unsigned Val,
unsigned Elt, unsigned Idx);
- /// Build and insert \p Res<def> = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
+ /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Res must be a generic virtual register with scalar type.
@@ -672,6 +734,24 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
unsigned Idx);
+
+ /// Build and insert `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+ /// MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+ /// \p CmpVal, otherwise leaving it unchanged. Puts the original value from \p
+ /// Addr in \p OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register of scalar type.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+ /// registers of the same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
+ unsigned CmpVal, unsigned NewVal,
+ MachineMemOperand &MMO);
};
} // End namespace llvm.
diff --git a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 60905c7ec226..02868b220984 100644
--- a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -407,6 +407,10 @@ protected:
mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
MapOfInstructionMappings;
+ /// Getting the minimal register class of a physreg is expensive.
+ /// Cache this information as we get it.
+ mutable DenseMap<unsigned, const TargetRegisterClass *> PhysRegMinimalRCs;
+
/// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
/// RegisterBank instances.
RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);
@@ -427,6 +431,11 @@ protected:
return *RegBanks[ID];
}
+ /// Get the MinimalPhysRegClass for Reg.
+ /// \pre Reg is a physical register.
+ const TargetRegisterClass &
+ getMinimalPhysRegClass(unsigned Reg, const TargetRegisterInfo &TRI) const;
+
/// Try to get the mapping of \p MI.
/// See getInstrMapping for more details on what a mapping represents.
///
@@ -699,8 +708,8 @@ public:
/// virtual register.
///
/// \pre \p Reg != 0 (NoRegister).
- static unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI);
+ unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI) const;
/// Check that information hold by this instance make sense for the
/// given \p TRI.
diff --git a/include/llvm/CodeGen/GlobalISel/Utils.h b/include/llvm/CodeGen/GlobalISel/Utils.h
index 50ddbeb9432a..5864c15cc8eb 100644
--- a/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -79,5 +79,11 @@ Optional<int64_t> getConstantVRegVal(unsigned VReg,
const ConstantFP* getConstantFPVRegVal(unsigned VReg,
const MachineRegisterInfo &MRI);
+/// See if Reg is defined by a single def instruction that is
+/// Opcode. Also try to do trivial folding if it's a COPY with
+/// same types. Returns null otherwise.
+MachineInstr *getOpcodeDef(unsigned Opcode, unsigned Reg,
+ const MachineRegisterInfo &MRI);
+
} // End namespace llvm.
#endif